diff --git a/.travis.yml b/.travis.yml
index b7467ab8a..bad4a77f1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,9 +1,9 @@
 sudo: false
 language: go
 go:
-  - 1.11.x
   - 1.12.x
   - 1.13.x
+  - 1.14.x
 
 before_install:
   - export PATH=/home/travis/gopath/bin:$PATH
@@ -45,4 +45,4 @@ deploy:
     on:
       tags: true
       repo: chrislusf/seaweedfs
-    go: 1.13.x
+    go: 1.14.x
diff --git a/README.md b/README.md
index 835557f30..0c8a5d9c7 100644
--- a/README.md
+++ b/README.md
@@ -81,17 +81,15 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two
 1. to store billions of files!
 2. to serve the files fast!
 
-SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (just one disk read operation).
+SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (O(1), usually just one disk read operation).
+
+SeaweedFS can transparently integrate with the cloud. With hot data on the local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity, without any client-side changes.
 
 There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.
 
 SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf)
 
-SeaweedFS can work very well with just the object store. [[Filer]] can then be added later to support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Etcd/Cassandra/LevelDB.
-
-[Back to TOC](#table-of-contents)
-
-## Features ##
+On top of the object store, the optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Redis, Etcd, Cassandra, LevelDB, MemSql, TiDB, TiKV, CockroachDB, etc.
 
 [Back to TOC](#table-of-contents)
 
@@ -104,8 +102,10 @@ SeaweedFS can work very well with just the object store. [[Filer]] can then be a
 * Adding/Removing servers does **not** cause any data re-balancing.
 * Optionally fix the orientation for jpeg pictures.
 * Support ETag, Accept-Range, Last-Modified, etc.
-* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance.
+* Support in-memory/leveldb/readonly mode tuning for memory/performance balance.
 * Support rebalancing the writable and readonly volumes.
+* [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data.
+* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability.
 
 [Back to TOC](#table-of-contents)
 
@@ -113,7 +113,6 @@ SeaweedFS can work very well with just the object store. [[Filer]] can then be a
 * [filer server][Filer] provide "normal" directories and files via http.
 * [mount filer][Mount] to read and write files directly as a local directory via FUSE.
 * [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling.
-* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability.
 * [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs.
 * [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
 * [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
@@ -125,6 +124,7 @@ SeaweedFS can work very well with just the object store. [[Filer]] can then be a
 [Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System
 [WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV
 [ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage
+[CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier
 
 [Back to TOC](#table-of-contents)
 
@@ -318,6 +318,16 @@ Each individual file size is limited to the volume size.
 
 All file meta information stored on an volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.
 
+### Tiered Storage to the cloud ###
+
+The local volume servers are much faster, while cloud storage has elastic capacity and is actually more cost-efficient if not accessed often (usually free to upload, but relatively costly to access). With the append-only structure and O(1) access time, SeaweedFS can take advantage of both local and cloud storage by offloading the warm data to the cloud.
+
+Usually hot data are fresh and warm data are old. SeaweedFS puts the newly created volumes on local servers, and optionally uploads the older volumes to the cloud. If the older data are accessed less often, this literally gives you unlimited capacity with a limited number of local servers, while staying fast for new data.
+
+With the O(1) access time, the network latency cost is kept at a minimum.
+
+If the hot~warm data is split as 20~80, with 20 servers, you can achieve the storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data too, and get 5X storage throughput.
+
 [Back to TOC](#table-of-contents)
 
 ## Compared to Other File Systems ##
 
@@ -344,7 +354,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
 
 * SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
 * SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
-* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Redis, Etcd, MySql, Postgres, etc, and is easy to customized.
+* SeaweedFS Filer metadata store can be any well-known and proven data store, e.g., Cassandra, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc., and is easy to customize.
 * SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.
 
 | System | File Meta | File Content Read| POSIX | REST API | Optimized for small files |
 
@@ -376,7 +386,7 @@ Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS pl
 
 SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.
 
-SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, Cassandra, to manage file directories. There are proven, scalable, and easier to manage.
+SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachDB, to manage file directories. These are proven, scalable, and easier to manage.
 
 | SeaweedFS | comparable to Ceph | advantage |
 | ------------- | ------------- | ---------------- |
@@ -451,50 +461,49 @@ My Own Unscientific Single Machine Results on Mac Book with Solid State Disk, CP
 
 Write 1 million 1KB file:
 ```
 Concurrency Level:      16
-Time taken for tests:   88.796 seconds
+Time taken for tests:   66.753 seconds
 Complete requests:      1048576
 Failed requests:        0
-Total transferred:      1106764659 bytes
-Requests per second:    11808.87 [#/sec]
-Transfer rate:          12172.05 [Kbytes/sec]
+Total transferred:      1106789009 bytes
+Requests per second:    15708.23 [#/sec]
+Transfer rate:          16191.69 [Kbytes/sec]
 
 Connection Times (ms)
               min      avg        max      std
-Total:        0.2      1.3       44.8      0.9
+Total:        0.3      1.0       84.3      0.9
 
 Percentage of the requests served within a certain time (ms)
-   50%      1.1 ms
-   66%      1.3 ms
-   75%      1.5 ms
-   80%      1.7 ms
-   90%      2.1 ms
-   95%      2.6 ms
-   98%      3.7 ms
-   99%      4.6 ms
-  100%     44.8 ms
+   50%      0.8 ms
+   66%      1.0 ms
+   75%      1.1 ms
+   80%      1.2 ms
+   90%      1.4 ms
+   95%      1.7 ms
+   98%      2.1 ms
+   99%      2.6 ms
+  100%     84.3 ms
 ```
 
 Randomly read 1 million files:
 ```
 Concurrency Level:      16
-Time taken for tests:   34.263 seconds
+Time taken for tests:   22.301 seconds
 Complete requests:      1048576
 Failed requests:        0
-Total transferred:      1106762945 bytes
-Requests per second:    30603.34 [#/sec]
-Transfer rate:          31544.49 [Kbytes/sec]
+Total transferred:      1106812873 bytes
+Requests per second:    47019.38 [#/sec]
+Transfer rate:          48467.57 [Kbytes/sec]
 
 Connection Times (ms)
               min      avg        max      std
-Total:        0.0      0.5       20.7      0.7
+Total:        0.0      0.3       54.1      0.2
 
 Percentage of the requests served within a certain time (ms)
-   50%      0.4 ms
-   75%      0.5 ms
-   95%      0.6 ms
-   98%      0.8 ms
-   99%      1.2 ms
-  100%     20.7 ms
+   50%      0.3 ms
+   90%      0.4 ms
+   98%      0.6 ms
+   99%      0.7 ms
+  100%     54.1 ms
 ```
 
 [Back to TOC](#table-of-contents)
 
@@ -513,6 +522,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 
+The text of this page is available for modification and reuse under the terms of the Creative Commons Attribution-ShareAlike 3.0 Unported License and the GNU Free Documentation License (unversioned, with no invariant sections, front-cover texts, or back-cover texts).
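To make the README's "16-byte map entry" concrete: a minimal Go sketch of the in-memory needle index it describes, assuming offsets are stored in 8-byte units (consistent with the "at most 8 unused bytes between files" note above); the type and field names are illustrative, not SeaweedFS's actual identifiers.

```go
package main

import "fmt"

// needleEntry mirrors the README's <64bit key, 32bit offset, 32bit size>:
// 8 + 4 + 4 = 16 bytes of memory per file on a volume server.
type needleEntry struct {
	key    uint64 // file id within the volume
	offset uint32 // offset into the volume file, in 8-byte units (assumed)
	size   uint32 // file size in bytes
}

func main() {
	// One map entry per file. Serving a read is a map lookup plus a single
	// disk read at the decoded offset -- the O(1) access the README claims.
	index := map[uint64]needleEntry{
		0x1637037d6: {key: 0x1637037d6, offset: 42, size: 1024},
	}
	e := index[0x1637037d6]
	fmt.Printf("read %d bytes at byte offset %d\n", e.size, uint64(e.offset)*8)
}
```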
+
 [Back to TOC](#table-of-contents)
 
 ## Stargazers over time ##
diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build
index 85cbb6143..306ce3aa1 100644
--- a/docker/Dockerfile.go_build
+++ b/docker/Dockerfile.go_build
@@ -1,5 +1,15 @@
-FROM golang:latest
-RUN go get github.com/chrislusf/seaweedfs/weed
+FROM frolvlad/alpine-glibc as builder
+RUN apk add git go g++
+RUN mkdir -p /go/src/github.com/chrislusf/
+RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
+RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install
+
+FROM alpine AS final
+LABEL author="Chris Lu"
+COPY --from=builder /root/go/bin/weed /usr/bin/
+RUN mkdir -p /etc/seaweedfs
+COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
+COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
 
 # volume server grpc port
 EXPOSE 18080
@@ -20,10 +30,6 @@ RUN mkdir -p /data/filerldb2
 
 VOLUME /data
 
-RUN mkdir -p /etc/seaweedfs
-RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
-RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
 RUN chmod +x /entrypoint.sh
-RUN cp /go/bin/weed /usr/bin/
 
 ENTRYPOINT ["/entrypoint.sh"]
diff --git a/docker/Dockerfile.local b/docker/Dockerfile.local
new file mode 100644
index 000000000..b4a7b6504
--- /dev/null
+++ b/docker/Dockerfile.local
@@ -0,0 +1,29 @@
+FROM alpine AS final
+LABEL author="Chris Lu"
+COPY ./weed /usr/bin/
+RUN mkdir -p /etc/seaweedfs
+COPY ./filer.toml /etc/seaweedfs/filer.toml
+COPY ./entrypoint.sh /entrypoint.sh
+
+# volume server grpc port
+EXPOSE 18080
+# volume server http port
+EXPOSE 8080
+# filer server grpc port
+EXPOSE 18888
+# filer server http port
+EXPOSE 8888
+# master server shared grpc port
+EXPOSE 19333
+# master server shared http port
+EXPOSE 9333
+# s3 server http port
+EXPOSE 8333
+
+RUN mkdir -p /data/filerldb2
+
+VOLUME /data
+
+RUN chmod +x /entrypoint.sh
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/docker/Makefile b/docker/Makefile
new file mode 100644
index 000000000..166188bc3
--- /dev/null
+++ b/docker/Makefile
@@ -0,0 +1,19 @@
+all: gen
+
+.PHONY : gen
+
+gen: dev
+
+build:
+	cd ../weed; GOOS=linux go build; mv weed ../docker/
+	docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
+	rm ./weed
+
+dev: build
+	docker-compose -f local-dev-compose.yml -p seaweedfs up
+
+cluster: build
+	docker-compose -f local-cluster-compose.yml -p seaweedfs up
+
+clean:
+	rm ./weed
diff --git a/docker/README.md b/docker/README.md
index cfe281e71..65241b517 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -11,11 +11,19 @@
 docker-compose -f seaweedfs-compose.yml -p seaweedfs up
 
 ```
 
-## Development
+## Try latest tip
 
 ```bash
-cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker
-docker-compose -f dev-compose.yml -p seaweedfs up
+wget https://raw.githubusercontent.com/chrislusf/seaweedfs/master/docker/seaweedfs-dev-compose.yml
+
+docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up
+
+```
+## Local Development
+
+```bash
+cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker
+make
 
 ```
diff --git a/docker/dev-compose.yml b/docker/dev-compose.yml
deleted file mode 100644
index 0306b3cb0..000000000
--- a/docker/dev-compose.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-version: '2'
-
-services:
-  master:
-    build:
-      context: .
-      dockerfile: Dockerfile.go_build
-    ports:
-      - 9333:9333
-      - 19333:19333
-    command: "master"
-  volume:
-    build:
-      context: .
-      dockerfile: Dockerfile.go_build
-    ports:
-      - 8080:8080
-      - 18080:18080
-    command: 'volume -max=5 -mserver="master:9333" -port=8080'
-    depends_on:
-      - master
-  filer:
-    build:
-      context: .
-      dockerfile: Dockerfile.go_build
-    ports:
-      - 8888:8888
-      - 18888:18888
-    command: 'filer -master="master:9333"'
-    depends_on:
-      - master
-      - volume
-  s3:
-    build:
-      context: .
-      dockerfile: Dockerfile.go_build
-    ports:
-      - 8333:8333
-    command: 's3 -filer="filer:8888"'
-    depends_on:
-      - master
-      - volume
-      - filer
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index c28bd263c..791527d3a 100755
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -3,7 +3,7 @@
 case "$1" in
 
   'master')
-    ARGS="-ip `hostname -i` -mdir /data"
+    ARGS="-mdir /data"
    # Is this instance linked with an other master? (Docker commandline "--link master1:master")
    if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
      ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
diff --git a/docker/local-cluster-compose.yml b/docker/local-cluster-compose.yml
new file mode 100644
index 000000000..0b6860fa1
--- /dev/null
+++ b/docker/local-cluster-compose.yml
@@ -0,0 +1,53 @@
+version: '2'
+
+services:
+  master0:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 9333:9333
+      - 19333:19333
+    command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335"
+  master1:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 9334:9334
+      - 19334:19334
+    command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335"
+  master2:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 9335:9335
+      - 19335:19335
+    command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335"
+  volume:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 8080:8080
+      - 18080:18080
+    command: '-v=2 volume -max=5 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume'
+    depends_on:
+      - master0
+      - master1
+      - master2
+  filer:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 8888:8888
+      - 18888:18888
+    command: '-v=4 filer -master="master0:9333,master1:9334,master2:9335"'
+    depends_on:
+      - master0
+      - master1
+      - master2
+      - volume
+  s3:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 8333:8333
+    command: '-v=4 s3 -filer="filer:8888"'
+    depends_on:
+      - master0
+      - master1
+      - master2
+      - volume
+      - filer
diff --git a/docker/local-dev-compose.yml b/docker/local-dev-compose.yml
new file mode 100644
index 000000000..5ff42ed28
--- /dev/null
+++ b/docker/local-dev-compose.yml
@@ -0,0 +1,35 @@
+version: '2'
+
+services:
+  master:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 9333:9333
+      - 19333:19333
+    command: "master -ip=master"
+  volume:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 8080:8080
+      - 18080:18080
+    command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume'
+    depends_on:
+      - master
+  filer:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 8888:8888
+      - 18888:18888
+    command: '-v=4 filer -master="master:9333"'
+    depends_on:
+      - master
+      - volume
+  s3:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 8333:8333
+    command: '-v=4 s3 -filer="filer:8888"'
+    depends_on:
+      - master
+      - volume
+      - filer
diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml
index d66b921bb..35509c541 100644
--- a/docker/seaweedfs-compose.yml
+++ b/docker/seaweedfs-compose.yml
@@ -4,28 +4,28 @@ services:
   master:
     image: chrislusf/seaweedfs # use a remote image
     ports:
-    - 9333:9333
-    - 19333:19333
-    command: "master"
+      - 9333:9333
+      - 19333:19333
+    command: "master -ip=master"
volume: image: chrislusf/seaweedfs # use a remote image ports: - - 8080:8080 - - 18080:18080 + - 8080:8080 + - 18080:18080 command: 'volume -max=15 -mserver="master:9333" -port=8080' depends_on: - - master + - master filer: image: chrislusf/seaweedfs # use a remote image ports: - - 8888:8888 - - 18888:18888 + - 8888:8888 + - 18888:18888 command: 'filer -master="master:9333"' tty: true stdin_open: true depends_on: - - master - - volume + - master + - volume cronjob: image: chrislusf/seaweedfs # use a remote image command: 'cronjob' @@ -34,14 +34,14 @@ services: CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *' WEED_MASTER: master:9333 # Default: localhost:9333 depends_on: - - master - - volume + - master + - volume s3: image: chrislusf/seaweedfs # use a remote image ports: - - 8333:8333 + - 8333:8333 command: 's3 -filer="filer:8888"' depends_on: - - master - - volume - - filer + - master + - volume + - filer diff --git a/docker/seaweedfs-dev-compose.yml b/docker/seaweedfs-dev-compose.yml new file mode 100644 index 000000000..197510a9f --- /dev/null +++ b/docker/seaweedfs-dev-compose.yml @@ -0,0 +1,35 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8080:8080 + - 18080:18080 + command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' + depends_on: + - master + filer: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8888:8888 + - 18888:18888 + command: '-v=4 filer -master="master:9333"' + depends_on: + - master + - volume + s3: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8333:8333 + command: '-v=4 s3 -filer="filer:8888"' + depends_on: + - master + - volume + - filer diff --git a/go.mod b/go.mod index 24948591a..78803f70e 100644 --- a/go.mod +++ b/go.mod @@ -4,21 +4,10 @@ go 1.12 require ( cloud.google.com/go v0.44.3 - contrib.go.opencensus.io/exporter/aws v0.0.0-20190807220307-c50fb1bd7f21 // indirect - contrib.go.opencensus.io/exporter/ocagent v0.6.0 // indirect - contrib.go.opencensus.io/exporter/stackdriver v0.12.5 // indirect - contrib.go.opencensus.io/resource v0.1.2 // indirect - github.com/Azure/azure-amqp-common-go v1.1.4 // indirect github.com/Azure/azure-pipeline-go v0.2.2 // indirect - github.com/Azure/azure-sdk-for-go v33.0.0+incompatible // indirect github.com/Azure/azure-storage-blob-go v0.8.0 - github.com/Azure/go-autorest v13.0.0+incompatible // indirect - github.com/Azure/go-autorest/tracing v0.5.0 // indirect github.com/DataDog/zstd v1.4.1 // indirect - github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190828224159-d93c53a4824c // indirect github.com/Shopify/sarama v1.23.1 - github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect - github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 // indirect github.com/aws/aws-sdk-go v1.23.13 github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 github.com/coreos/etcd v3.3.15+incompatible // indirect @@ -28,38 +17,34 @@ require ( github.com/disintegration/imaging v1.6.1 github.com/dustin/go-humanize v1.0.0 github.com/eapache/go-resiliency v1.2.0 // indirect - github.com/gabriel-vasile/mimetype v0.3.17 - github.com/go-kit/kit v0.9.0 // indirect + github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a + github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 + github.com/frankban/quicktest v1.7.2 // indirect + 
github.com/gabriel-vasile/mimetype v1.0.0 github.com/go-redis/redis v6.15.2+incompatible github.com/go-sql-driver/mysql v1.4.1 github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect github.com/golang/protobuf v1.3.2 github.com/google/btree v1.0.0 - github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.3 github.com/gorilla/websocket v1.4.1 // indirect github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect + github.com/hashicorp/golang-lru v0.5.3 // indirect github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f github.com/jcmturner/gofork v1.0.0 // indirect - github.com/juju/errors v0.0.0-20190930114154-d42613fe1ab9 // indirect github.com/karlseguin/ccache v2.0.3+incompatible github.com/karlseguin/expect v1.0.1 // indirect github.com/klauspost/cpuid v1.2.1 // indirect github.com/klauspost/crc32 v1.2.0 github.com/klauspost/reedsolomon v1.9.2 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect - github.com/kr/pty v1.1.8 // indirect github.com/kurin/blazer v0.5.3 github.com/lib/pq v1.2.0 github.com/magiconair/properties v1.8.1 // indirect github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect - github.com/mattn/go-isatty v0.0.9 // indirect github.com/mattn/go-runewidth v0.0.4 // indirect - github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/nats-io/gnatsd v1.4.1 // indirect - github.com/nats-io/go-nats v1.7.2 // indirect github.com/nats-io/nats-server/v2 v2.0.4 // indirect github.com/onsi/ginkgo v1.10.1 // indirect github.com/onsi/gomega v1.7.0 // indirect @@ -76,10 +61,7 @@ require ( github.com/rakyll/statik v0.1.6 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect - github.com/rogpeppe/fastuuid v1.2.0 // indirect - github.com/rogpeppe/go-internal v1.3.1 // indirect github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd - github.com/satori/go.uuid v1.2.0 github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff github.com/sirupsen/logrus v1.4.2 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -87,26 +69,20 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.4.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect - github.com/stretchr/testify v1.4.0 // indirect github.com/syndtr/goleveldb v1.0.0 github.com/tidwall/gjson v1.3.2 github.com/tidwall/match v1.0.1 - github.com/twinj/uuid v1.0.0 // indirect github.com/uber-go/atomic v1.4.0 // indirect github.com/uber/jaeger-client-go v2.17.0+incompatible // indirect github.com/uber/jaeger-lib v2.0.0+incompatible // indirect - github.com/ugorji/go v1.1.7 // indirect github.com/willf/bitset v1.1.10 // indirect github.com/willf/bloom v2.0.3+incompatible github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect go.etcd.io/etcd v3.3.15+incompatible - go.mongodb.org/mongo-driver v1.1.0 // indirect gocloud.dev v0.16.0 gocloud.dev/pubsub/natspubsub v0.16.0 gocloud.dev/pubsub/rabbitpubsub v0.16.0 - golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 // indirect golang.org/x/image v0.0.0-20190829233526-b3c06291d021 // indirect - golang.org/x/mobile v0.0.0-20190830201351-c6da95954960 // indirect golang.org/x/net v0.0.0-20190909003024-a7b16738d86b golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b 
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 @@ -116,8 +92,7 @@ require ( gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect - honnef.co/go/tools v0.0.1-2019.2.2 // indirect - pack.ag/amqp v0.12.1 // indirect + sigs.k8s.io/yaml v1.1.0 // indirect ) replace github.com/satori/go.uuid v1.2.0 => github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b diff --git a/go.sum b/go.sum index 043cdb00d..d16280568 100644 --- a/go.sum +++ b/go.sum @@ -1,29 +1,17 @@ -bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= -cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.3 h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ= cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= -contrib.go.opencensus.io/exporter/aws v0.0.0-20190807220307-c50fb1bd7f21/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ= contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= -contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= -contrib.go.opencensus.io/exporter/stackdriver v0.11.0/go.mod h1:hA7rlmtavV03FGxzWXAPBUnZeZBhWN/QYQAuMtxc9Bk= contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= -contrib.go.opencensus.io/exporter/stackdriver v0.12.5/go.mod h1:8x999/OcIPy5ivx/wDiV7Gx4D+VUPODf0mWRGRc5kSk= contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= -contrib.go.opencensus.io/resource v0.0.0-20190131005048-21591786a5e0/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= -contrib.go.opencensus.io/resource v0.1.2/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= -github.com/Azure/azure-amqp-common-go v1.1.3/go.mod h1:FhZtXirFANw40UXI2ntweO+VOkfaw8s6vZxUiRhLYW8= -github.com/Azure/azure-amqp-common-go v1.1.4/go.mod h1:FhZtXirFANw40UXI2ntweO+VOkfaw8s6vZxUiRhLYW8= github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= @@ -31,26 +19,14 @@ 
github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZ github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v27.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v33.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-service-bus-go v0.4.1/go.mod h1:d9ho9e/06euiTwGpKxmlbpPhFUsfCsq6a4tZ68r51qI= github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= -github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck= -github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= -github.com/Azure/go-autorest v11.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v11.1.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v11.1.2+incompatible h1:viZ3tV5l4gE2Sw0xrasFHytCGtzYCrT+um/rrSQ1BfA= -github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ= github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -58,12 +34,8 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58 github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190418212003-6ac0b49e7197/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo= github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo= -github.com/GoogleCloudPlatform/cloudsql-proxy 
v0.0.0-20190828224159-d93c53a4824c/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.23.1 h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs= github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= @@ -71,19 +43,11 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f h1:5ZfJxyXo8KyX8DgGXC5B7ILL8y51fci/qYz2B4j8iLY= github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.18.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.19.16/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.21.4 h1:1xB+x6Dzev8ETmeHEiSfUVbIzmC/0EyFfXMkJpzKPCE= -github.com/aws/aws-sdk-go v1.21.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM= github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -97,20 +61,20 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCS github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chrislusf/raft 
v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI= github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92/go.mod h1:4jyiUCD5y548+yKW+oiHtccBiMaLCCbFBpK2t7X4eUo= -github.com/chrislusf/seaweedfs v0.0.0-20190912032620-ae53f636804e h1:PmqW1XGq0V6KnwOFa3hOSqsqa/bH66zxWzCVMOo5Yi4= -github.com/chrislusf/seaweedfs v0.0.0-20190912032620-ae53f636804e/go.mod h1:e5Pz27e2DxLCFt6GbCBP5/qJygD4TkOL5xqSFYFq+2U= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -130,9 +94,9 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 h1:hxuZop6tSoOi0sxFzoGGYdRqNrPubyaIf9KoBG9tPiE= github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -143,10 +107,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/disintegration/imaging v1.6.0 h1:nVPXRUUQ36Z7MNf0O77UzgnOb1mkMMor7lmJMJXc/mA= -github.com/disintegration/imaging v1.6.0/go.mod 
h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ= github.com/disintegration/imaging v1.6.1 h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE= github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -161,27 +122,28 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.8.6/go.mod h1:XB9+ce7x+IrsjgIVnRnql0O61gj/np0/bGDfhJI3sCU= -github.com/envoyproxy/protoc-gen-validate v0.0.0-20190405222122-d6164de49109/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= +github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M= +github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4/go.mod h1:vsJz7uE339KUCpBXx3JAJzSRH7Uk4iGGyJzR529qDIA= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gabriel-vasile/mimetype v0.3.15 h1:qSK8E/VAF4pxtkxqarYRAVvYNDyCFJXKAYAyGNcESII= -github.com/gabriel-vasile/mimetype v0.3.15/go.mod h1:kMJbg3SlWZCsj4R73F1WDzbT9AyGCOVmUtIxxwO5pmI= github.com/gabriel-vasile/mimetype v0.3.17 h1:NGWgggJJqTofUcTV1E7hkk2zVjZ54EfJa1z5O3z6By4= github.com/gabriel-vasile/mimetype v0.3.17/go.mod h1:kMJbg3SlWZCsj4R73F1WDzbT9AyGCOVmUtIxxwO5pmI= +github.com/gabriel-vasile/mimetype v1.0.0 h1:0QKnAQQhG6oOsb4GK7iPlet7RtjHi9us8RF/nXoTxhI= +github.com/gabriel-vasile/mimetype v1.0.0/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-ini/ini v1.46.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= @@ -193,12 +155,8 @@ github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/ github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gocql/gocql v0.0.0-20190717234527-2ba2dd7440dc h1:m9VsbhR3h7mWKHLh5a+Q8LvBdWEjA6dgY1arxhxvQrU= -github.com/gocql/gocql v0.0.0-20190717234527-2ba2dd7440dc/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8= github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 h1:P66kRWyEoIx6URKgAC3ijx9jo9gEid7bEhLQ/Z0G65A= github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -213,6 +171,7 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -233,8 +192,11 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic= github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= +github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk= github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -242,15 +204,11 @@ github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:x github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/wire v0.2.2 h1:fSIRzE/K12IaNgV6X0173X/oLrTwHKRiMcFZhiDrN3s= -github.com/google/wire v0.2.2/go.mod h1:7FHVg6mFpFQrjeUZrm+BaD50N5jnDKm50uVPTpyYOmU= github.com/google/wire v0.3.0 h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60= github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= @@ -268,6 +226,7 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -281,7 +240,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.11.0 h1:aT5ISUniaOTErogCQ+4pGoYNBB6rm6Fq3g1v8QwYGas= github.com/grpc-ecosystem/grpc-gateway v1.11.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= @@ -306,7 +265,6 @@ github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/U github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= 
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/joeslay/seaweedfs v0.0.0-20190912104409-d8c34b032fb6/go.mod h1:ljVry+CyFSNBLlKiell2UlxOKCvXXHjyBhiGDzXa+0c= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -314,8 +272,7 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/juju/errors v0.0.0-20190930114154-d42613fe1ab9 h1:hJix6idebFclqlfZCHE7EUX7uqLCyb70nHNHH1XKGBg= -github.com/juju/errors v0.0.0-20190930114154-d42613fe1ab9/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU= @@ -341,17 +298,13 @@ github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kurin/blazer v0.5.3 h1:SAgYv0TKU0kN/ETfO5ExjNAPyMt2FocO2s/UlCHfjAk= github.com/kurin/blazer v0.5.3/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/lyft/protoc-gen-validate v0.1.0/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= @@ -365,7 +318,6 @@ github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrs github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -385,16 +337,14 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s= github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/gnatsd v1.4.1 h1:RconcfDeWpKCD6QIIwiVFcvForlXpWeJP7i5/lDLy44= -github.com/nats-io/gnatsd v1.4.1/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ= -github.com/nats-io/go-nats v1.7.2 h1:cJujlwCYR8iMz5ofZSD/p2WLW8FabhkQ2lIEVbSvNSA= -github.com/nats-io/go-nats v1.7.2/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0= github.com/nats-io/jwt v0.2.6/go.mod h1:mQxQ0uHQ9FhEVPIcTSKwx2lqZEpXWWcCgA7R6NrWvvY= +github.com/nats-io/jwt v0.2.14 h1:wA50KvFz/JXGXMHRygTWsRGh/ixxgC5E3kHvmtGLNf4= github.com/nats-io/jwt v0.2.14/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/nats-server/v2 v2.0.0/go.mod h1:RyVdsHHvY4B6c9pWG+uRLpZ0h0XsqiuKp2XCTurP5LI= +github.com/nats-io/nats-server/v2 v2.0.4 h1:XOMeQRbhl1lGNTIctPhih6pTa15NGif54Uas6ZW5q7g= github.com/nats-io/nats-server/v2 v2.0.4/go.mod h1:AWdGEVbjKRS9ZIx4DSP5eKW48nfFm7q3uiSkP/1KD7M= github.com/nats-io/nats.go v1.8.1 h1:6lF/f1/NN6kzUDBz6pyvQDEXO39jqXcWRLu/tKjtOUQ= github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= @@ -404,7 +354,9 @@ github.com/nats-io/nkeys v0.1.0 h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= +github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k= github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -412,17 +364,18 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo 
v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= @@ -431,12 +384,11 @@ github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUr github.com/peterh/liner v1.1.0 h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os= github.com/peterh/liner v1.1.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.7+incompatible h1:Eerk9aiqeZo2QzsbWOAsELUf9ddvAxEdMY9LYze/DEc= github.com/pierrec/lz4 v2.2.7+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= +github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= @@ -456,11 +408,8 @@ github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100 h1:TRyps2d+2TsJv1Vk github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0 h1:GIEq+wZfrl2bcJxpuSrEH4H7/nlf5YdmpS+dU9lNIt8= github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0/go.mod h1:G/6rJpnYwM0LKMec2rI82/5Kg6GaZMvlfB+e6/tvYmI= -github.com/pingcap/pd 
v2.1.17+incompatible h1:mpfJYffRC14jeAfiq0jbHkqXVc8ZGNV0Lr2xG1sJslw= github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b h1:6GfcYOX9/CCxPnNOivVxiDYXbZrCHU1mRp691iw9EYs= github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b/go.mod h1:YfrHdQ613A+E2FSugyXOdJmeZQbXNjpXX2doNe8MGj8= -github.com/pingcap/tidb v2.0.11+incompatible h1:Shz+ry1DzQNsPk1QAejnM+5tgjbwZuzPnIER5aCjQ6c= -github.com/pingcap/tidb v2.0.11+incompatible/go.mod h1:I8C6jrPINP2rrVunTRd7C9fRRhQrtR43S1/CL5ix/yQ= github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU= github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20191015023537-709b39e7f8bb/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= @@ -470,13 +419,11 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -485,7 +432,6 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= @@ -493,7 +439,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common 
v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -501,7 +446,6 @@ github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkp github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -509,7 +453,6 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.4 h1:w8DjqFMJDjuVwdZBQoOozr4MVWOnwF7RcL/7uxBjY78= github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs= github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= @@ -520,13 +463,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qq github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= -github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b h1:8O/3dJ2dGfuLVN0bo2B0IdkG0L8cjpmFJ4r8eRQBCi8= -github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg= github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0= github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod 
h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -567,10 +505,12 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= @@ -581,16 +521,14 @@ github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI= github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= -github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/twinj/uuid v1.0.0 h1:fzz7COZnDrXGTAOHGuUGYd6sG+JMq+AoE7+Jlu0przk= -github.com/twinj/uuid v1.0.0/go.mod h1:mMgcE1RHFUFqe5AfiwlINXisXfDGro23fWdPUfOMjRY= github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.17.0+incompatible h1:35tpDuT3k0oBiN/aGoSWuiFaqKgKZSciSMnWrazhSHE= @@ -601,10 +539,9 @@ github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= 
-github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= +github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20= github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= @@ -622,17 +559,12 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw= -go.etcd.io/etcd v3.3.13+incompatible h1:jCejD5EMnlGxFvcGRyEV4VGlENZc7oPQX6o0t7n3xbw= -go.etcd.io/etcd v3.3.13+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= go.etcd.io/etcd v3.3.15+incompatible h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ= go.etcd.io/etcd v3.3.15+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= -go.mongodb.org/mongo-driver v1.0.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -646,44 +578,29 @@ go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -gocloud.dev v0.15.0 h1:Tl8dkOHWVZiYBYPxG2ouhpfmluoQGt3mY323DaAHaC8= -gocloud.dev v0.15.0/go.mod h1:ShXCyJaGrJu9y/7a6+DSCyBb9MFGZ1P5wwPa0Wu6w34= gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM= gocloud.dev v0.16.0/go.mod h1:xWGXD8t7bEhqPIuyAUFyXV9qHn+PvEY2F2GKPh7i/O0= -gocloud.dev/pubsub/natspubsub v0.15.0 h1:JarkPUp9xX9+A1v7VgZeY72bATZIQUzkyP1ANJ+bwU4= -gocloud.dev/pubsub/natspubsub v0.15.0/go.mod h1:zgjFYbmxa3Tiqlfp9BnZBULo+/lpK8vZPZ3YMG2MrkI= gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPGaGEI= gocloud.dev/pubsub/natspubsub v0.16.0/go.mod h1:0n7pT7PkLMClBUHDrOkHfOFVr/o/6kawNMwsyAbwadI= -gocloud.dev/pubsub/rabbitpubsub v0.15.0 h1:Kl+NAY6nt1bUYZXQIbtCr/seoivwhGo7uc0L9XmOA+g= -gocloud.dev/pubsub/rabbitpubsub v0.15.0/go.mod h1:LGg5Acwcpry+GeLNaA01xm0Ij43YUis6kht2qRX2tg0= gocloud.dev/pubsub/rabbitpubsub v0.16.0 h1:Bkv2njMSl2tmT3tGbvbwpiIDAXBIpqzP9dmts+rhD4E= gocloud.dev/pubsub/rabbitpubsub v0.16.0/go.mod 
h1:JJVdUUIqwgaaMJg/1xHQza0g4sI/4KHHSNiGE+pn4JM= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= -golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20190829233526-b3c06291d021 h1:j6QOxNFMpEL1wIQX6TUdBPNfGZKmBOJS/vfSm8a7tdM= golang.org/x/image v0.0.0-20190829233526-b3c06291d021/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -692,26 +609,16 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile 
v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20190830201351-c6da95954960/go.mod h1:mJOp/i0LXPxJZ9weeIadcPqKVfS05Ai7m6/t9z1Hs/Y= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190322120337-addf6b3196f6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -719,17 +626,10 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -744,7 +644,6 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -753,24 +652,17 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190508220229-2d0786266e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190830142957-1e83adbbebd0 h1:7z820YPX9pxWR59qM7BE5+fglp4D/mKqAwCvGt11b+8= -golang.org/x/sys v0.0.0-20190830142957-1e83adbbebd0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI= golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -778,7 +670,6 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -791,22 +682,12 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190724185037-8aa4eac1a7c1 h1:JwHzEZwWOyWUIR+OxPKGQGUfuOp/feyTesu6DEwqvsM= -golang.org/x/tools v0.0.0-20190724185037-8aa4eac1a7c1/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190830223141-573d9926052a h1:XAHT1kdPpnU8Hk+FPi42KZFhtNFEk4vBg1U4OmIeHTU= -golang.org/x/tools v0.0.0-20190830223141-573d9926052a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 h1:6S6bidS7O4yAwA5ORRbRIjvNQ9tGbLd5e+LRIaTeVDQ= golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373 h1:PPwnA7z1Pjf7XYaBP9GL1VAMZmcIWyFz7QCMSIIa3Bg= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= @@ -820,37 +701,27 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI= google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -861,9 +732,9 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= @@ -892,18 +763,14 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -istio.io/gogo-genproto v0.0.0-20190731221249-06e20ada0df2/go.mod h1:IjvrbUlRbbw4JCpsgvgihcz9USUwEoNTL/uwMtyV5yk= -istio.io/gogo-genproto v0.0.0-20190826122855-47f00599b597/go.mod h1:uKtbae4K9k2rjjX4ToV0l6etglbc1i7gqQ94XdkshzY= -pack.ag/amqp v0.8.0/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= -pack.ag/amqp v0.11.0/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= -pack.ag/amqp v0.12.1/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= 
+sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 h1:VO9oZbbkvTwqLimlQt15QNdOOBArT2dw/bvzsMZBiqQ= sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= diff --git a/k8s/README.md b/k8s/README.md new file mode 100644 index 000000000..5ec3ab407 --- /dev/null +++ b/k8s/README.md @@ -0,0 +1,23 @@ +## SEAWEEDFS - helm chart (2.x) + +### info: +* master/filer/volume are stateful sets with anti-affinity on the hostname, +so the pods will be spread across hosts and highly available. +* the chart uses memsql (mysql-compatible) as the filer backend to enable HA (multiple filer instances) +and the backup/HA capabilities memsql can provide. +* mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected into the filer +via environment variables. +* cert config exists and can be enabled, but has not been tested. + +### current instances config (AIO): +1 instance of each type (master/filer/volume/s3) + +instances require node labels: +* sw-volume: true (for the volume instance; a specific tag) +* sw-backend: true (for all others, as they are less resource-demanding) + +you can update the replica count for each node type in values.yaml, +then add more nodes with the corresponding label (see the example sketch below). + +most of the configuration is available through values.yaml + diff --git a/k8s/seaweedfs/.helmignore b/k8s/seaweedfs/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/k8s/seaweedfs/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml new file mode 100644 index 000000000..136d91e20 --- /dev/null +++ b/k8s/seaweedfs/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +description: SeaweedFS +name: seaweedfs +version: 1.61 diff --git a/k8s/seaweedfs/templates/_helpers.tpl b/k8s/seaweedfs/templates/_helpers.tpl new file mode 100644 index 000000000..04a782f8b --- /dev/null +++ b/k8s/seaweedfs/templates/_helpers.tpl @@ -0,0 +1,114 @@ +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to +this (by the DNS naming spec). If release name contains chart name it will +be used as a full name. +*/}} +{{- define "seaweedfs.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "seaweedfs.chart" -}} +{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name of the chart.
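As a quick worked example of the naming helpers in this file (a sketch, assuming a release installed as "my-swfs" with this chart and no nameOverride/fullnameOverride set in values.yaml — "my-swfs" is a hypothetical release name), the helpers would render roughly as:

```
{{ template "seaweedfs.name" . }}      => seaweedfs
{{ template "seaweedfs.fullname" . }}  => my-swfs-seaweedfs   (release name does not contain the chart name, so both are joined)
{{ template "seaweedfs.chart" . }}     => seaweedfs-helm
```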
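For the node labels and replica counts described in k8s/README.md above, a minimal sketch of the workflow — the sw-volume/sw-backend label keys come from that README, and master.replicas/filer.replicas match the .Values references in the statefulset templates below, but treat the exact values.yaml layout and node names as assumptions until checked against the chart's values.yaml:

```sh
# label the backend nodes (master/filer/s3) and the volume nodes
kubectl label node backend-node-1 sw-backend=true
kubectl label node volume-node-1 sw-volume=true

# raise the replica counts in values.yaml, e.g.
#   master:
#     replicas: 3
#   filer:
#     replicas: 2
# then install/upgrade the chart
helm upgrade --install seaweedfs ./k8s/seaweedfs -f values.yaml
```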
+*/}} +{{- define "seaweedfs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Inject extra environment vars in the format key:value, if populated +*/}} +{{- define "seaweedfs.extraEnvironmentVars" -}} +{{- if .extraEnvironmentVars -}} +{{- range $key, $value := .extraEnvironmentVars }} +- name: {{ $key }} + value: {{ $value | quote }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper filer image */}} +{{- define "filer.image" -}} +{{- if .Values.filer.imageOverride -}} +{{- $imageOverride := .Values.filer.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper postgresqlSchema image */}} +{{- define "filer.dbSchema.image" -}} +{{- if .Values.filer.dbSchema.imageOverride -}} +{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.global.repository | toString -}} +{{- $name := .Values.filer.dbSchema.imageName | toString -}} +{{- $tag := .Values.filer.dbSchema.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper master image */}} +{{- define "master.image" -}} +{{- if .Values.master.imageOverride -}} +{{- $imageOverride := .Values.master.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper s3 image */}} +{{- define "s3.image" -}} +{{- if .Values.s3.imageOverride -}} +{{- $imageOverride := .Values.s3.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper volume image */}} +{{- define "volume.image" -}} +{{- if .Values.volume.imageOverride -}} +{{- $imageOverride := .Values.volume.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/ca-cert.yaml b/k8s/seaweedfs/templates/ca-cert.yaml new file mode 100644 index 000000000..056f01502 
--- /dev/null +++ b/k8s/seaweedfs/templates/ca-cert.yaml @@ -0,0 +1,14 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-ca-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + commonName: "{{ template "seaweedfs.name" . }}-root-ca" + isCA: true + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer +{{- end }} diff --git a/k8s/seaweedfs/templates/cert-clusterissuer.yaml b/k8s/seaweedfs/templates/cert-clusterissuer.yaml new file mode 100644 index 000000000..d0bd42593 --- /dev/null +++ b/k8s/seaweedfs/templates/cert-clusterissuer.yaml @@ -0,0 +1,8 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: ClusterIssuer +metadata: + name: {{ template "seaweedfs.name" . }}-clusterissuer +spec: + selfSigned: {} +{{- end }} diff --git a/k8s/seaweedfs/templates/client-cert.yaml b/k8s/seaweedfs/templates/client-cert.yaml new file mode 100644 index 000000000..4d27b5659 --- /dev/null +++ b/k8s/seaweedfs/templates/client-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-client-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-client-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . }} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/filer-cert.yaml b/k8s/seaweedfs/templates/filer-cert.yaml new file mode 100644 index 000000000..855183c54 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-filer-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . 
}}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . }} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/filer-service.yaml b/k8s/seaweedfs/templates/filer-service.yaml new file mode 100644 index 000000000..493859e36 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: filer +spec: + clusterIP: None + ports: + - name: "swfs-filer" + port: {{ .Values.filer.port }} + targetPort: {{ .Values.filer.port }} + protocol: TCP + - name: "swfs-filer-grpc" + port: {{ .Values.filer.grpcPort }} + targetPort: {{ .Values.filer.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: filer \ No newline at end of file diff --git a/k8s/seaweedfs/templates/filer-statefulset.yaml b/k8s/seaweedfs/templates/filer-statefulset.yaml new file mode 100644 index 000000000..43da74c43 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-statefulset.yaml @@ -0,0 +1,207 @@ +{{- if .Values.filer.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-filer + podManagementPolicy: Parallel + replicas: {{ .Values.filer.replicas }} + {{- if (gt (int .Values.filer.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.filer.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: filer + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: filer + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }} + {{- if .Values.filer.affinity }} + affinity: + {{ tpl .Values.filer.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.filer.tolerations }} + tolerations: + {{ tpl .Values.filer.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + serviceAccountName: seaweefds-rw-sa # hack: allows deleting the master pod after migration + terminationGracePeriodSeconds: 60 + {{- if .Values.filer.priorityClassName }} + priorityClassName: {{ .Values.filer.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "filer.image" .
}} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: WEED_MYSQL_USERNAME + valueFrom: + secretKeyRef: + name: secret-seaweedfs-db + key: user + - name: WEED_MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: secret-seaweedfs-db + key: password + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + {{- if .Values.filer.extraEnvironmentVars }} + {{- range $key, $value := .Values.filer.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.filer.loggingOverrideLevel }} + -v={{ .Values.filer.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + filer \ + -port={{ .Values.filer.port }} \ + {{- if .Values.filer.disableHttp }} + -disableHttp \ + {{- end }} + {{- if .Values.filer.disableDirListing }} + -disableDirListing \ + {{- end }} + -dirListLimit={{ .Values.filer.dirListLimit }} \ + -ip=${POD_IP} \ + -master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + {{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }} + volumeMounts: + - name: seaweedfs-filer-log-volume + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.filer.extraVolumeMounts . | nindent 12 | trim }} + {{- end }} + ports: + - containerPort: {{ .Values.filer.port }} + name: swfs-filer + - containerPort: {{ .Values.filer.grpcPort }} + #name: swfs-filer-grpc + readinessProbe: + httpGet: + path: / + port: {{ .Values.filer.port }} + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + livenessProbe: + httpGet: + path: / + port: {{ .Values.filer.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 5 + {{- if .Values.filer.resources }} + resources: + {{ tpl .Values.filer.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-filer-log-volume + hostPath: + path: /storage/logs/seaweedfs/filer + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . 
}}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.filer.extraVolumes . | indent 8 | trim }} + {{- if .Values.filer.nodeSelector }} + nodeSelector: + {{ tpl .Values.filer.nodeSelector . | indent 8 | trim }} + {{- end }} +{{/* volumeClaimTemplates:*/}} +{{/* - metadata:*/}} +{{/* name: data-{{ .Release.Namespace }}*/}} +{{/* spec:*/}} +{{/* accessModes:*/}} +{{/* - ReadWriteOnce*/}} +{{/* resources:*/}} +{{/* requests:*/}} +{{/* storage: {{ .Values.filer.storage }}*/}} +{{/* {{- if .Values.filer.storageClass }}*/}} +{{/* storageClassName: {{ .Values.filer.storageClass }}*/}} +{{/* {{- end }}*/}} +{{- end }} diff --git a/k8s/seaweedfs/templates/ingress.yaml b/k8s/seaweedfs/templates/ingress.yaml new file mode 100644 index 000000000..dcd52c138 --- /dev/null +++ b/k8s/seaweedfs/templates/ingress.yaml @@ -0,0 +1,59 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress-{{ template "seaweedfs.name" . }}-filer + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/auth-type: "basic" + nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer' + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url + sub_filter '="/' '="./'; #make absolute paths to relative + sub_filter '=/' '=./'; + sub_filter '/seaweedfsstatic' './seaweedfsstatic'; + sub_filter_once off; +spec: + rules: + - http: + paths: + - path: /sw-filer/?(.*) + backend: + serviceName: {{ template "seaweedfs.name" . }}-filer + servicePort: {{ .Values.filer.port }} +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress-{{ template "seaweedfs.name" . }}-master + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/auth-type: "basic" + nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master' + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url + sub_filter '="/' '="./'; #make absolute paths to relative + sub_filter '=/' '=./'; + sub_filter '/seaweedfsstatic' './seaweedfsstatic'; + sub_filter_once off; +spec: + rules: + - http: + paths: + - path: /sw-master/?(.*) + backend: + serviceName: {{ template "seaweedfs.name" .
}}-master + servicePort: {{ .Values.master.port }} diff --git a/k8s/seaweedfs/templates/master-cert.yaml b/k8s/seaweedfs/templates/master-cert.yaml new file mode 100644 index 000000000..a8b0fc1d1 --- /dev/null +++ b/k8s/seaweedfs/templates/master-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-master-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-master-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . }} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/master-service.yaml b/k8s/seaweedfs/templates/master-service.yaml new file mode 100644 index 000000000..f7603bd91 --- /dev/null +++ b/k8s/seaweedfs/templates/master-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: master + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None + ports: + - name: "swfs-master" + port: {{ .Values.master.port }} + targetPort: {{ .Values.master.port }} + protocol: TCP + - name: "swfs-master-grpc" + port: {{ .Values.master.grpcPort }} + targetPort: {{ .Values.master.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: master \ No newline at end of file diff --git a/k8s/seaweedfs/templates/master-statefulset.yaml b/k8s/seaweedfs/templates/master-statefulset.yaml new file mode 100644 index 000000000..87050534f --- /dev/null +++ b/k8s/seaweedfs/templates/master-statefulset.yaml @@ -0,0 +1,199 @@ +{{- if .Values.master.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-master + podManagementPolicy: Parallel + replicas: {{ .Values.master.replicas }} + {{- if (gt (int .Values.master.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.master.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: master + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . 
}} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: master + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }} + {{- if .Values.master.affinity }} + affinity: + {{ tpl .Values.master.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: + {{ tpl .Values.master.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "master.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.master.loggingOverrideLevel }} + -v={{ .Values.master.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + master \ + -port={{ .Values.master.port }} \ + -mdir=/data \ + -ip.bind={{ .Values.master.ipBind }} \ + {{- if .Values.master.volumePreallocate }} + -volumePreallocate \ + {{- end }} + {{- if .Values.global.monitoring.enabled }} + -metrics.address="{{ .Values.global.monitoring.gatewayHost }}:{{ .Values.global.monitoring.gatewayPort }}" \ + {{- end }} + -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \ + {{- if .Values.master.disableHttp }} + -disableHttp \ + {{- end }} + -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \ + -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name : data-{{ .Release.Namespace }} + mountPath: /data + - name: seaweedfs-master-log-volume + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.master.extraVolumeMounts . 
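
The `-ip` and `-peers` flags in the master command above are what make the StatefulSet form a cluster without manual configuration: each pod advertises its stable per-pod DNS name under the headless service, and the `range ... until` loop enumerates every replica with a comma between entries. With the chart defaults (port 9333), and assuming the fullname template renders to `seaweedfs` with `master.replicas: 3`, pod 1's command line expands to roughly:

```
weed master -port=9333 -mdir=/data \
  -ip=seaweedfs-master-1.seaweedfs-master \
  -peers=seaweedfs-master-0.seaweedfs-master:9333,seaweedfs-master-1.seaweedfs-master:9333,seaweedfs-master-2.seaweedfs-master:9333
```
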
| nindent 12 | trim }} + ports: + - containerPort: {{ .Values.master.port }} + name: swfs-master + - containerPort: {{ .Values.master.grpcPort }} + #name: swfs-master-grpc + readinessProbe: + httpGet: + path: /cluster/status + port: {{ .Values.master.port }} + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 15 + successThreshold: 2 + failureThreshold: 100 + livenessProbe: + httpGet: + path: /cluster/status + port: {{ .Values.master.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 6 + {{- if .Values.master.resources }} + resources: + {{ tpl .Values.master.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-master-log-volume + hostPath: + path: /storage/logs/seaweedfs/master + type: DirectoryOrCreate + - name: data-{{ .Release.Namespace }} + hostPath: + path: /ssd/seaweed-master/ + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.master.extraVolumes . | indent 8 | trim }} + {{- if .Values.master.nodeSelector }} + nodeSelector: + {{ tpl .Values.master.nodeSelector . | indent 8 | trim }} + {{- end }} +{{/* volumeClaimTemplates:*/}} +{{/* - metadata:*/}} +{{/* name: data-{{ .Release.Namespace }}*/}} +{{/* spec:*/}} +{{/* accessModes:*/}} +{{/* - ReadWriteOnce*/}} +{{/* resources:*/}} +{{/* requests:*/}} +{{/* storage: {{ .Values.master.storage }}*/}} +{{/* {{- if .Values.master.storageClass }}*/}} +{{/* storageClassName: {{ .Values.master.storageClass }}*/}} +{{/* {{- end }}*/}} +{{- end }} diff --git a/k8s/seaweedfs/templates/s3-deployment.yaml b/k8s/seaweedfs/templates/s3-deployment.yaml new file mode 100644 index 000000000..1bb3283f1 --- /dev/null +++ b/k8s/seaweedfs/templates/s3-deployment.yaml @@ -0,0 +1,158 @@ +{{- if .Values.s3.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-s3 + replicas: {{ .Values.s3.replicas }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: s3 + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: s3 + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }} + {{- if .Values.s3.tolerations }} + tolerations: + {{ tpl .Values.s3.tolerations . 
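
Two things worth noting from the master StatefulSet above: persistence is plain `hostPath` (`/ssd/seaweed-master/` for data, `/storage/logs/seaweedfs/master` for logs) with the PVC-based `volumeClaimTemplates` left commented out, and the default values pin each component to specially labeled nodes. For a single-node or throwaway cluster the selectors can simply be cleared, since the templates only emit `nodeSelector:` when the value is non-empty:

```yaml
# Example override for single-node/dev clusters: an empty string is falsy in
# the {{- if .Values.*.nodeSelector }} guards, so no nodeSelector is rendered.
master:
  nodeSelector: ""
volume:
  nodeSelector: ""
filer:
  nodeSelector: ""
```
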
| nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.s3.priorityClassName }} + priorityClassName: {{ .Values.s3.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "s3.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed \ + {{- if .Values.s3.loggingOverrideLevel }} + -v={{ .Values.s3.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + s3 \ + -port={{ .Values.s3.port }} \ + {{- if .Values.global.enableSecurity }} + -cert.file=/usr/local/share/ca-certificates/client/tls.crt \ + -key.file=/usr/local/share/ca-certificates/client/tls.key \ + {{- end }} + {{- if .Values.s3.domainName }} + -domainName={{ .Values.s3.domainName }} \ + {{- end }} + -filer={{ template "seaweedfs.name" . }}-filer:{{ .Values.filer.port }} + {{- if or (.Values.global.enableSecurity) (.Values.s3.extraVolumeMounts) }} + volumeMounts: + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }} + {{- end }} + ports: + - containerPort: {{ .Values.s3.port }} + name: swfs-s3 + readinessProbe: + httpGet: + path: / + port: {{ .Values.s3.port }} + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + livenessProbe: + httpGet: + path: / + port: {{ .Values.s3.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 60 + successThreshold: 1 + failureThreshold: 20 + {{- if .Values.s3.resources }} + resources: + {{ tpl .Values.s3.resources . | nindent 12 | trim }} + {{- end }} + volumes: + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.s3.extraVolumes . | indent 8 | trim }} + {{- if .Values.s3.nodeSelector }} + nodeSelector: + {{ tpl .Values.s3.nodeSelector . 
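
Once the s3 Deployment above is running against the filer, any S3 SDK can be pointed at the `-s3` Service on port 8333. A hedged Go sketch using aws-sdk-go: the endpoint assumes the fullname renders to `seaweedfs` in the `default` namespace and plain HTTP (`global.enableSecurity` off), the credentials are placeholders, and path-style addressing is needed unless `s3.domainName` is configured:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Endpoint assumes the chart's default service name and s3.port (8333);
	// adjust for your release name and namespace.
	sess, err := session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://seaweedfs-s3.default.svc.cluster.local:8333"),
		Region:           aws.String("us-east-1"), // arbitrary; placeholder value
		Credentials:      credentials.NewStaticCredentials("any", "any", ""),
		S3ForcePathStyle: aws.Bool(true), // required unless s3.domainName is set
	})
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.StringValue(b.Name))
	}
}
```
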
| indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/k8s/seaweedfs/templates/s3-service.yaml b/k8s/seaweedfs/templates/s3-service.yaml new file mode 100644 index 000000000..b088e25fa --- /dev/null +++ b/k8s/seaweedfs/templates/s3-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: s3 +spec: + ports: + - name: "swfs-s3" + port: {{ .Values.s3.port }} + targetPort: {{ .Values.s3.port }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: s3 \ No newline at end of file diff --git a/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml b/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml new file mode 100644 index 000000000..c943ea50f --- /dev/null +++ b/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml @@ -0,0 +1,1352 @@ +{{- if .Values.global.monitoring.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: seaweefsfs-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + seaweedfs.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "Prometheus", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": 10423, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "refresh": "30s", + "rows": [ + { + "collapse": false, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 46, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 49, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + 
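
The dashboard ConfigMap that begins above carries the label `grafana_dashboard: "1"`, which matches the dashboard-discovery convention of the Grafana Helm chart's sidecar; if Grafana is deployed that way, the JSON payload is imported automatically. A sketch of the relevant Grafana chart values, assuming that sidecar feature is used:

```yaml
# Grafana helm-chart values that would auto-import ConfigMaps like the one above.
sidecar:
  dashboards:
    enabled: true
    label: grafana_dashboard   # matches the label on this ConfigMap
```
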
"total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 45, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 99th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": 
false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(SeaweedFS_filer_request_total[1m]) * 5", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer QPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 252, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 47, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le, exported_instance))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{exported_instance}}`}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 40, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": 
true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_volumeServer_request_total[1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_volumes) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{collection}}`}} {{`{{type}}`}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_max_volumes)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "id": 50, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{collection}}`}} {{`{{type}}`}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Collection 
and Type", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "id": 51, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (exported_instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{exported_instance}}`}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Host", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Volume Server", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, 
+ "fill": 1, + "grid": {}, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_filerStore_request_total [1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Store", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 242, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 52, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_memstats_alloc_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "bytes allocated", + "refId": "B" + }, + { + "expr": "rate(go_memstats_alloc_bytes_total{exported_job=\"filer\"}[30s])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "alloc rate", + "refId": "A" + }, + { + "expr": "go_memstats_stack_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "stack inuse", + "refId": "C" + }, + { + "expr": "go_memstats_heap_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "heap inuse", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Memory Stats", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": 
"Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_gc_duration_seconds{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{quantile}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go GC duration quantiles", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 53, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{exported_instance}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Routines", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Instances", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "SeaweedFS", + "version": 3 + } +{{- end }} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml 
b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml new file mode 100644 index 000000000..c6132c9ea --- /dev/null +++ b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: secret-seaweedfs-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/resource-policy": keep + "helm.sh/hook": "pre-install" +stringData: + user: "YourSWUser" + password: "HardCodedPassword" + # better to randomly generate the password and create it in the DB + # password: {{ randAlphaNum 10 | sha256sum | b64enc | trunc 32 }} diff --git a/k8s/seaweedfs/templates/security-configmap.yaml b/k8s/seaweedfs/templates/security-configmap.yaml new file mode 100644 index 000000000..7d06614ec --- /dev/null +++ b/k8s/seaweedfs/templates/security-configmap.yaml @@ -0,0 +1,52 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "seaweedfs.name" . }}-security-config + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + security.toml: |- + # this file is read by master, volume server, and filer + + # the jwt signing key is read by master and volume server + # a jwt expires in 10 seconds + [jwt.signing] + key = "{{ randAlphaNum 10 | b64enc }}" + + # all grpc tls authentications are mutual + # the values for the following ca, cert, and key are paths to the PEM files. + [grpc] + ca = "/usr/local/share/ca-certificates/ca/tls.crt" + + [grpc.volume] + cert = "/usr/local/share/ca-certificates/volume/tls.crt" + key = "/usr/local/share/ca-certificates/volume/tls.key" + + [grpc.master] + cert = "/usr/local/share/ca-certificates/master/tls.crt" + key = "/usr/local/share/ca-certificates/master/tls.key" + + [grpc.filer] + cert = "/usr/local/share/ca-certificates/filer/tls.crt" + key = "/usr/local/share/ca-certificates/filer/tls.key" + + # use this for any place that needs a grpc client + # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" + [grpc.client] + cert = "/usr/local/share/ca-certificates/client/tls.crt" + key = "/usr/local/share/ca-certificates/client/tls.key" + + # volume server https options + # Note: work in progress! + # this does not work with other clients, e.g., "weed filer|mount", etc., yet. + [https.client] + enabled = false + [https.volume] + cert = "" + key = "" +{{- end }}
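
Because both `randAlphaNum` expressions above run at template-render time, they produce a new value on every `helm upgrade`: the commented-out DB password would change silently, and the JWT signing key does change, so masters and volume servers must all pick up the new ConfigMap before write tokens verify again. A rendered example of the signing section; the key value is purely illustrative:

```toml
# Example rendered output; {{ randAlphaNum 10 | b64enc }} yields a fresh
# base64-encoded 10-character key on every render.
[jwt.signing]
key = "QWJDZEVmR2hJag=="
```
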
diff --git a/k8s/seaweedfs/templates/service-account.yaml b/k8s/seaweedfs/templates/service-account.yaml new file mode 100644 index 000000000..e82ef7d62 --- /dev/null +++ b/k8s/seaweedfs/templates/service-account.yaml @@ -0,0 +1,29 @@ +# hack to allow deleting the master pod after migration +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: seaweefds-rw-cr +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: seaweefds-rw-sa + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: system:serviceaccount:seaweefds-rw-sa:default +subjects: +- kind: ServiceAccount + name: seaweefds-rw-sa + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: seaweefds-rw-cr \ No newline at end of file diff --git a/k8s/seaweedfs/templates/volume-cert.yaml b/k8s/seaweedfs/templates/volume-cert.yaml new file mode 100644 index 000000000..72c62a0f5 --- /dev/null +++ b/k8s/seaweedfs/templates/volume-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-volume-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-volume' + - '*.{{ template "seaweedfs.name" . }}-volume.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-volume.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-volume.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . }} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/volume-service.yaml b/k8s/seaweedfs/templates/volume-service.yaml new file mode 100644 index 000000000..fc7716681 --- /dev/null +++ b/k8s/seaweedfs/templates/volume-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: volume +spec: + clusterIP: None + ports: + - name: "swfs-volume" + port: {{ .Values.volume.port }} + targetPort: {{ .Values.volume.port }} + protocol: TCP + - name: "swfs-volume-grpc" + port: {{ .Values.volume.grpcPort }} + targetPort: {{ .Values.volume.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . 
}} + component: volume \ No newline at end of file diff --git a/k8s/seaweedfs/templates/volume-statefulset.yaml b/k8s/seaweedfs/templates/volume-statefulset.yaml new file mode 100644 index 000000000..9c6ddcd9f --- /dev/null +++ b/k8s/seaweedfs/templates/volume-statefulset.yaml @@ -0,0 +1,187 @@ +{{- if .Values.volume.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-volume + replicas: {{ .Values.volume.replicas }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: volume + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: volume + spec: + {{- if .Values.volume.affinity }} + affinity: + {{ tpl .Values.volume.affinity . | nindent 8 | trim }} + {{- end }} + restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }} + {{- if .Values.volume.tolerations }} + tolerations: + {{ tpl .Values.volume.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.volume.priorityClassName }} + priorityClassName: {{ .Values.volume.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "volume.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . 
}}" + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.volume.loggingOverrideLevel }} + -v={{ .Values.volume.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + volume \ + -port={{ .Values.volume.port }} \ + -dir={{ .Values.volume.dir }} \ + -max={{ .Values.volume.maxVolumes }} \ + {{- if .Values.volume.rack }} + -rack={{ .Values.volume.rack }} \ + {{- end }} + {{- if .Values.volume.dataCenter }} + -dataCenter={{ .Values.volume.dataCenter }} \ + {{- end }} + -ip.bind={{ .Values.volume.ipBind }} \ + -read.redirect={{ .Values.volume.readRedirect }} \ + {{- if .Values.volume.whiteList }} + -whiteList={{ .Values.volume.whiteList }} \ + {{- end }} + {{- if .Values.volume.imagesFixOrientation }} + -images.fix.orientation \ + {{- end }} + -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \ + -compactionMBps={{ .Values.volume.compactionMBps }} \ + -mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name: seaweedfs-volume-storage + mountPath: "/data/" + - name: seaweedfs-volume-log-volume + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.volume.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.volume.port }} + name: swfs-vol + - containerPort: {{ .Values.volume.grpcPort }} + #name: swfs-vol-grpc + readinessProbe: + httpGet: + path: /status + port: {{ .Values.volume.port }} + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + livenessProbe: + httpGet: + path: /status + port: {{ .Values.volume.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 10 + {{- if .Values.volume.resources }} + resources: + {{ tpl .Values.volume.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-volume-log-volume + hostPath: + path: /storage/logs/seaweedfs/volume + type: DirectoryOrCreate + - name: seaweedfs-volume-storage + hostPath: + path: /storage/object_store/ + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{- if .Values.volume.extraVolumes }} + {{ tpl .Values.volume.extraVolumes . 
| indent 8 | trim }} + {{- end }} + {{- if .Values.volume.nodeSelector }} + nodeSelector: + {{ tpl .Values.volume.nodeSelector . | indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml new file mode 100644 index 000000000..d3e030a08 --- /dev/null +++ b/k8s/seaweedfs/values.yaml @@ -0,0 +1,308 @@ +# Available parameters and their default values for the SeaweedFS chart. + +global: + registry: "" + repository: "" + imageName: chrislusf/seaweedfs + imageTag: "1.61" + imagePullPolicy: IfNotPresent + imagePullSecrets: imagepullsecret + restartPolicy: Always + loggingLevel: 1 + enableSecurity: false + monitoring: + enabled: false + gatewayHost: null + gatewayPort: null + +image: + registry: "" + repository: "" + +master: + enabled: true + repository: null + imageName: null + imageTag: null + imageOverride: null + restartPolicy: null + replicas: 1 + port: 9333 + grpcPort: 19333 + ipBind: "0.0.0.0" + volumePreallocate: false + volumeSizeLimitMB: 30000 + loggingOverrideLevel: null + + # Disable http request, only gRpc operations are allowed + disableHttp: false + + extraVolumes: "" + extraVolumeMounts: "" + + # storage and storageClass are the settings for configuring stateful + # storage for the master pods. storage should be set to the disk size of + # the attached volume. storageClass is the class of storage which defaults + # to null (the Kube cluster will pick the default). + storage: 25Gi + storageClass: null + + # Resource requests, limits, etc. for the master cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # updatePartition is used to control a careful rolling update of SeaweedFS + # masters. + updatePartition: 0 + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + release: "{{ .Release.Name }}" + component: master + topologyKey: kubernetes.io/hostname + + # Toleration Settings for master pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for master pod assignment, formatted as a muli-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to master pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + +volume: + enabled: true + repository: null + imageName: null + imageTag: null + imageOverride: null + restartPolicy: null + port: 8080 + grpcPort: 18080 + ipBind: "0.0.0.0" + replicas: 1 + loggingOverrideLevel: null + + # limit background compaction or copying speed in mega bytes per second + compactionMBps: "40" + + # Directories to store data files. dir[,dir]... (default "/tmp") + dir: "/data" + + # Maximum numbers of volumes, count[,count]... (default "7") + maxVolumes: "10000" + + # Volume server's rack name + rack: null + + # Volume server's data center name + dataCenter: null + + # Redirect moved or non-local volumes. 
(default true) + readRedirect: true + + # Comma-separated IP addresses having write permission. No limit if empty. + whiteList: null + + # Adjust jpg orientation when uploading. + imagesFixOrientation: false + + extraVolumes: "" + extraVolumeMounts: "" + + # Affinity Settings + # Commenting out or setting as empty the affinity variable will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + release: "{{ .Release.Name }}" + component: volume + topologyKey: kubernetes.io/hostname + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for server pod assignment, formatted as a multi-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-volume: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: ""
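
The volume section above is the main capacity lever: each volume file grows up to `master.volumeSizeLimitMB` (30000 MB by default), and a volume server stops accepting new volumes at `volume.maxVolumes`, so usable space per node is roughly the product of the two. A hedged override sketch; the counts and rack/data-center names are placeholders:

```yaml
# Example sizing override: 100 x 30000 MB ~= 3 TB of volume data per node at
# the default size limit; rack/dataCenter feed the SeaweedFS topology map.
volume:
  replicas: 3
  maxVolumes: "100"
  rack: rack1
  dataCenter: dc1
```
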
+ + +filer: + enabled: true + repository: null + imageName: null + imageTag: null + imageOverride: null + restartPolicy: null + replicas: 1 + port: 8888 + grpcPort: 18888 + loggingOverrideLevel: null + + # Limit sub dir listing size (default 100000) + dirListLimit: 100000 + + # Turn off directory listing + disableDirListing: false + + # Disable http request, only gRpc operations are allowed + disableHttp: false + + # storage and storageClass are the settings for configuring stateful + # storage for the filer pods. storage should be set to the disk size of + # the attached volume. storageClass is the class of storage which defaults + # to null (the Kube cluster will pick the default). + storage: 25Gi + storageClass: null + + extraVolumes: "" + extraVolumeMounts: "" + + # Affinity Settings + # Commenting out or setting as empty the affinity variable will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + release: "{{ .Release.Name }}" + component: filer + topologyKey: kubernetes.io/hostname + + # updatePartition is used to control a careful rolling update of SeaweedFS + # filers. + updatePartition: 0 + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for server pod assignment, formatted as a multi-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + dbSchema: + imageName: db-schema + imageTag: "development" + imageOverride: "" + + # extraEnvironmentVars is a list of extra environment variables to set on the stateful set. + extraEnvironmentVars: + WEED_MYSQL_ENABLED: "true" + WEED_MYSQL_HOSTNAME: "mysql-db-host" + WEED_MYSQL_PORT: "3306" + WEED_MYSQL_DATABASE: "sw_database" + WEED_MYSQL_CONNECTION_MAX_IDLE: "10" + WEED_MYSQL_CONNECTION_MAX_OPEN: "150" + # enable usage of MemSQL as filer backend + WEED_MYSQL_INTERPOLATEPARAMS: "true" + WEED_LEVELDB2_ENABLED: "false" + # with http DELETE, by default the filer would check whether a folder is empty. + # recursive_delete will delete all sub folders and files, similar to "rm -Rf" + WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false" + # each directory under this folder will automatically become a separate bucket + WEED_FILER_BUCKETS_FOLDER: "/buckets" + # directories under this folder will store message queue data + WEED_FILER_QUEUES_FOLDER: "/queues" + +s3: + enabled: true + repository: null + imageName: null + imageTag: null + restartPolicy: null + replicas: 1 + port: 8333 + loggingOverrideLevel: null + + # Suffix of the host name, {bucket}.{domainName} + domainName: "" + + extraVolumes: "" + extraVolumeMounts: "" + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for server pod assignment, formatted as a multi-line string.
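
The `extraEnvironmentVars` block above works because `weed` reads `WEED_`-prefixed environment variables as overrides for its configuration files, mapping `WEED_<SECTION>_<KEY>` onto `<section>.<key>`. The MySQL settings shown are therefore equivalent to shipping a filer.toml like the sketch below; the hostname and database are the chart's example values:

```toml
# Approximate filer.toml equivalent of the WEED_MYSQL_* / WEED_LEVELDB2_*
# variables above; weed maps WEED_<SECTION>_<KEY> env vars onto these keys.
[mysql]
enabled = true
hostname = "mysql-db-host"
port = 3306
database = "sw_database"
connection_max_idle = 10
connection_max_open = 150
interpolateParams = true

[leveldb2]
enabled = false
```
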
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + +certificates: + commonName: "SeaweedFS CA" + ipAddresses: [] + keyAlgorithm: rsa + keySize: 2048 + duration: 2160h # 90d + renewBefore: 360h # 15d diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 47742ab8d..0c585a941 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -4,7 +4,7 @@ com.github.chrislusf seaweedfs-client - 1.2.3 + 1.2.4 org.sonatype.oss diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java index a1e3cdb89..84aa26ad9 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java @@ -7,6 +7,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; +import java.util.Iterator; import java.util.List; public class FilerClient { @@ -173,17 +174,18 @@ public class FilerClient { } public List listEntries(String path, String entryPrefix, String lastEntryName, int limit) { - List entries = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() + Iterator iter = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() .setDirectory(path) .setPrefix(entryPrefix) .setStartFromFileName(lastEntryName) .setLimit(limit) - .build()).getEntriesList(); - List fixedEntries = new ArrayList<>(entries.size()); - for (FilerProto.Entry entry : entries) { - fixedEntries.add(fixEntryAfterReading(entry)); + .build()); + List entries = new ArrayList<>(); + while (iter.hasNext()){ + FilerProto.ListEntriesResponse resp = iter.next(); + entries.add(fixEntryAfterReading(resp.getEntry())); } - return fixedEntries; + return entries; } public FilerProto.Entry lookupEntry(String directory, String entryName) { diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java index 2efa64580..b08c14467 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java @@ -63,7 +63,7 @@ public class SeaweedRead { if (!chunkView.isFullChunk) { request.setHeader(HttpHeaders.ACCEPT_ENCODING, ""); request.setHeader(HttpHeaders.RANGE, - String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size)); + String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size - 1)); } try { diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 18ccca44f..8df46e917 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -12,7 +12,7 @@ service SeaweedFiler { rpc LookupDirectoryEntry (LookupDirectoryEntryRequest) returns (LookupDirectoryEntryResponse) { } - rpc ListEntries (ListEntriesRequest) returns (ListEntriesResponse) { + rpc ListEntries (ListEntriesRequest) returns (stream ListEntriesResponse) { } rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) { @@ -24,6 +24,9 @@ service SeaweedFiler { rpc DeleteEntry 
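
The Java client changes above track two server-side changes visible in the filer.proto diff that follows: `ListEntries` became a server-streaming RPC (one `Entry` per response message instead of a single huge repeated list), so `FilerClient.listEntries` now drains an iterator rather than buffering the whole directory; and `SeaweedRead` fixes an off-by-one in chunk fetching, since HTTP `Range` ends are inclusive (RFC 7233), so reading `size` bytes from `offset` must request `offset + size - 1` as the last byte. A standalone illustration of the corrected header:

```java
// Illustrates the inclusive Range header after the SeaweedRead fix:
// size bytes starting at offset end at byte offset + size - 1.
public class RangeHeaderExample {
    public static void main(String[] args) {
        long offset = 0, size = 1024;
        String range = String.format("bytes=%d-%d", offset, offset + size - 1);
        System.out.println(range); // bytes=0-1023 (the old code sent bytes=0-1024)
    }
}
```
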
diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto
index 18ccca44f..8df46e917 100644
--- a/other/java/client/src/main/proto/filer.proto
+++ b/other/java/client/src/main/proto/filer.proto
@@ -12,7 +12,7 @@ service SeaweedFiler {
     rpc LookupDirectoryEntry (LookupDirectoryEntryRequest) returns (LookupDirectoryEntryResponse) {
     }
 
-    rpc ListEntries (ListEntriesRequest) returns (ListEntriesResponse) {
+    rpc ListEntries (ListEntriesRequest) returns (stream ListEntriesResponse) {
     }
 
     rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) {
@@ -24,6 +24,9 @@
     rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
     }
 
+    rpc StreamDeleteEntries (stream DeleteEntryRequest) returns (stream DeleteEntryResponse) {
+    }
+
     rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) {
     }
 
@@ -64,7 +67,7 @@ message ListEntriesRequest {
 }
 
 message ListEntriesResponse {
-    repeated Entry entries = 1;
+    Entry entry = 1;
 }
 
 message Entry {
@@ -96,6 +99,8 @@ message FileChunk {
     string source_file_id = 6; // to be deprecated
     FileId fid = 7;
     FileId source_fid = 8;
+    bytes cipher_key = 9;
+    bool is_gzipped = 10;
 }
 
 message FileId {
@@ -123,9 +128,11 @@ message FuseAttributes {
 message CreateEntryRequest {
     string directory = 1;
     Entry entry = 2;
+    bool o_excl = 3;
 }
 
 message CreateEntryResponse {
+    string error = 1;
 }
 
 message UpdateEntryRequest {
@@ -145,6 +152,7 @@ message DeleteEntryRequest {
 }
 
 message DeleteEntryResponse {
+    string error = 1;
 }
 
 message AtomicRenameEntryRequest {
@@ -163,6 +171,7 @@ message AssignVolumeRequest {
     string replication = 3;
     int32 ttl_sec = 4;
     string data_center = 5;
+    string parent_path = 6;
 }
 
 message AssignVolumeResponse {
@@ -171,6 +180,9 @@ message AssignVolumeResponse {
     string public_url = 3;
     int32 count = 4;
     string auth = 5;
+    string collection = 6;
+    string replication = 7;
+    string error = 8;
 }
 
 message LookupVolumeRequest {
@@ -217,4 +229,7 @@ message GetFilerConfigurationResponse {
     string replication = 2;
     string collection = 3;
     uint32 max_mb = 4;
+    string dir_buckets = 5;
+    string dir_queues = 6;
+    bool cipher = 7;
 }
diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml
index 3b964951e..d818bc878 100644
--- a/other/java/hdfs2/dependency-reduced-pom.xml
+++ b/other/java/hdfs2/dependency-reduced-pom.xml
@@ -127,7 +127,7 @@
-    <seaweedfs.client.version>1.2.3</seaweedfs.client.version>
+    <seaweedfs.client.version>1.2.4</seaweedfs.client.version>
     <hadoop.version>2.9.2</hadoop.version>
diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml
index 7782ccbe2..b8c8cb891 100644
--- a/other/java/hdfs2/pom.xml
+++ b/other/java/hdfs2/pom.xml
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
-        <seaweedfs.client.version>1.2.3</seaweedfs.client.version>
+        <seaweedfs.client.version>1.2.4</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml
index 6a12b1617..ca53ffd22 100644
--- a/other/java/hdfs3/dependency-reduced-pom.xml
+++ b/other/java/hdfs3/dependency-reduced-pom.xml
@@ -127,7 +127,7 @@
-    <seaweedfs.client.version>1.2.3</seaweedfs.client.version>
+    <seaweedfs.client.version>1.2.4</seaweedfs.client.version>
     <hadoop.version>3.1.1</hadoop.version>
diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml
index 2af787767..f5207213c 100644
--- a/other/java/hdfs3/pom.xml
+++ b/other/java/hdfs3/pom.xml
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
-        <seaweedfs.client.version>1.2.3</seaweedfs.client.version>
+        <seaweedfs.client.version>1.2.4</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go
index 04db7cfc1..afe651c4e 100644
--- a/unmaintained/change_superblock/change_superblock.go
+++ b/unmaintained/change_superblock/change_superblock.go
@@ -8,9 +8,9 @@ import (
 	"strconv"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
 )
 
 var (
@@ -51,7 +51,7 @@ func main() {
 	datBackend := backend.NewDiskFile(datFile)
 	defer datBackend.Close()
 
-	superBlock, err := storage.ReadSuperBlock(datBackend)
+	superBlock, err := super_block.ReadSuperBlock(datBackend)
 
 	if err != nil {
 		glog.Fatalf("cannot parse existing super block: %v", err)
@@ -63,7 +63,7 @@ func main() {
 	hasChange := false
 
 	if *targetReplica != "" {
-		replica, err := storage.NewReplicaPlacementFromString(*targetReplica)
+		replica, err :=
super_block.NewReplicaPlacementFromString(*targetReplica) if err != nil { glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err) diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go index e88a48c96..d6110d870 100644 --- a/unmaintained/fix_dat/fix_dat.go +++ b/unmaintained/fix_dat/fix_dat.go @@ -9,9 +9,9 @@ import ( "strconv" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -59,7 +59,7 @@ func main() { } defer newDatFile.Close() - superBlock, err := storage.ReadSuperBlock(datBackend) + superBlock, err := super_block.ReadSuperBlock(datBackend) if err != nil { glog.Fatalf("Read Volume Data superblock %v", err) } @@ -67,13 +67,13 @@ func main() { iterateEntries(datBackend, indexFile, func(n *needle.Needle, offset int64) { fmt.Printf("needle id=%v name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize) - _, s, _, e := n.Append(datBackend, superBlock.Version()) + _, s, _, e := n.Append(datBackend, superBlock.Version) fmt.Printf("size %d error %v\n", s, e) }) } -func iterateEntries(datBackend backend.DataStorageBackend, idxFile *os.File, visitNeedle func(n *needle.Needle, offset int64)) { +func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, visitNeedle func(n *needle.Needle, offset int64)) { // start to read index file var readerOffset int64 bytes := make([]byte, 16) @@ -81,13 +81,13 @@ func iterateEntries(datBackend backend.DataStorageBackend, idxFile *os.File, vis readerOffset += int64(count) // start to read dat file - superBlock, err := storage.ReadSuperBlock(datBackend) + superBlock, err := super_block.ReadSuperBlock(datBackend) if err != nil { fmt.Printf("cannot read dat file super block: %v", err) return } offset := int64(superBlock.BlockSize()) - version := superBlock.Version() + version := superBlock.Version n, _, rest, err := needle.ReadNeedleHeader(datBackend, version, offset) if err != nil { fmt.Printf("cannot read needle header: %v", err) diff --git a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go index a162d1757..84173a663 100644 --- a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go +++ b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go @@ -10,6 +10,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var ( @@ -24,16 +25,16 @@ func Checksum(n *needle.Needle) string { type VolumeFileScanner4SeeDat struct { version needle.Version - block storage.SuperBlock + block super_block.SuperBlock dir string hashes map[string]bool dat *os.File - datBackend backend.DataStorageBackend + datBackend backend.BackendStorageFile } -func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version scanner.block = superBlock return nil diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go 
b/unmaintained/repeated_vacuum/repeated_vacuum.go index 28bcabb9b..96d4ccdf6 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -1,51 +1,60 @@ package main import ( - "bytes" "flag" "fmt" "log" "math/rand" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) var ( - master = flag.String("master", "127.0.0.1:9333", "the master server") - repeat = flag.Int("n", 5, "repeat how many times") + master = flag.String("master", "127.0.0.1:9333", "the master server") + repeat = flag.Int("n", 5, "repeat how many times") + garbageThreshold = flag.Float64("garbageThreshold", 0.3, "garbageThreshold") ) func main() { flag.Parse() util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + genFile(grpcDialOption, 0) for i := 0; i < *repeat; i++ { - assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1}) - if err != nil { - log.Fatalf("assign: %v", err) - } + // create 2 files, and delete one of them - data := make([]byte, 1024) - rand.Read(data) - reader := bytes.NewReader(data) + assignResult, targetUrl := genFile(grpcDialOption, i) - targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + util.Delete(targetUrl, string(assignResult.Auth)) - _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, assignResult.Auth) - if err != nil { - log.Fatalf("upload: %v", err) - } + println("vacuum", i, "threshold", *garbageThreshold) + util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold)) - util.Delete(targetUrl, string(assignResult.Auth)) + } - util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master)) +} +func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) { + assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1}) + if err != nil { + log.Fatalf("assign: %v", err) } + data := make([]byte, 1024) + rand.Read(data) + + targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + + _, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth) + if err != nil { + log.Fatalf("upload: %v", err) + } + return assignResult, targetUrl } diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go index 84a06c625..efc58e751 100644 --- a/unmaintained/see_dat/see_dat.go +++ b/unmaintained/see_dat/see_dat.go @@ -7,6 +7,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var ( @@ -19,8 +20,8 @@ type VolumeFileScanner4SeeDat struct { version needle.Version } -func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version return nil } diff --git a/unmaintained/volume_tailer/volume_tailer.go b/unmaintained/volume_tailer/volume_tailer.go index 
f0ef51c09..3c2d36d22 100644 --- a/unmaintained/volume_tailer/volume_tailer.go +++ b/unmaintained/volume_tailer/volume_tailer.go @@ -25,7 +25,7 @@ func main() { flag.Parse() util2.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") vid := needle.VolumeId(*volumeId) diff --git a/weed/command/backup.go b/weed/command/backup.go index cef2bbe3a..eb2b5ba4a 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -5,8 +5,8 @@ import ( "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/storage" @@ -64,7 +64,7 @@ var cmdBackup = &Command{ func runBackup(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") if *s.volumeId == -1 { return false @@ -98,15 +98,15 @@ func runBackup(cmd *Command, args []string) bool { return true } } - var replication *storage.ReplicaPlacement + var replication *super_block.ReplicaPlacement if *s.replication != "" { - replication, err = storage.NewReplicaPlacementFromString(*s.replication) + replication, err = super_block.NewReplicaPlacementFromString(*s.replication) if err != nil { fmt.Printf("Error generate volume %d replication %s : %v\n", vid, *s.replication, err) return true } } else { - replication, err = storage.NewReplicaPlacementFromString(stats.Replication) + replication, err = super_block.NewReplicaPlacementFromString(stats.Replication) if err != nil { fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err) return true @@ -119,7 +119,7 @@ func runBackup(cmd *Command, args []string) bool { } if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) { - if err = v.Compact(0, 0); err != nil { + if err = v.Compact2(30 * 1024 * 1024 * 1024); err != nil { fmt.Printf("Compact Volume before synchronizing %v\n", err) return true } diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 26be1fe3a..e85ab1b9b 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -15,11 +15,11 @@ import ( "sync" "time" - "github.com/spf13/viper" "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" @@ -41,6 +41,7 @@ type BenchmarkOptions struct { maxCpu *int grpcDialOption grpc.DialOption masterClient *wdclient.MasterClient + grpcRead *bool } var ( @@ -65,6 +66,7 @@ func init() { b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type") b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file") b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 
0 means all available CPUs")
+	b.grpcRead = cmdBenchmark.Flag.Bool("grpcRead", false, "use grpc API to read")
 	sharedBytes = make([]byte, 1024)
 }
@@ -109,7 +111,7 @@ var (
 func runBenchmark(cmd *Command, args []string) bool {
 	util.LoadConfiguration("security", false)
-	b.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
+	b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
 
 	fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
 	if *b.maxCpu < 1 {
@@ -125,7 +127,7 @@ func runBenchmark(cmd *Command, args []string) bool {
 		defer pprof.StopCPUProfile()
 	}
 
-	b.masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ","))
+	b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", 0, strings.Split(*b.masters, ","))
 	go b.masterClient.KeepConnectedToMaster()
 	b.masterClient.WaitUntilConnected()
 
@@ -279,23 +281,61 @@ func readFiles(fileIdLineChan chan string, s *stat) {
 			fmt.Printf("reading file %s\n", fid)
 		}
 		start := time.Now()
-		url, err := b.masterClient.LookupFileId(fid)
-		if err != nil {
-			s.failed++
-			println("!!!! ", fid, " location not found!!!!!")
-			continue
+		var bytesRead int
+		var err error
+		if *b.grpcRead {
+			volumeServer, lookupErr := b.masterClient.LookupVolumeServer(fid)
+			if lookupErr != nil {
+				s.failed++
+				println("!!!! ", fid, " location not found!!!!!")
+				continue
+			}
+			bytesRead, err = grpcFileGet(volumeServer, fid, b.grpcDialOption)
+		} else {
+			url, lookupErr := b.masterClient.LookupFileId(fid)
+			if lookupErr != nil {
+				s.failed++
+				println("!!!! ", fid, " location not found!!!!!")
+				continue
+			}
+			var bytes []byte
+			bytes, err = util.Get(url)
+			bytesRead = len(bytes)
 		}
-		if bytesRead, err := util.Get(url); err == nil {
+		if err == nil {
 			s.completed++
-			s.transferred += int64(len(bytesRead))
+			s.transferred += int64(bytesRead)
 			readStats.addSample(time.Now().Sub(start))
 		} else {
 			s.failed++
-			fmt.Printf("Failed to read %s error:%v\n", url, err)
+			fmt.Printf("Failed to read %s error:%v\n", fid, err)
 		}
 	}
 }
 
+func grpcFileGet(volumeServer, fid string, grpcDialOption grpc.DialOption) (bytesRead int, err error) {
+	err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+		fileGetClient, err := client.FileGet(context.Background(), &volume_server_pb.FileGetRequest{FileId: fid})
+		if err != nil {
+			return err
+		}
+
+		for {
+			resp, respErr := fileGetClient.Recv()
+			if resp != nil {
+				bytesRead += len(resp.Data)
+			}
+			if respErr != nil {
+				if respErr == io.EOF {
+					return nil
+				}
+				return respErr
+			}
+		}
+	})
+	return
+}
+
 func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan bool) {
 	file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
 	if err != nil {
diff --git a/weed/command/command.go b/weed/command/command.go
index 79c00d4cd..9dc51e922 100644
--- a/weed/command/command.go
+++ b/weed/command/command.go
@@ -20,6 +20,7 @@ var Commands = []*Command{
 	cmdS3,
 	cmdUpload,
 	cmdDownload,
+	cmdMsgBroker,
 	cmdScaffold,
 	cmdShell,
 	cmdVersion,
diff --git a/weed/command/compact.go b/weed/command/compact.go
index 4a54f5670..85313b749 100644
--- a/weed/command/compact.go
+++ b/weed/command/compact.go
@@ -17,6 +17,9 @@ var cmdCompact = &Command{
 	The compacted .dat file is stored as .cpd file.
 	The compacted .idx file is stored as .cpx file.
 
+	For method=0, it compacts based on the .dat file, works if .idx file is corrupted.
+ For method=1, it compacts based on the .idx file, works if deletion happened but not written to .dat files. + `, } @@ -47,7 +50,7 @@ func runCompact(cmd *Command, args []string) bool { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } else { - if err = v.Compact2(); err != nil { + if err = v.Compact2(preallocate); err != nil { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } diff --git a/weed/command/download.go b/weed/command/download.go index b3e33defd..be0eb47e5 100644 --- a/weed/command/download.go +++ b/weed/command/download.go @@ -71,6 +71,7 @@ func downloadToFile(server, fileId, saveDir string) error { } f, err := os.OpenFile(path.Join(saveDir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm) if err != nil { + io.Copy(ioutil.Discard, rc) return err } defer f.Close() diff --git a/weed/command/export.go b/weed/command/export.go index d3a765e09..8c32b3f4d 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -4,6 +4,7 @@ import ( "archive/tar" "bytes" "fmt" + "io" "os" "path" "path/filepath" @@ -12,11 +13,11 @@ import ( "text/template" "time" - "io" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -89,12 +90,12 @@ func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, type VolumeFileScanner4Export struct { version needle.Version counter int - needleMap *storage.NeedleMap + needleMap *needle_map.MemDb vid needle.VolumeId } -func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version return nil } @@ -192,15 +193,12 @@ func runExport(cmd *Command, args []string) bool { fileName = *export.collection + "_" + fileName } vid := needle.VolumeId(*export.volumeId) - indexFile, err := os.OpenFile(path.Join(*export.dir, fileName+".idx"), os.O_RDONLY, 0644) - if err != nil { - glog.Fatalf("Create Volume Index [ERROR] %s\n", err) - } - defer indexFile.Close() - needleMap, err := storage.LoadBtreeNeedleMap(indexFile) - if err != nil { - glog.Fatalf("cannot load needle map from %s: %s", indexFile.Name(), err) + needleMap := needle_map.NewMemDb() + defer needleMap.Close() + + if err := needleMap.LoadFromIdx(path.Join(*export.dir, fileName+".idx")); err != nil { + glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err) } volumeFileScanner := &VolumeFileScanner4Export{ diff --git a/weed/command/filer.go b/weed/command/filer.go index b1ceb46f5..fb1ee2b0f 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -6,14 +6,14 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" + "google.golang.org/grpc/reflection" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc/reflection" ) var ( @@ -27,13 +27,13 @@ type FilerOptions struct { publicPort *int collection *string defaultReplicaPlacement *string - redirectOnRead *bool disableDirListing *bool maxMB *int 
dirListingLimit *int dataCenter *string enableNotification *bool disableHttp *bool + cipher *bool // default leveldb directory, used in "weed server" mode defaultLevelDbDirectory *string @@ -47,12 +47,12 @@ func init() { f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port") f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public") f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified") - f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing") f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit") f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") + f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers") } var cmdFiler = &Command{ @@ -103,14 +103,14 @@ func (fo *FilerOptions) startFiler() { Masters: strings.Split(*fo.masters, ","), Collection: *fo.collection, DefaultReplication: *fo.defaultReplicaPlacement, - RedirectOnRead: *fo.redirectOnRead, DisableDirListing: *fo.disableDirListing, MaxMB: *fo.maxMB, DirListingLimit: *fo.dirListingLimit, DataCenter: *fo.dataCenter, DefaultLevelDbDir: defaultLevelDbDirectory, DisableHttp: *fo.disableHttp, - Port: *fo.port, + Port: uint32(*fo.port), + Cipher: *fo.cipher, }) if nfs_err != nil { glog.Fatalf("Filer startup error: %v", nfs_err) @@ -145,7 +145,7 @@ func (fo *FilerOptions) startFiler() { if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer")) + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer")) filer_pb.RegisterSeaweedFilerServer(grpcS, fs) reflection.Register(grpcS) go grpcS.Serve(grpcL) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 9995cf6aa..0aee8cd80 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -14,13 +14,15 @@ import ( "sync" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/spf13/viper" - "google.golang.org/grpc" ) var ( @@ -37,9 +39,10 @@ type CopyOptions struct { masterClient *wdclient.MasterClient concurrenctFiles *int concurrenctChunks *int - compressionLevel *int grpcDialOption grpc.DialOption masters []string + cipher bool + ttlSec int32 } func init() { @@ -52,7 +55,6 @@ func init() { copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit") copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines") copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file") - copy.compressionLevel = cmdCopy.Flag.Int("compressionLevel", 9, "local file compression level 1 ~ 9") } var cmdCopy = &Command{ @@ -105,11 +107,9 @@ 
func runCopy(cmd *Command, args []string) bool { filerGrpcPort := filerPort + 10000 filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) - copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") - ctx := context.Background() - - masters, collection, replication, maxMB, err := readFilerConfiguration(ctx, copy.grpcDialOption, filerGrpcAddress) + masters, collection, replication, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress) if err != nil { fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err) return false @@ -124,10 +124,14 @@ func runCopy(cmd *Command, args []string) bool { *copy.maxMB = int(maxMB) } copy.masters = masters + copy.cipher = cipher - copy.masterClient = wdclient.NewMasterClient(ctx, copy.grpcDialOption, "client", copy.masters) - go copy.masterClient.KeepConnectedToMaster() - copy.masterClient.WaitUntilConnected() + ttl, err := needle.ReadTTL(*copy.ttl) + if err != nil { + fmt.Printf("parsing ttl %s: %v\n", *copy.ttl, err) + return false + } + copy.ttlSec = int32(ttl.Minutes()) * 60 if *cmdCopy.IsDebug { util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof") @@ -153,7 +157,7 @@ func runCopy(cmd *Command, args []string) bool { filerHost: filerUrl.Host, filerGrpcAddress: filerGrpcAddress, } - if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil { + if err := worker.copyFiles(fileCopyTaskChan); err != nil { fmt.Fprintf(os.Stderr, "copy file error: %v\n", err) return } @@ -164,13 +168,14 @@ func runCopy(cmd *Command, args []string) bool { return true } -func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) { - err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) +func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, cipher bool, err error) { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) } masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb + cipher = resp.Cipher return nil }) return @@ -215,9 +220,9 @@ type FileCopyWorker struct { filerGrpcAddress string } -func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error { +func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error { for task := range fileCopyTaskChan { - if err := worker.doEachCopy(ctx, task); err != nil { + if err := worker.doEachCopy(task); err != nil { return err } } @@ -233,7 +238,7 @@ type FileCopyTask struct { gid uint32 } -func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error { +func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error { f, err := os.Open(task.sourceLocation) if err != nil { @@ -261,36 +266,55 @@ func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) } if chunkCount == 1 { - return 
worker.uploadFileAsOne(ctx, task, f) + return worker.uploadFileAsOne(task, f) } - return worker.uploadFileInChunks(ctx, task, f, chunkCount, chunkSize) + return worker.uploadFileInChunks(task, f, chunkCount, chunkSize) } -func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopyTask, f *os.File) error { +func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error { // upload the file content fileName := filepath.Base(f.Name()) mimeType := detectMimeType(f) + data, err := ioutil.ReadAll(f) + if err != nil { + return err + } var chunks []*filer_pb.FileChunk + var assignResult *filer_pb.AssignVolumeResponse + var assignError error if task.fileSize > 0 { // assign a volume - assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{ - Count: 1, - Replication: *worker.options.replication, - Collection: *worker.options.collection, - Ttl: *worker.options.ttl, + err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: worker.options.ttlSec, + ParentPath: task.destinationUrlPath, + } + + assignResult, assignError = client.AssignVolume(context.Background(), request) + if assignError != nil { + return fmt.Errorf("assign volume failure %v: %v", request, assignError) + } + if assignResult.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error) + } + return nil }) if err != nil { fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err) } - targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid + targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId - uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth, *worker.options.compressionLevel) + uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth)) if err != nil { return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) } @@ -300,17 +324,19 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy fmt.Printf("uploaded %s to %s\n", fileName, targetUrl) chunks = append(chunks, &filer_pb.FileChunk{ - FileId: assignResult.Fid, - Offset: 0, - Size: uint64(uploadResult.Size), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, + FileId: assignResult.FileId, + Offset: 0, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.Md5, + CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, }) fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) } - if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ @@ -325,13 +351,13 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy Mime: mimeType, Replication: *worker.options.replication, Collection: *worker.options.collection, 
-					TtlSec:      int32(util.ParseInt(*worker.options.ttl, 0)),
+					TtlSec:      worker.options.ttlSec,
 				},
 				Chunks: chunks,
 			},
 		}
 
-		if _, err := client.CreateEntry(ctx, request); err != nil {
+		if err := filer_pb.CreateEntry(client, request); err != nil {
 			return fmt.Errorf("update fh: %v", err)
 		}
 		return nil
@@ -342,7 +368,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
 	return nil
 }
 
-func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
+func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
 
 	fileName := filepath.Base(f.Name())
 	mimeType := detectMimeType(f)
@@ -352,6 +378,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
 	concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks)
 	var wg sync.WaitGroup
 	var uploadError error
+	var collection, replication string
 
 	fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount)
 	for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ {
@@ -363,22 +390,39 @@
 				<-concurrentChunks
 			}()
 			// assign a volume
-			assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
-				Count:       1,
-				Replication: *worker.options.replication,
-				Collection:  *worker.options.collection,
-				Ttl:         *worker.options.ttl,
+			var assignResult *filer_pb.AssignVolumeResponse
+			var assignError error
+			err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+				request := &filer_pb.AssignVolumeRequest{
+					Count:       1,
+					Replication: *worker.options.replication,
+					Collection:  *worker.options.collection,
+					TtlSec:      worker.options.ttlSec,
+					ParentPath:  task.destinationUrlPath,
+				}
+
+				assignResult, assignError = client.AssignVolume(context.Background(), request)
+				if assignError != nil {
+					return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+				}
+				if assignResult.Error != "" {
+					return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+				}
+				return nil
 			})
 			if err != nil {
 				fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
 			}
 
-			targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+			targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
+			if collection == "" {
+				collection = assignResult.Collection
+			}
+			if replication == "" {
+				replication = assignResult.Replication
+			}
 
-			uploadResult, err := operation.Upload(targetUrl,
-				fileName+"-"+strconv.FormatInt(i+1, 10),
-				io.NewSectionReader(f, i*chunkSize, chunkSize),
-				false, "application/octet-stream", nil, assignResult.Auth)
+			uploadResult, err := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth))
 			if err != nil {
 				uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
 				return
@@ -388,11 +435,13 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
 				return
 			}
 			chunksChan <- &filer_pb.FileChunk{
-				FileId: assignResult.Fid,
-				Offset: i * chunkSize,
-				Size:   uint64(uploadResult.Size),
-				Mtime:
time.Now().UnixNano(), - ETag: uploadResult.ETag, + FileId: assignResult.FileId, + Offset: i * chunkSize, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, } fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) }(i) @@ -410,11 +459,11 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC for _, chunk := range chunks { fileIds = append(fileIds, chunk.FileId) } - operation.DeleteFiles(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, fileIds) + operation.DeleteFiles(copy.masters[0], worker.options.grpcDialOption, fileIds) return uploadError } - if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ @@ -427,15 +476,15 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC FileSize: uint64(task.fileSize), FileMode: uint32(task.fileMode), Mime: mimeType, - Replication: *worker.options.replication, - Collection: *worker.options.collection, - TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), + Replication: replication, + Collection: collection, + TtlSec: worker.options.ttlSec, }, Chunks: chunks, }, } - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil @@ -457,18 +506,12 @@ func detectMimeType(f *os.File) string { } if err != nil { fmt.Printf("read head of %v: %v\n", f.Name(), err) - return "application/octet-stream" + return "" } f.Seek(0, io.SeekStart) mimeType := http.DetectContentType(head[:n]) + if mimeType == "application/octet-stream" { + return "" + } return mimeType } - -func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - - return util.WithCachedGrpcClient(ctx, func(clientConn *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(clientConn) - return fn(client) - }, filerAddress, grpcDialOption) - -} diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index c6e7f5dba..737f0d24a 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -39,7 +39,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("replication", true) util.LoadConfiguration("notification", true) - config := viper.GetViper() + config := util.GetViper() var notificationInput sub.NotificationInput @@ -47,8 +47,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { for _, input := range sub.NotificationInputs { if config.GetBool("notification." + input.GetName() + ".enabled") { - viperSub := config.Sub("notification." 
+ input.GetName()) - if err := input.Initialize(viperSub); err != nil { + if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification input for %s: %+v", input.GetName(), err) } @@ -66,10 +65,9 @@ func runFilerReplicate(cmd *Command, args []string) bool { // avoid recursive replication if config.GetBool("notification.source.filer.enabled") && config.GetBool("notification.sink.filer.enabled") { - sourceConfig, sinkConfig := config.Sub("source.filer"), config.Sub("sink.filer") - if sourceConfig.GetString("grpcAddress") == sinkConfig.GetString("grpcAddress") { - fromDir := sourceConfig.GetString("directory") - toDir := sinkConfig.GetString("directory") + if config.GetString("source.filer.grpcAddress") == config.GetString("sink.filer.grpcAddress") { + fromDir := config.GetString("source.filer.directory") + toDir := config.GetString("sink.filer.directory") if strings.HasPrefix(toDir, fromDir) { glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir) } @@ -79,8 +77,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { var dataSink sink.ReplicationSink for _, sk := range sink.Sinks { if config.GetBool("sink." + sk.GetName() + ".enabled") { - viperSub := config.Sub("sink." + sk.GetName()) - if err := sk.Initialize(viperSub); err != nil { + if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize sink for %s: %+v", sk.GetName(), err) } @@ -98,7 +95,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { return true } - replicator := replication.NewReplicator(config.Sub("source.filer"), dataSink) + replicator := replication.NewReplicator(config, "source.filer.", dataSink) for { key, m, err := notificationInput.ReceiveMessage() diff --git a/weed/command/fix.go b/weed/command/fix.go index 2fbbca5e6..90d1c4893 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -8,6 +8,8 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -31,11 +33,11 @@ var ( type VolumeFileScanner4Fix struct { version needle.Version - nm *storage.NeedleMap + nm *needle_map.MemDb } -func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version return nil } @@ -46,11 +48,11 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool { func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped()) if n.Size > 0 && n.Size != types.TombstoneFileSize { - pe := scanner.nm.Put(n.Id, types.ToOffset(offset), n.Size) + pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size) glog.V(2).Infof("saved %d with error %v", n.Size, pe) } else { glog.V(2).Infof("skipping deleted file ...") - return scanner.nm.Delete(n.Id, types.ToOffset(offset)) + return scanner.nm.Delete(n.Id) } return nil } @@ -66,13 +68,8 @@ func runFix(cmd *Command, args []string) bool { baseFileName = 
*fixVolumeCollection + "_" + baseFileName } indexFileName := path.Join(*fixVolumePath, baseFileName+".idx") - indexFile, err := os.OpenFile(indexFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - glog.Fatalf("Create Volume Index [ERROR] %s\n", err) - } - defer indexFile.Close() - nm := storage.NewBtreeNeedleMap(indexFile) + nm := needle_map.NewMemDb() defer nm.Close() vid := needle.VolumeId(*fixVolumeId) @@ -80,9 +77,13 @@ func runFix(cmd *Command, args []string) bool { nm: nm, } - err = storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner) - if err != nil { - glog.Fatalf("Export Volume File [ERROR] %s\n", err) + if err := storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil { + glog.Fatalf("scan .dat File: %v", err) + os.Remove(indexFileName) + } + + if err := nm.SaveToIdx(indexFileName); err != nil { + glog.Fatalf("save to .idx File: %v", err) os.Remove(indexFileName) } diff --git a/weed/command/master.go b/weed/command/master.go index 3d33f4f7a..1be60426f 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -8,14 +8,16 @@ import ( "strings" "github.com/chrislusf/raft/protobuf" + "github.com/gorilla/mux" + "google.golang.org/grpc/reflection" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" - "github.com/spf13/viper" - "google.golang.org/grpc/reflection" ) var ( @@ -101,6 +103,8 @@ func runMaster(cmd *Command, args []string) bool { func startMaster(masterOption MasterOptions, masterWhiteList []string) { + backend.LoadConfiguration(util.GetViper()) + myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.peers) r := mux.NewRouter() @@ -112,7 +116,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { glog.Fatalf("Master startup error: %v", e) } // start raftServer - raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), + raftServer := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"), peers, myMasterAddress, *masterOption.metaFolder, ms.Topo, *masterOption.pulseSeconds) if raftServer == nil { glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717", *masterOption.metaFolder) @@ -126,7 +130,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } // Create your protocol servers. 
-	grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master"))
+	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
 	master_pb.RegisterSeaweedServer(grpcS, ms)
 	protobuf.RegisterRaftServer(grpcS, raftServer)
 	reflection.Register(grpcS)
diff --git a/weed/command/mount.go b/weed/command/mount.go
index 71c1a4387..f1448c6cc 100644
--- a/weed/command/mount.go
+++ b/weed/command/mount.go
@@ -1,23 +1,18 @@
 package command
 
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
 type MountOptions struct {
-	filer              *string
-	filerMountRootPath *string
-	dir                *string
-	dirListingLimit    *int
-	collection         *string
-	replication        *string
-	ttlSec             *int
-	chunkSizeLimitMB   *int
-	dataCenter         *string
-	allowOthers        *bool
-	umaskString        *string
+	filer                       *string
+	filerMountRootPath          *string
+	dir                         *string
+	dirListCacheLimit           *int64
+	collection                  *string
+	replication                 *string
+	ttlSec                      *int
+	chunkSizeLimitMB            *int
+	dataCenter                  *string
+	allowOthers                 *bool
+	umaskString                 *string
+	outsideContainerClusterMode *bool
 }
 
 var (
@@ -31,7 +26,7 @@ func init() {
 	mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location")
 	mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server")
 	mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory")
-	mountOptions.dirListingLimit = cmdMount.Flag.Int("dirListLimit", 100000, "limit directory listing size")
+	mountOptions.dirListCacheLimit = cmdMount.Flag.Int64("dirListCacheLimit", 1000000, "limit cache size to speed up directory long format listing")
 	mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
 	mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
 	mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
@@ -41,6 +36,7 @@ func init() {
 	mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
 	mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file")
 	mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file")
+	mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "use the filer address in place of volume server addresses that are only reachable inside the container cluster")
 }
 
 var cmdMount = &Command{
@@ -58,21 +54,11 @@ var cmdMount = &Command{
 
   On OS X, it requires OSXFUSE (http://osxfuse.github.com/).
 
-  `,
-}
-
-func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) {
-	hostnameAndPort := strings.Split(filer, ":")
-	if len(hostnameAndPort) != 2 {
-		return "", fmt.Errorf("The filer should have hostname:port format: %v", hostnameAndPort)
-	}
-
-	filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
-	if parseErr != nil {
-		return "", fmt.Errorf("The filer filer port parse error: %v", parseErr)
-	}
+  If the SeaweedFS system runs in a container cluster, e.g. managed by kubernetes or docker compose,
+  the volume servers are not accessible by their own ip addresses.
+  In "outsideContainerClusterMode", the mount will use the filer ip address instead, assuming:
+    * All volume server containers are accessible through the same hostname or IP address as the filer.
+    * All volume server container ports are open external to the cluster.
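The substitution this mode implies can be sketched as follows. The helper below is hypothetical and illustrative, not the actual mount implementation: it keeps the volume server's published port but reaches it through the filer's externally visible host.

package main

import (
	"fmt"
	"net"
)

// overrideHost is a hypothetical helper: given a volume server location that is
// only routable inside the cluster, return the same port on the filer's host.
func overrideHost(volumeLocation, filerHost string) string {
	_, port, err := net.SplitHostPort(volumeLocation)
	if err != nil {
		return volumeLocation // leave unparseable addresses untouched
	}
	host, _, err := net.SplitHostPort(filerHost)
	if err != nil {
		host = filerHost // filer given without a port
	}
	return net.JoinHostPort(host, port)
}

func main() {
	// 10.42.0.7 is a cluster-internal pod IP; the filer is the reachable endpoint.
	fmt.Println(overrideHost("10.42.0.7:8080", "filer.example.com:8888"))
	// Output: filer.example.com:8080
}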
- filerGrpcPort := int(filerPort) + 10000 - - return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil + `, } diff --git a/weed/command/mount_darwin.go b/weed/command/mount_darwin.go index 632691e47..f0a5581e7 100644 --- a/weed/command/mount_darwin.go +++ b/weed/command/mount_darwin.go @@ -7,3 +7,7 @@ import ( func osSpecificMountOptions() []fuse.MountOption { return []fuse.MountOption{} } + +func checkMountPointAvailable(dir string) bool { + return true +} diff --git a/weed/command/mount_freebsd.go b/weed/command/mount_freebsd.go index 632691e47..f0a5581e7 100644 --- a/weed/command/mount_freebsd.go +++ b/weed/command/mount_freebsd.go @@ -7,3 +7,7 @@ import ( func osSpecificMountOptions() []fuse.MountOption { return []fuse.MountOption{} } + +func checkMountPointAvailable(dir string) bool { + return true +} diff --git a/weed/command/mount_linux.go b/weed/command/mount_linux.go index 7d94e5142..80a5f9da4 100644 --- a/weed/command/mount_linux.go +++ b/weed/command/mount_linux.go @@ -1,11 +1,157 @@ package command import ( + "bufio" + "fmt" + "io" + "os" + "strings" + "github.com/seaweedfs/fuse" ) +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. + VfsOpts string +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. 
+func mounted(mountPoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountPoint + for _, e := range entries { + if e.Mountpoint == mountPoint { + return true, nil + } + } + return false, nil +} + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out []*Info + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + func osSpecificMountOptions() []fuse.MountOption { return []fuse.MountOption{ fuse.AllowNonEmptyMount(), } } + +func checkMountPointAvailable(dir string) bool { + mountPoint := dir + if mountPoint != "/" && strings.HasSuffix(mountPoint, "/") { + mountPoint = mountPoint[0 : len(mountPoint)-1] + } + + if mounted, err := mounted(mountPoint); err != nil || mounted { + return false + } + + return true +} diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 6ca9bfdca..9177091a5 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -3,6 +3,7 @@ package command import ( + "context" "fmt" "os" "os/user" @@ -12,12 +13,13 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/jacobsa/daemonize" - "github.com/spf13/viper" "github.com/chrislusf/seaweedfs/weed/filesys" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" @@ -43,13 +45,14 @@ func runMount(cmd *Command, args []string) bool { *mountOptions.chunkSizeLimitMB, *mountOptions.allowOthers, *mountOptions.ttlSec, - *mountOptions.dirListingLimit, + *mountOptions.dirListCacheLimit, os.FileMode(umask), + *mountOptions.outsideContainerClusterMode, ) } func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int, - allowOthers bool, ttlSec int, dirListingLimit int, umask os.FileMode) bool { + allowOthers bool, ttlSec int, dirListCacheLimit int64, umask os.FileMode, outsideContainerClusterMode bool) bool { util.LoadConfiguration("security", false) @@ -88,13 +91,19 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente } } + // Ensure target mount point availability + if isValid := checkMountPointAvailable(dir); !isValid { + 
glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir) + return false + } + mountName := path.Base(dir) options := []fuse.MountOption{ fuse.VolumeName(mountName), - fuse.FSName("SeaweedFS"), - fuse.Subtype("SeaweedFS"), - fuse.NoAppleDouble(), + fuse.FSName(filer + ":" + filerMountRootPath), + fuse.Subtype("seaweedfs"), + // fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders fuse.NoAppleXattr(), fuse.NoBrowse(), fuse.AutoXattr(), @@ -116,9 +125,9 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente c, err := fuse.Mount(dir, options...) if err != nil { - glog.Fatal(err) + glog.V(0).Infof("mount: %v", err) daemonize.SignalOutcome(err) - return false + return true } util.OnInterrupt(func() { @@ -126,13 +135,31 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente c.Close() }) - filerGrpcAddress, err := parseFilerGrpcAddress(filer) + // parse filer grpc address + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(filer) if err != nil { - glog.Fatal(err) + glog.V(0).Infof("ParseFilerGrpcAddress: %v", err) daemonize.SignalOutcome(err) + return true + } + + // try to connect to filer, filerBucketsPath may be useful later + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + var cipher bool + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.Fatal(err) return false } + // find mount point mountRoot := filerMountRootPath if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") { mountRoot = mountRoot[0 : len(mountRoot)-1] @@ -141,22 +168,24 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente daemonize.SignalOutcome(nil) err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{ - FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), - FilerMountRootPath: mountRoot, - Collection: collection, - Replication: replication, - TtlSec: int32(ttlSec), - ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, - DataCenter: dataCenter, - DirListingLimit: dirListingLimit, - EntryCacheTtl: 3 * time.Second, - MountUid: uid, - MountGid: gid, - MountMode: mountMode, - MountCtime: fileInfo.ModTime(), - MountMtime: time.Now(), - Umask: umask, + FilerGrpcAddress: filerGrpcAddress, + GrpcDialOption: grpcDialOption, + FilerMountRootPath: mountRoot, + Collection: collection, + Replication: replication, + TtlSec: int32(ttlSec), + ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, + DataCenter: dataCenter, + DirListCacheLimit: dirListCacheLimit, + EntryCacheTtl: 3 * time.Second, + MountUid: uid, + MountGid: gid, + MountMode: mountMode, + MountCtime: fileInfo.ModTime(), + MountMtime: time.Now(), + Umask: umask, + OutsideContainerClusterMode: outsideContainerClusterMode, + Cipher: cipher, })) if err != nil { fuse.Unmount(dir) @@ -165,8 +194,9 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente // check if the mount process has an error to report <-c.Ready if err := c.MountError; err != nil { - glog.Fatal(err) + glog.V(0).Infof("mount process: %v", err) daemonize.SignalOutcome(err) + return true } return true diff --git 
diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go new file mode 100644 index 000000000..3e13b4730 --- /dev/null +++ b/weed/command/msg_broker.go @@ -0,0 +1,107 @@ +package command + +import ( + "context" + "fmt" + "strconv" + "time" + + "google.golang.org/grpc/reflection" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/queue_pb" + "github.com/chrislusf/seaweedfs/weed/security" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + messageBrokerStandaloneOptions QueueOptions +) + +type QueueOptions struct { + filer *string + port *int + defaultTtl *string +} + +func init() { + cmdMsgBroker.Run = runMsgBroker // break init cycle + messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address") + messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "queue server gRPC listen port") + messageBrokerStandaloneOptions.defaultTtl = cmdMsgBroker.Flag.String("ttl", "1h", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") +} + +var cmdMsgBroker = &Command{ + UsageLine: "msg.broker [-port=17777] [-filer=]", + Short: "start a message queue broker", + Long: `start a message queue broker + + The broker can accept gRPC calls to write or read messages. The messages are stored via filer. + The brokers are stateless. To scale up, just add more brokers. + +`, +} + +func runMsgBroker(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + return messageBrokerStandaloneOptions.startQueueServer() + +} + +func (msgBrokerOpt *QueueOptions) startQueueServer() bool { + + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*msgBrokerOpt.filer) + if err != nil { + glog.Fatal(err) + return false + } + + filerQueuesPath := "/queues" + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + filerQueuesPath = resp.DirQueues + glog.V(0).Infof("Queue read filer queues dir: %s", filerQueuesPath) + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress) + break + } + } + + qs, err := weed_server.NewMessageBroker(&weed_server.MessageBrokerOption{ + Filers: []string{*msgBrokerOpt.filer}, + DefaultReplication: "", + MaxMB: 0, + Port: *msgBrokerOpt.port, + }) + if err != nil { + glog.Fatalf("failed to create message broker: %v", err) + return false + } + + // start grpc listener + grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0) + if err != nil { + glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err) + } + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker")) + queue_pb.RegisterSeaweedQueueServer(grpcS, qs) + reflection.Register(grpcS) + grpcS.Serve(grpcL) + + return true + +} diff --git a/weed/command/s3.go b/weed/command/s3.go index e004bb066..cd4018fbc 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -1,18 +1,20 @@ package command import ( + "context" + "fmt" "net/http" "time" + 
"github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "fmt" + "github.com/gorilla/mux" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/s3api" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" ) var ( @@ -20,29 +22,89 @@ var ( ) type S3Options struct { - filer *string - filerBucketsPath *string - port *int - domainName *string - tlsPrivateKey *string - tlsCertificate *string + filer *string + port *int + config *string + domainName *string + tlsPrivateKey *string + tlsCertificate *string } func init() { cmdS3.Run = runS3 // break init cycle s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address") - s3StandaloneOptions.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets") s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port") s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}") + s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file") s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file") s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file") } var cmdS3 = &Command{ - UsageLine: "s3 -port=8333 -filer=", + UsageLine: "s3 [-port=8333] [-filer=] [-config=]", Short: "start a s3 API compatible server that is backed by a filer", Long: `start a s3 API compatible server that is backed by a filer. + By default, you can use any access key and secret key to access the S3 APIs. 
+ To enable credential based access, create a config.json file similar to this: + +{ + "identities": [ + { + "name": "some_name", + "credentials": [ + { + "accessKey": "some_access_key1", + "secretKey": "some_secret_key1" + } + ], + "actions": [ + "Admin", + "Read", + "Write" + ] + }, + { + "name": "some_read_only_user", + "credentials": [ + { + "accessKey": "some_access_key2", + "secretKey": "some_secret_key2" + } + ], + "actions": [ + "Read" + ] + }, + { + "name": "some_normal_user", + "credentials": [ + { + "accessKey": "some_access_key3", + "secretKey": "some_secret_key3" + } + ], + "actions": [ + "Read", + "Write" + ] + }, + { + "name": "user_limited_to_bucket1", + "credentials": [ + { + "accessKey": "some_access_key4", + "secretKey": "some_secret_key4" + } + ], + "actions": [ + "Read:bucket1", + "Write:bucket1" + ] + } + ] +} + `, } @@ -56,20 +118,44 @@ func runS3(cmd *Command, args []string) bool { func (s3opt *S3Options) startS3Server() bool { - filerGrpcAddress, err := parseFilerGrpcAddress(*s3opt.filer) + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*s3opt.filer) if err != nil { glog.Fatal(err) return false } + filerBucketsPath := "/buckets" + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + filerBucketsPath = resp.DirBuckets + glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath) + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress) + break + } + } + router := mux.NewRouter().SkipClean(true) _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{ Filer: *s3opt.filer, FilerGrpcAddress: filerGrpcAddress, + Config: *s3opt.config, DomainName: *s3opt.domainName, - BucketsPath: *s3opt.filerBucketsPath, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + BucketsPath: filerBucketsPath, + GrpcDialOption: grpcDialOption, }) if s3ApiServer_err != nil { glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 13091764e..f4a08fb51 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -14,6 +14,14 @@ var cmdScaffold = &Command{ Short: "generate basic configuration files", Long: `Generate filer.toml with all possible configurations for you to customize. + The options can also be overwritten by environment variables. + For example, the filer.toml mysql password can be overwritten by environment variable + export WEED_MYSQL_PASSWORD=some_password + Environment variable rules: + * Prefix with "WEED_" + * Uppercase the rest of the variable name. + * Replace '.' with '_' + `, }
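Note: a minimal sketch of how such WEED_-prefixed overrides are typically wired up with viper; this is illustrative only, not necessarily how weed/util implements it:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/spf13/viper"
    )

    func main() {
    	v := viper.New()
    	v.SetEnvPrefix("weed")                             // keys are looked up as WEED_...
    	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) // '.' in a key becomes '_'
    	v.AutomaticEnv()                                   // uppercasing is automatic
    	// with export WEED_MYSQL_PASSWORD=some_password set, this resolves
    	// the filer.toml key mysql.password to "some_password":
    	fmt.Println(v.GetString("mysql.password"))
    }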
@@ -59,14 +67,21 @@ const ( # $HOME/.seaweedfs/filer.toml # /etc/seaweedfs/filer.toml -[memory] -# local in memory, mostly for testing purpose -enabled = false +#################################################### +# Customizable filer server options +#################################################### +[filer.options] +# with http DELETE, by default the filer would check whether a folder is empty. +# recursive_delete will delete all sub folders and files, similar to "rm -Rf" +recursive_delete = false +# directories under this folder will automatically create a separate bucket +buckets_folder = "/buckets" +# directories under this folder will store message queue data +queues_folder = "/queues" -[leveldb] -# local on disk, mostly for simple single-machine setup, fairly scalable -enabled = false -dir = "." # directory to store level db files +#################################################### +# The following are filer store options +#################################################### [leveldb2] # local on disk, mostly for simple single-machine setup, fairly scalable @@ -74,10 +89,6 @@ dir = "." # directory to store level db files enabled = true dir = "." # directory to store level db files -#################################################### -# multiple filers on shared storage, fairly scalable -#################################################### - [mysql] # or tidb # CREATE TABLE IF NOT EXISTS filemeta ( # dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', @@ -95,6 +106,7 @@ password = "" database = "" # create or use an existing database connection_max_idle = 2 connection_max_open = 100 +interpolateParams = false [postgres] # or cockroachdb # CREATE TABLE IF NOT EXISTS filemeta ( @@ -144,6 +156,10 @@ addresses = [ "localhost:30006", ] password = "" +# allows reads from slave servers or the master, but all writes still go to the master +readOnly = true +# automatically use the closest Redis server for reads +routeByLatency = true [etcd] enabled = false @@ -310,6 +326,10 @@ key = "" cert = "" key = "" +[grpc.msg_broker] +cert = "" +key = "" + # use this for any place needs a grpc client # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" [grpc.client] @@ -350,19 +370,33 @@ sleep_minutes = 17 # sleep minutes between each script execution default_filer_url = "http://localhost:8888/" [master.sequencer] -type = memory # Choose [memory|etcd] type for storing the file id sequence +type = "memory" # Choose [memory|etcd] type for storing the file id sequence # when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence # example : http://127.0.0.1:2379,http://127.0.0.1:2389 -sequencer_etcd_urls = http://127.0.0.1:2379 - - -[storage.backend.s3] -enabled = true -aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials). -aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials). -region = "us-east-2" -bucket = "your_bucket_name" # an existing bucket -directory = "/" # destination directory +sequencer_etcd_urls = "http://127.0.0.1:2379" + + +# configurations for tiered cloud storage # old volumes are transparently moved to cloud for cost efficiency +[storage.backend] + [storage.backend.s3.default] + enabled = false + aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials). + aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials). + region = "us-east-2" + bucket = "your_bucket_name" # an existing bucket + 
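Note: for orientation, this is roughly how settings like these map onto the AWS SDK for Go when an S3 backend is constructed. A minimal sketch; the constructor name is hypothetical, and the empty-credentials fallback mirrors the comments in the config above:

    package sketch

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/credentials"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    // newS3Backend is illustrative, not the actual backend constructor.
    func newS3Backend(accessKeyID, secretAccessKey, region string) (*s3.S3, error) {
    	config := &aws.Config{Region: aws.String(region)}
    	if accessKeyID != "" && secretAccessKey != "" {
    		config.Credentials = credentials.NewStaticCredentials(accessKeyID, secretAccessKey, "")
    	} // if empty, the SDK falls back to ~/.aws/credentials and the environment
    	sess, err := session.NewSession(config)
    	if err != nil {
    		return nil, err
    	}
    	return s3.New(sess), nil
    }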
+# create this number of logical volumes if there are no more writable volumes +# count_x means how many copies of data. +# e.g.: +# 000 has only one copy, count_1 +# 010 and 001 have two copies, count_2 +# 011 has three copies, count_3 +[master.volume_growth] +count_1 = 7 # create 1 x 7 = 7 actual volumes +count_2 = 6 # create 2 x 6 = 12 actual volumes +count_3 = 3 # create 3 x 3 = 9 actual volumes +count_other = 1 # create n x 1 = n actual volumes ` ) diff --git a/weed/command/scaffold_test.go b/weed/command/scaffold_test.go new file mode 100644 index 000000000..423dacc32 --- /dev/null +++ b/weed/command/scaffold_test.go @@ -0,0 +1,44 @@ +package command + +import ( + "bytes" + "fmt" + "testing" + + "github.com/spf13/viper" +) + +func TestReadingTomlConfiguration(t *testing.T) { + + viper.SetConfigType("toml") + + // any approach to load this configuration into your program. + var tomlExample = []byte(` +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +`) + + viper.ReadConfig(bytes.NewBuffer(tomlExample)) + + fmt.Printf("database is %v\n", viper.Get("database")) + fmt.Printf("servers is %v\n", viper.GetStringMap("servers")) + + alpha := viper.Sub("servers.alpha") + + fmt.Printf("alpha ip is %v\n", alpha.GetString("ip")) +} diff --git a/weed/command/server.go b/weed/command/server.go index 87f404ed3..560b90037 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -78,10 +78,10 @@ func init() { filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port") filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port") filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.") - filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") + filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") @@ -89,13 +89,14 @@ func init() { serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.") serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second") + serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") - s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets") s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 
server http listen port") s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}") s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file") s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file") + s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file") } @@ -113,10 +114,6 @@ func runServer(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - if *filerOptions.redirectOnRead { - *isStartingFiler = true - } - if *isStartingS3 { *isStartingFiler = true } diff --git a/weed/command/shell.go b/weed/command/shell.go index 34b5aef31..dcf70608f 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -6,7 +6,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/shell" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) var ( @@ -31,7 +30,7 @@ var cmdShell = &Command{ func runShell(command *Command, args []string) bool { util.LoadConfiguration("security", false) - shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") var filerPwdErr error shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = util.ParseFilerUrl(*shellInitialFilerUrl) diff --git a/weed/command/upload.go b/weed/command/upload.go index 25e938d9b..d71046131 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -6,11 +6,9 @@ import ( "os" "path/filepath" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" - - "github.com/chrislusf/seaweedfs/weed/operation" ) var ( @@ -63,7 +61,7 @@ var cmdUpload = &Command{ func runUpload(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") if len(args) == 0 { if *upload.dir == "" { diff --git a/weed/command/volume.go b/weed/command/volume.go index 3c1aa2b50..4773d8a55 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -1,6 +1,7 @@ package command import ( + "fmt" "net/http" "os" "runtime" @@ -9,15 +10,20 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/spf13/viper" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util/httpdown" + + "google.golang.org/grpc/reflection" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc/reflection" ) var ( @@ -44,6 +50,7 @@ type VolumeServerOptions struct { cpuProfile *string memProfile *string compactionMBPerSecond *int + fileSizeLimitMB *int } func init() { @@ -64,6 +71,7 @@ func init() { v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file") v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file") v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second") + 
v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory") } var cmdVolume = &Command{ @@ -94,7 +102,7 @@ func runVolume(cmd *Command, args []string) bool { func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption string) { - //Set multiple folders and each folder's max volume count limit' + // Set multiple folders and each folder's max volume count limit v.folders = strings.Split(volumeFolders, ",") maxCountStrings := strings.Split(maxVolumeCounts, ",") for _, maxString := range maxCountStrings { @@ -113,7 +121,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } } - //security related white list configuration + // security related white list configuration if volumeWhiteListOption != "" { v.whiteList = strings.Split(volumeWhiteListOption, ",") } @@ -128,11 +136,10 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v if *v.publicUrl == "" { *v.publicUrl = *v.ip + ":" + strconv.Itoa(*v.publicPort) } - isSeperatedPublicPort := *v.publicPort != *v.port volumeMux := http.NewServeMux() publicVolumeMux := volumeMux - if isSeperatedPublicPort { + if v.isSeparatedPublicPort() { publicVolumeMux = http.NewServeMux() } @@ -156,53 +163,134 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v v.whiteList, *v.fixJpgOrientation, *v.readRedirect, *v.compactionMBPerSecond, + *v.fileSizeLimitMB, ) - listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port) - glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress) - listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) - if e != nil { - glog.Fatalf("Volume server listener error:%v", e) - } - if isSeperatedPublicPort { - publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort) - glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress) - publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) - if e != nil { - glog.Fatalf("Volume server listener error:%v", e) + // starting grpc server + grpcS := v.startGrpcService(volumeServer) + + // starting public http server + var publicHttpDown httpdown.Server + if v.isSeparatedPublicPort() { + publicHttpDown = v.startPublicHttpService(publicVolumeMux) + if nil == publicHttpDown { + glog.Fatalf("start public http service failed") } - go func() { - if e := http.Serve(publicListener, publicVolumeMux); e != nil { - glog.Fatalf("Volume server fail to serve public: %v", e) - } - }() } + // starting the cluster http server + clusterHttpServer := v.startClusterHttpService(volumeMux) + + stopChain := make(chan struct{}) util.OnInterrupt(func() { + fmt.Println("volume server has been killed") + var startTime time.Time + + // first, stop the public http service so no new user requests are accepted + if nil != publicHttpDown { + startTime = time.Now() + if err := publicHttpDown.Stop(); err != nil { + glog.Warningf("stop the public http server failed, %v", err) + } + delta := time.Now().Sub(startTime).Nanoseconds() / 1e6 + glog.V(0).Infof("stop public http server, elapsed %dms", delta) + } + + startTime = time.Now() + if err := clusterHttpServer.Stop(); err != nil { + glog.Warningf("stop the cluster http server failed, %v", err) + } + delta := time.Now().Sub(startTime).Nanoseconds() / 1e6 + glog.V(0).Infof("graceful stop cluster http server, elapsed %dms", delta) + + startTime = time.Now() + grpcS.GracefulStop() + delta = time.Now().Sub(startTime).Nanoseconds() / 1e6 + glog.V(0).Infof("graceful stop gRPC, elapsed %dms", delta) + + startTime = time.Now() volumeServer.Shutdown() + delta = time.Now().Sub(startTime).Nanoseconds() / 1e6 + glog.V(0).Infof("stop volume server, elapsed %dms", delta) + pprof.StopCPUProfile() + + close(stopChain) // notify exit }) - // starting grpc server + select { + case <-stopChain: + } + glog.Warningf("the volume server exited") +}
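Note: the shutdown ordering above is the usual drain-before-stop pattern: stop the public HTTP listener first so no new user requests arrive, then the cluster HTTP server, then gRPC, and finally the store itself. This change uses the vendored httpdown package; the same idea with only the standard library and grpc-go, as a rough sketch (the server names and both functions are hypothetical):

    package sketch

    import (
    	"context"
    	"net/http"
    	"time"

    	"google.golang.org/grpc"

    	"github.com/chrislusf/seaweedfs/weed/glog"
    )

    // drain gracefully stops an HTTP server, timing the shutdown like the code above.
    func drain(name string, srv *http.Server) {
    	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
    	defer cancel()
    	startTime := time.Now()
    	if err := srv.Shutdown(ctx); err != nil {
    		glog.Warningf("stop %s failed, %v", name, err)
    	}
    	delta := time.Now().Sub(startTime).Nanoseconds() / 1e6
    	glog.V(0).Infof("stop %s, elapsed %dms", name, delta)
    }

    func shutdownAll(publicServer, clusterServer *http.Server, grpcS *grpc.Server) {
    	drain("public http server", publicServer)   // refuse new user requests first
    	drain("cluster http server", clusterServer) // then the intra-cluster endpoint
    	grpcS.GracefulStop()                        // finally drain in-flight gRPC calls
    }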
+ +// isSeparatedPublicPort reports whether a separate public port is configured +func (v VolumeServerOptions) isSeparatedPublicPort() bool { + return *v.publicPort != *v.port +} + +func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerServer) *grpc.Server { grpcPort := *v.port + 10000 grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0) if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume")) - volume_server_pb.RegisterVolumeServerServer(grpcS, volumeServer) + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume")) + volume_server_pb.RegisterVolumeServerServer(grpcS, vs) reflection.Register(grpcS) - go grpcS.Serve(grpcL) - - if viper.GetString("https.volume.key") != "" { - if e := http.ServeTLS(listener, volumeMux, - viper.GetString("https.volume.cert"), viper.GetString("https.volume.key")); e != nil { - glog.Fatalf("Volume server fail to serve: %v", e) + go func() { + if err := grpcS.Serve(grpcL); err != nil { + glog.Fatalf("start gRPC service failed, %s", err) } - } else { - if e := http.Serve(listener, volumeMux); e != nil { - glog.Fatalf("Volume server fail to serve: %v", e) + }() + return grpcS +} + +func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server { + publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort) + glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress) + publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) + if e != nil { + glog.Fatalf("Volume server listener error:%v", e) + } + + pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute} + publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener) + go func() { + if err := publicHttpDown.Wait(); err != nil { + glog.Errorf("public http down wait failed, %v", err) + } + }() + + return publicHttpDown +} + +func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpdown.Server { + var ( + certFile, keyFile string + ) + if viper.GetString("https.volume.key") != "" { + certFile = viper.GetString("https.volume.cert") + keyFile = viper.GetString("https.volume.key") } + listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port) + glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress) + listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) + if e != nil { + glog.Fatalf("Volume server listener error:%v", e) + } + + httpDown := httpdown.HTTP{ + KillTimeout: 5 * time.Minute, + StopTimeout: 5 * time.Minute, + CertFile: certFile, + KeyFile: keyFile} + clusterHttpServer := httpDown.Serve(&http.Server{Handler: handler}, listener) + go func() { + if e := clusterHttpServer.Wait(); e != nil { + glog.Fatalf("Volume server fail to serve: %v", e) + } + }() + return 
clusterHttpServer } diff --git a/weed/command/webdav.go b/weed/command/webdav.go index 371c4a9ad..4f5d5f5ce 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -1,6 +1,7 @@ package command import ( + "context" "fmt" "net/http" "os/user" @@ -8,10 +9,11 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) var ( @@ -37,7 +39,7 @@ func init() { var cmdWebDav = &Command{ UsageLine: "webdav -port=7333 -filer=", - Short: " start a webdav server that is backed by a filer", + Short: "start a webdav server that is backed by a filer", Long: `start a webdav server that is backed by a filer. `, @@ -55,12 +57,6 @@ func runWebDav(cmd *Command, args []string) bool { func (wo *WebDavOption) startWebDav() bool { - filerGrpcAddress, err := parseFilerGrpcAddress(*wo.filer) - if err != nil { - glog.Fatal(err) - return false - } - // detect current user uid, gid := uint32(0), uint32(0) if u, err := user.Current(); err == nil { @@ -72,13 +68,43 @@ func (wo *WebDavOption) startWebDav() bool { } } + // parse filer grpc address + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer) + if err != nil { + glog.Fatal(err) + return false + } + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + var cipher bool + // connect to filer + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress) + break + } + } + ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{ Filer: *wo.filer, FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + GrpcDialOption: grpcDialOption, Collection: *wo.collection, Uid: uid, Gid: gid, + Cipher: cipher, }) if webdavServer_err != nil { glog.Fatalf("WebDav Server startup error: %v", webdavServer_err) diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index 3e8554957..864c858d3 100644 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -7,16 +7,19 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) type AbstractSqlStore struct { - DB *sql.DB - SqlInsert string - SqlUpdate string - SqlFind string - SqlDelete string - SqlListExclusive string - SqlListInclusive string + DB *sql.DB + SqlInsert string + SqlUpdate string + SqlFind string + SqlDelete string + SqlDeleteFolderChildren string + SqlListExclusive string + SqlListInclusive string } type TxOrDB interface { @@ -64,7 +67,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.En return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - 
res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, hashToLong(dir), name, dir, meta) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta) if err != nil { return fmt.Errorf("insert %s: %s", entry.FullPath, err) } @@ -84,7 +87,7 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir) if err != nil { return fmt.Errorf("update %s: %s", entry.FullPath, err) } @@ -99,10 +102,10 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) { dir, name := fullpath.DirAndName() - row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, hashToLong(dir), name, dir) + row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir) var data []byte if err := row.Scan(&data); err != nil { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } entry := &filer2.Entry{ @@ -119,7 +122,7 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2. dir, name := fullpath.DirAndName() - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, util.HashStringToLong(dir), name, dir) if err != nil { return fmt.Errorf("delete %s: %s", fullpath, err) } @@ -132,6 +135,21 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2. 
return nil } +func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { + + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath) + if err != nil { + return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err) + } + + return nil +} + func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { sqlText := store.SqlListExclusive @@ -139,7 +157,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat sqlText = store.SqlListInclusive } - rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) + rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), limit) if err != nil { return nil, fmt.Errorf("list %s : %v", fullpath, err) } diff --git a/weed/filer2/abstract_sql/hashing.go b/weed/filer2/abstract_sql/hashing.go deleted file mode 100644 index 5c982c537..000000000 --- a/weed/filer2/abstract_sql/hashing.go +++ /dev/null @@ -1,32 +0,0 @@ -package abstract_sql - -import ( - "crypto/md5" - "io" -) - -// returns a 64 bit big int -func hashToLong(dir string) (v int64) { - h := md5.New() - io.WriteString(h, dir) - - b := h.Sum(nil) - - v += int64(b[0]) - v <<= 8 - v += int64(b[1]) - v <<= 8 - v += int64(b[2]) - v <<= 8 - v += int64(b[3]) - v <<= 8 - v += int64(b[4]) - v <<= 8 - v += int64(b[5]) - v <<= 8 - v += int64(b[6]) - v <<= 8 - v += int64(b[7]) - - return -} diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go index 466be5bf3..6f25fffec 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -3,10 +3,13 @@ package cassandra import ( "context" "fmt" + + "github.com/gocql/gocql" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gocql/gocql" ) func init() { @@ -22,10 +25,10 @@ func (store *CassandraStore) GetName() string { return "cassandra" } -func (store *CassandraStore) Initialize(configuration util.Configuration) (err error) { +func (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("keyspace"), - configuration.GetStringSlice("hosts"), + configuration.GetString(prefix+"keyspace"), + configuration.GetStringSlice(prefix+"hosts"), ) } @@ -80,12 +83,12 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.Full "SELECT meta FROM filemeta WHERE directory=? 
AND name=?", dir, name).Consistency(gocql.One).Scan(&data); err != nil { if err != gocql.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } } if len(data) == 0 { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } entry = &filer2.Entry{ @@ -112,6 +115,17 @@ func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.Fu return nil } +func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { + + if err := store.session.Query( + "DELETE FROM filemeta WHERE directory=?", + fullpath).Exec(); err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { diff --git a/weed/filer2/configuration.go b/weed/filer2/configuration.go index 7b05b53dc..a174117ea 100644 --- a/weed/filer2/configuration.go +++ b/weed/filer2/configuration.go @@ -17,8 +17,7 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) { for _, store := range Stores { if config.GetBool(store.GetName() + ".enabled") { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { + if err := store.Initialize(config, store.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize store for %s: %+v", store.GetName(), err) } diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go index 3f8a19114..c901927bb 100644 --- a/weed/filer2/entry.go +++ b/weed/filer2/entry.go @@ -30,6 +30,7 @@ type Entry struct { FullPath Attr + Extended map[string][]byte // the following is for files Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"` @@ -56,6 +57,7 @@ func (entry *Entry) ToProtoEntry() *filer_pb.Entry { IsDirectory: entry.IsDirectory(), Attributes: EntryAttributeToPb(entry), Chunks: entry.Chunks, + Extended: entry.Extended, } } diff --git a/weed/filer2/entry_codec.go b/weed/filer2/entry_codec.go index cf4627b74..3a2dc6134 100644 --- a/weed/filer2/entry_codec.go +++ b/weed/filer2/entry_codec.go @@ -1,18 +1,21 @@ package filer2 import ( + "bytes" + "fmt" "os" "time" - "fmt" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) { message := &filer_pb.Entry{ Attributes: EntryAttributeToPb(entry), Chunks: entry.Chunks, + Extended: entry.Extended, } return proto.Marshal(message) } @@ -27,6 +30,8 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error { entry.Attr = PbToEntryAttribute(message.Attributes) + entry.Extended = message.Extended + entry.Chunks = message.Chunks return nil @@ -84,6 +89,10 @@ func EqualEntry(a, b *Entry) bool { return false } + if !eq(a.Extended, b.Extended) { + return false + } + for i := 0; i < len(a.Chunks); i++ { if !proto.Equal(a.Chunks[i], b.Chunks[i]) { return false @@ -91,3 +100,17 @@ func EqualEntry(a, b *Entry) bool { } return true } + +func eq(a, b map[string][]byte) bool { + if len(a) != len(b) { + return false + } + + for k, v := range a { + if w, ok := b[k]; !ok || !bytes.Equal(v, w) { + return false + } + } + + return true +} diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer2/etcd/etcd_store.go index 1b0f928d0..83a6ddc5d 100644 --- a/weed/filer2/etcd/etcd_store.go +++ b/weed/filer2/etcd/etcd_store.go @@ -6,10 +6,12 @@ import ( "strings" "time" + "go.etcd.io/etcd/clientv3" + 
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" - "go.etcd.io/etcd/clientv3" ) const ( @@ -28,13 +30,13 @@ func (store *EtcdStore) GetName() string { return "etcd" } -func (store *EtcdStore) Initialize(configuration weed_util.Configuration) (err error) { - servers := configuration.GetString("servers") +func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + servers := configuration.GetString(prefix + "servers") if servers == "" { servers = "localhost:2379" } - timeout := configuration.GetString("timeout") + timeout := configuration.GetString(prefix + "timeout") if timeout == "" { timeout = "3s" } @@ -99,7 +101,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) } if len(resp.Kvs) == 0 { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } entry = &filer2.Entry{ @@ -123,6 +125,16 @@ func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat return nil } +func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + + if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil { + return fmt.Errorf("deleteFolderChildren %s : %v", fullpath, err) + } + + return nil +} + func (store *EtcdStore) ListDirectoryEntries( ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int, ) (entries []*filer2.Entry, err error) { diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go index b5876df82..711488df1 100644 --- a/weed/filer2/filechunks.go +++ b/weed/filer2/filechunks.go @@ -71,6 +71,8 @@ type ChunkView struct { Size uint64 LogicOffset int64 IsFullChunk bool + CipherKey []byte + isGzipped bool } func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) { @@ -86,6 +88,7 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int stop := offset + int64(size) for _, chunk := range visibles { + if chunk.start <= offset && offset < chunk.stop && offset < stop { isFullChunk := chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop views = append(views, &ChunkView{ @@ -94,6 +97,8 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int Size: uint64(min(chunk.stop, stop) - offset), LogicOffset: offset, IsFullChunk: isFullChunk, + CipherKey: chunk.cipherKey, + isGzipped: chunk.isGzipped, }) offset = min(chunk.stop, stop) } @@ -120,13 +125,7 @@ var bufPool = sync.Pool{ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval { - newV := newVisibleInterval( - chunk.Offset, - chunk.Offset+int64(chunk.Size), - chunk.GetFileIdString(), - chunk.Mtime, - true, - ) + newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, true, chunk.CipherKey, chunk.IsGzipped) length := len(visibles) if length == 0 { @@ -140,23 +139,11 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb. 
logPrintf(" before", visibles) for _, v := range visibles { if v.start < chunk.Offset && chunk.Offset < v.stop { - newVisibles = append(newVisibles, newVisibleInterval( - v.start, - chunk.Offset, - v.fileId, - v.modifiedTime, - false, - )) + newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, false, v.cipherKey, v.isGzipped)) } chunkStop := chunk.Offset + int64(chunk.Size) if v.start < chunkStop && chunkStop < v.stop { - newVisibles = append(newVisibles, newVisibleInterval( - chunkStop, - v.stop, - v.fileId, - v.modifiedTime, - false, - )) + newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, false, v.cipherKey, v.isGzipped)) } if chunkStop <= v.start || v.stop <= chunk.Offset { newVisibles = append(newVisibles, v) @@ -187,6 +174,7 @@ func NonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []Vi var newVisibles []VisibleInterval for _, chunk := range chunks { + newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk) t := visibles[:0] visibles = newVisibles @@ -208,15 +196,19 @@ type VisibleInterval struct { modifiedTime int64 fileId string isFullChunk bool + cipherKey []byte + isGzipped bool } -func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool) VisibleInterval { +func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool, cipherKey []byte, isGzipped bool) VisibleInterval { return VisibleInterval{ start: start, stop: stop, fileId: fileId, modifiedTime: modifiedTime, isFullChunk: isFullChunk, + cipherKey: cipherKey, + isGzipped: isGzipped, } } diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go index e75e60753..bb4a6c74d 100644 --- a/weed/filer2/filechunks_test.go +++ b/weed/filer2/filechunks_test.go @@ -331,6 +331,42 @@ func TestChunksReading(t *testing.T) { {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, }, }, + // case 8: edge cases + { + Chunks: []*filer_pb.FileChunk{ + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353}, + }, + Offset: 0, + Size: 300, + Expected: []*ChunkView{ + {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0}, + {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90}, + {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190}, + }, + }, + // case 9: edge cases + { + Chunks: []*filer_pb.FileChunk{ + {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1}, + {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2}, + {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3}, + {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4}, + {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5}, + {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6}, + }, + Offset: 0, + Size: 153578836, + Expected: []*ChunkView{ + {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0}, + {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936}, + {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760}, + {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736}, + {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168}, + {Offset: 0, Size: 153578836 - 137269248, 
FileId: "1,114201d5bbdb", LogicOffset: 137269248}, + }, + }, } for i, testcase := range testcases { diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 672295dea..d3343f610 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -3,37 +3,46 @@ package filer2 import ( "context" "fmt" - "google.golang.org/grpc" - "math" "os" "path/filepath" "strings" "time" + "google.golang.org/grpc" + + "github.com/karlseguin/ccache" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/karlseguin/ccache" ) +const PaginationSize = 1024 * 256 + var ( OS_UID = uint32(os.Getuid()) OS_GID = uint32(os.Getgid()) ) type Filer struct { - store *FilerStoreWrapper - directoryCache *ccache.Cache - MasterClient *wdclient.MasterClient - fileIdDeletionChan chan string - GrpcDialOption grpc.DialOption + store *FilerStoreWrapper + directoryCache *ccache.Cache + MasterClient *wdclient.MasterClient + fileIdDeletionQueue *util.UnboundedQueue + GrpcDialOption grpc.DialOption + DirBucketsPath string + DirQueuesPath string + buckets *FilerBuckets + Cipher bool } -func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer { +func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerGrpcPort uint32) *Filer { f := &Filer{ - directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), - MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters), - fileIdDeletionChan: make(chan string, 4096), - GrpcDialOption: grpcDialOption, + directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), + MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerGrpcPort, masters), + fileIdDeletionQueue: util.NewUnboundedQueue(), + GrpcDialOption: grpcDialOption, } go f.loopProcessingDeletion() @@ -69,7 +78,7 @@ func (f *Filer) RollbackTransaction(ctx context.Context) error { return f.store.RollbackTransaction(ctx) } -func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { +func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) error { if string(entry.FullPath) == "/" { return nil @@ -93,7 +102,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { glog.V(4).Infof("find uncached directory: %s", dirPath) dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath)) } else { - glog.V(4).Infof("found cached directory: %s", dirPath) + // glog.V(4).Infof("found cached directory: %s", dirPath) } // no such existing directory @@ -105,25 +114,30 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { dirEntry = &Entry{ FullPath: FullPath(dirPath), Attr: Attr{ - Mtime: now, - Crtime: now, - Mode: os.ModeDir | 0770, - Uid: entry.Uid, - Gid: entry.Gid, + Mtime: now, + Crtime: now, + Mode: os.ModeDir | 0770, + Uid: entry.Uid, + Gid: entry.Gid, + Collection: entry.Collection, + Replication: entry.Replication, }, } glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) mkdirErr := f.store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { - if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound { + if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == filer_pb.ErrNotFound { + glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } } else { + f.maybeAddBucket(dirEntry) f.NotifyUpdateEvent(nil, dirEntry, false) } } else if !dirEntry.IsDirectory() { + 
glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) return fmt.Errorf("%s is a file", dirPath) } @@ -138,6 +152,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { } if lastDirectoryEntry == nil { + glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath) return fmt.Errorf("parent folder not found: %v", entry.FullPath) } @@ -151,22 +166,30 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { oldEntry, _ := f.FindEntry(ctx, entry.FullPath) + glog.V(4).Infof("CreateEntry %s: old entry: %v exclusive:%v", entry.FullPath, oldEntry, o_excl) if oldEntry == nil { if err := f.store.InsertEntry(ctx, entry); err != nil { glog.Errorf("insert entry %s: %v", entry.FullPath, err) return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) } } else { + if o_excl { + glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath) + return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath) + } if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { glog.Errorf("update entry %s: %v", entry.FullPath, err) return fmt.Errorf("update entry %s: %v", entry.FullPath, err) } } + f.maybeAddBucket(entry) f.NotifyUpdateEvent(oldEntry, entry, true) f.deleteChunksIfNotNew(oldEntry, entry) + glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) + return nil } @@ -200,75 +223,51 @@ func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err er }, }, nil } - return f.store.FindEntry(ctx, p) -} - -func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) { - entry, err := f.FindEntry(ctx, p) - if err != nil { - return err - } - - if entry.IsDirectory() { - limit := int(1) - if isRecursive { - limit = math.MaxInt32 - } - lastFileName := "" - includeLastFile := false - for limit > 0 { - entries, err := f.ListDirectoryEntries(ctx, p, lastFileName, includeLastFile, 1024) - if err != nil { - glog.Errorf("list folder %s: %v", p, err) - return fmt.Errorf("list folder %s: %v", p, err) - } - - if len(entries) == 0 { - break - } - - if isRecursive { - for _, sub := range entries { - lastFileName = sub.Name() - err = f.DeleteEntryMetaAndData(ctx, sub.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks) - if err != nil && !ignoreRecursiveError { - return err - } - limit-- - if limit <= 0 { - break - } - } - } - - if len(entries) < 1024 { - break - } + entry, err = f.store.FindEntry(ctx, p) + if entry != nil && entry.TtlSec > 0 { + if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + f.store.DeleteEntry(ctx, p.Child(entry.Name())) + return nil, filer_pb.ErrNotFound } - - f.cacheDelDirectory(string(p)) - } + return - if shouldDeleteChunks { - f.DeleteChunks(p, entry.Chunks) - } +} - if p == "/" { - return nil +func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { + if strings.HasSuffix(string(p), "/") && len(p) > 1 { + p = p[0 : len(p)-1] } - glog.V(3).Infof("deleting entry %v", p) - f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) + var makeupEntries []*Entry + entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit) + for expiredCount > 0 && err == nil { + makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount) + if err == nil { + entries = append(entries, makeupEntries...) 
+ } + + return entries, err } -func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { - if strings.HasSuffix(string(p), "/") && len(p) > 1 { - p = p[0 : len(p)-1] +func (f *Filer) doListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) { + listedEntries, listErr := f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) + if listErr != nil { + return listedEntries, expiredCount, "", listErr } - return f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) + for _, entry := range listedEntries { + lastFileName = entry.Name() + if entry.TtlSec > 0 { + if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + f.store.DeleteEntry(ctx, p.Child(entry.Name())) + expiredCount++ + continue + } + } + entries = append(entries, entry) + } + return } func (f *Filer) cacheDelDirectory(dirpath string) { diff --git a/weed/filer2/filer_buckets.go b/weed/filer2/filer_buckets.go new file mode 100644 index 000000000..601b7dbf3 --- /dev/null +++ b/weed/filer2/filer_buckets.go @@ -0,0 +1,113 @@ +package filer2 + +import ( + "context" + "math" + "sync" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +type BucketName string +type BucketOption struct { + Name BucketName + Replication string +} +type FilerBuckets struct { + dirBucketsPath string + buckets map[BucketName]*BucketOption + sync.RWMutex +} + +func (f *Filer) LoadBuckets(dirBucketsPath string) { + + f.buckets = &FilerBuckets{ + buckets: make(map[BucketName]*BucketOption), + } + f.DirBucketsPath = dirBucketsPath + + limit := math.MaxInt32 + + entries, err := f.ListDirectoryEntries(context.Background(), FullPath(dirBucketsPath), "", false, limit) + + if err != nil { + glog.V(1).Infof("no buckets found: %v", err) + return + } + + glog.V(1).Infof("buckets found: %d", len(entries)) + + f.buckets.Lock() + for _, entry := range entries { + f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{ + Name: BucketName(entry.Name()), + Replication: entry.Replication, + } + } + f.buckets.Unlock() + +} + +func (f *Filer) ReadBucketOption(bucketName string) (replication string) { + + f.buckets.RLock() + defer f.buckets.RUnlock() + + option, found := f.buckets.buckets[BucketName(bucketName)] + + if !found { + return "" + } + return option.Replication + +} + +func (f *Filer) isBucket(entry *Entry) bool { + if !entry.IsDirectory() { + return false + } + parent, dirName := entry.FullPath.DirAndName() + if parent != f.DirBucketsPath { + return false + } + + f.buckets.RLock() + defer f.buckets.RUnlock() + + _, found := f.buckets.buckets[BucketName(dirName)] + + return found + +} + +func (f *Filer) maybeAddBucket(entry *Entry) { + if !entry.IsDirectory() { + return + } + parent, dirName := entry.FullPath.DirAndName() + if parent != f.DirBucketsPath { + return + } + f.addBucket(dirName, &BucketOption{ + Name: BucketName(dirName), + Replication: entry.Replication, + }) +} + +func (f *Filer) addBucket(bucketName string, bucketOption *BucketOption) { + + f.buckets.Lock() + defer f.buckets.Unlock() + + f.buckets.buckets[BucketName(bucketName)] = bucketOption + +} + +func (f *Filer) deleteBucket(bucketName string) { + + f.buckets.Lock() + defer f.buckets.Unlock() + + delete(f.buckets.buckets, BucketName(bucketName)) + +}
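Note: the RWMutex-guarded map above is what lets the filer resolve per-bucket replication in memory, without touching the store. A sketch of the intended lookup on a write path; the wrapper function and the default fallback are assumptions, not code from this change:

    package sketch

    import "github.com/chrislusf/seaweedfs/weed/filer2"

    // replicationFor is hypothetical: resolve the replication setting for an entry
    // under /buckets/<bucketName>/..., falling back to a filer-wide default.
    func replicationFor(f *filer2.Filer, bucketName, defaultReplication string) string {
    	if replication := f.ReadBucketOption(bucketName); replication != "" {
    		return replication // the per-bucket setting wins
    	}
    	return defaultReplication
    }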
diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index 7e093eea2..1c1fa6a5b 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -3,6 +3,8 @@ package filer2 import ( "context" "fmt" + "io" + "math" "strings" "sync" @@ -20,10 +22,11 @@ func VolumeId(fileId string) string { } type FilerClient interface { - WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error + WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error + AdjustedUrl(hostAndPort string) string } -func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath string, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { +func ReadIntoBuffer(filerClient FilerClient, fullFilePath FullPath, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { var vids []string for _, chunkView := range chunkViews { vids = append(vids, VolumeId(chunkView.FileId)) @@ -31,10 +34,10 @@ vid2Locations := make(map[string]*filer_pb.Locations) - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read fh lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: vids, }) if err != nil { @@ -65,20 +68,16 @@ return } + volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url) var n int64 - n, err = util.ReadUrl( - fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId), - chunkView.Offset, - int(chunkView.Size), - buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)], - !chunkView.IsFullChunk) + n, err = util.ReadUrl(fmt.Sprintf("http://%s/%s", volumeServerAddress, chunkView.FileId), chunkView.CipherKey, chunkView.isGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)]) if err != nil { - glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, locations.Locations[0].Url, chunkView.FileId, n, err) + glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, volumeServerAddress, chunkView.FileId, n, err) err = fmt.Errorf("failed to read http://%s/%s: %v", - locations.Locations[0].Url, chunkView.FileId, err) + volumeServerAddress, chunkView.FileId, err) return } @@ -91,68 +90,75 @@ return } -func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath string) (entry *filer_pb.Entry, err error) { +func GetEntry(filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.Entry, err error) { - dir, name := FullPath(fullFilePath).DirAndName() + dir, name := fullFilePath.DirAndName() - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, Name: name, } - glog.V(3).Infof("read %s request: %v", fullFilePath, request) - resp, err := client.LookupDirectoryEntry(ctx, request) + // glog.V(3).Infof("read %s 
request: %v", fullFilePath, request) + resp, err := filer_pb.LookupEntry(client, request) if err != nil { - if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { + if err == filer_pb.ErrNotFound { return nil } - glog.V(3).Infof("read %s attr %v: %v", fullFilePath, request, err) + glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) return err } - if resp.Entry != nil { - entry = resp.Entry + if resp.Entry == nil { + // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry) + return nil } + entry = resp.Entry return nil }) return } -func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath string, fn func(entry *filer_pb.Entry)) (err error) { +func ReadDirAllEntries(filerClient FilerClient, fullDirPath FullPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) { - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - paginationLimit := 1024 + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { lastEntryName := "" - for { - - request := &filer_pb.ListEntriesRequest{ - Directory: fullDirPath, - StartFromFileName: lastEntryName, - Limit: uint32(paginationLimit), - } + request := &filer_pb.ListEntriesRequest{ + Directory: string(fullDirPath), + Prefix: prefix, + StartFromFileName: lastEntryName, + Limit: math.MaxUint32, + } - glog.V(3).Infof("read directory: %v", request) - resp, err := client.ListEntries(ctx, request) - if err != nil { - return fmt.Errorf("list %s: %v", fullDirPath, err) - } + glog.V(3).Infof("read directory: %v", request) + stream, err := client.ListEntries(context.Background(), request) + if err != nil { + return fmt.Errorf("list %s: %v", fullDirPath, err) + } - for _, entry := range resp.Entries { - fn(entry) - lastEntryName = entry.Name + var prevEntry *filer_pb.Entry + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + if prevEntry != nil { + fn(prevEntry, true) + } + break + } else { + return recvErr + } } - - if len(resp.Entries) < paginationLimit { - break + if prevEntry != nil { + fn(prevEntry, false) } - + prevEntry = resp.Entry } return nil diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go new file mode 100644 index 000000000..d0792ac66 --- /dev/null +++ b/weed/filer2/filer_delete_entry.go @@ -0,0 +1,125 @@ +package filer2 + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) { + if p == "/" { + return nil + } + + entry, findErr := f.FindEntry(ctx, p) + if findErr != nil { + return findErr + } + + isCollection := f.isBucket(entry) + + var chunks []*filer_pb.FileChunk + chunks = append(chunks, entry.Chunks...) + if entry.IsDirectory() { + // delete the folder children, not including the folder itself + var dirChunks []*filer_pb.FileChunk + dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection) + if err != nil { + glog.V(0).Infof("delete directory %s: %v", p, err) + return fmt.Errorf("delete directory %s: %v", p, err) + } + chunks = append(chunks, dirChunks...) 
+ } + + // delete the file or folder + err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks) + if err != nil { + return fmt.Errorf("delete file %s: %v", p, err) + } + + if shouldDeleteChunks && !isCollection { + go f.DeleteChunks(chunks) + } + if isCollection { + collectionName := entry.Name() + f.doDeleteCollection(collectionName) + f.deleteBucket(collectionName) + } + + return nil +} + +func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (chunks []*filer_pb.FileChunk, err error) { + + lastFileName := "" + includeLastFile := false + for { + entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize) + if err != nil { + glog.Errorf("list folder %s: %v", entry.FullPath, err) + return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err) + } + if lastFileName == "" && !isRecursive && len(entries) > 0 { + // only for first iteration in the loop + return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath) + } + + for _, sub := range entries { + lastFileName = sub.Name() + var dirChunks []*filer_pb.FileChunk + if sub.IsDirectory() { + dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks) + chunks = append(chunks, dirChunks...) + } else { + chunks = append(chunks, sub.Chunks...) + } + if err != nil && !ignoreRecursiveError { + return nil, err + } + } + + if len(entries) < PaginationSize { + break + } + } + + f.cacheDelDirectory(string(entry.FullPath)) + + glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks) + + if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil { + return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr) + } + f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) + + return chunks, nil +} + +func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool) (err error) { + + glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks) + + if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil { + return fmt.Errorf("filer store delete: %v", storeDeletionErr) + } + f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) + + return nil +} + +func (f *Filer) doDeleteCollection(collectionName string) (err error) { + + return f.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ + Name: collectionName, + }) + if err != nil { + glog.Infof("delete collection %s: %v", collectionName, err) + } + return err + }) + +} diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go index 25e27e504..3a64f636e 100644 --- a/weed/filer2/filer_deletion.go +++ b/weed/filer2/filer_deletion.go @@ -10,8 +10,6 @@ import ( func (f *Filer) loopProcessingDeletion() { - ticker := time.NewTicker(5 * time.Second) - lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { m := make(map[string]operation.LookupResult) for _, vid := range vids { @@ -31,37 +29,35 @@ func (f *Filer) loopProcessingDeletion() { return m, nil } - var fileIds []string + var deletionCount int for { - select { - case fid := <-f.fileIdDeletionChan: - fileIds = append(fileIds, fid) - if len(fileIds) >= 4096 { - glog.V(1).Infof("deleting fileIds len=%d", 
len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) - fileIds = fileIds[:0] - } - case <-ticker.C: - if len(fileIds) > 0 { - glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) - fileIds = fileIds[:0] + deletionCount = 0 + f.fileIdDeletionQueue.Consume(func(fileIds []string) { + deletionCount = len(fileIds) + _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) + if err != nil { + glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err) + } else { + glog.V(1).Infof("deleting fileIds len=%d", deletionCount) } + }) + + if deletionCount == 0 { + time.Sleep(1123 * time.Millisecond) } } } -func (f *Filer) DeleteChunks(fullpath FullPath, chunks []*filer_pb.FileChunk) { +func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) { for _, chunk := range chunks { - glog.V(3).Infof("deleting %s chunk %s", fullpath, chunk.String()) - f.fileIdDeletionChan <- chunk.GetFileIdString() + f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString()) } } // DeleteFileByFileId direct delete by file id. // Only used when the fileId is not being managed by snapshots. func (f *Filer) DeleteFileByFileId(fileId string) { - f.fileIdDeletionChan <- fileId + f.fileIdDeletionQueue.EnQueue(fileId) } func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { @@ -70,7 +66,7 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { return } if newEntry == nil { - f.DeleteChunks(oldEntry.FullPath, oldEntry.Chunks) + f.DeleteChunks(oldEntry.Chunks) } var toDelete []*filer_pb.FileChunk @@ -84,5 +80,5 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { toDelete = append(toDelete, oldChunk) } } - f.DeleteChunks(oldEntry.FullPath, toDelete) + f.DeleteChunks(toDelete) } diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go index 8caa44ee2..f724f79c2 100644 --- a/weed/filer2/filerstore.go +++ b/weed/filer2/filerstore.go @@ -2,7 +2,6 @@ package filer2 import ( "context" - "errors" "time" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -14,12 +13,13 @@ type FilerStore interface { // GetName gets the name to locate the configuration in filer.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error InsertEntry(context.Context, *Entry) error UpdateEntry(context.Context, *Entry) (err error) // err == filer2.ErrNotFound if not found FindEntry(context.Context, FullPath) (entry *Entry, err error) DeleteEntry(context.Context, FullPath) (err error) + DeleteFolderChildren(context.Context, FullPath) (err error) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) BeginTransaction(ctx context.Context) (context.Context, error) @@ -27,8 +27,6 @@ type FilerStore interface { RollbackTransaction(ctx context.Context) error } -var ErrNotFound = errors.New("filer: no entry is found in filer store") - type FilerStoreWrapper struct { actualStore FilerStore } @@ -46,8 +44,8 @@ func (fsw *FilerStoreWrapper) GetName() string { return fsw.actualStore.GetName() } -func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration) error { - return fsw.actualStore.Initialize(configuration) +func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration, prefix string) error { + return 
fsw.actualStore.Initialize(configuration, prefix) } func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error { @@ -97,6 +95,16 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err return fsw.actualStore.DeleteEntry(ctx, fp) } +func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp FullPath) (err error) { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds()) + }() + + return fsw.actualStore.DeleteFolderChildren(ctx, fp) +} + func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) { stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc() start := time.Now() diff --git a/weed/filer2/fullpath.go b/weed/filer2/fullpath.go index 191e51cf3..133069f93 100644 --- a/weed/filer2/fullpath.go +++ b/weed/filer2/fullpath.go @@ -3,6 +3,8 @@ package filer2 import ( "path/filepath" "strings" + + "github.com/chrislusf/seaweedfs/weed/util" ) type FullPath string @@ -34,3 +36,7 @@ func (fp FullPath) Child(name string) FullPath { } return FullPath(dir + "/" + name) } + +func (fp FullPath) AsInode() uint64 { + return uint64(util.HashStringToLong(string(fp))) +} diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index d00eba859..807fcb56f 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -5,12 +5,14 @@ import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - weed_util "github.com/chrislusf/seaweedfs/weed/util" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -29,8 +31,8 @@ func (store *LevelDBStore) GetName() string { return "leveldb" } -func (store *LevelDBStore) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir) } @@ -93,7 +95,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPa data, err := store.db.Get(key, nil) if err == leveldb.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) @@ -123,6 +125,34 @@ func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.Full return nil } +func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + + batch := new(leveldb.Batch) + + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + iter := store.db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + batch.Delete([]byte(genKey(string(fullpath), 
fileName))) + } + iter.Release() + + err = store.db.Write(batch, nil) + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 904de8c97..497158420 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -9,7 +9,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, 0) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDBStore{} @@ -30,7 +30,7 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(ctx, entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } @@ -64,7 +64,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, 0) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDBStore{} diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go index 4b47d2eb3..0b07c6833 100644 --- a/weed/filer2/leveldb2/leveldb2_store.go +++ b/weed/filer2/leveldb2/leveldb2_store.go @@ -8,12 +8,14 @@ import ( "io" "os" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - weed_util "github.com/chrislusf/seaweedfs/weed/util" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -29,8 +31,8 @@ func (store *LevelDB2Store) GetName() string { return "leveldb2" } -func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir, 8) } @@ -103,7 +105,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullP data, err := store.dbs[partitionId].Get(key, nil) if err == leveldb.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) @@ -134,6 +136,34 @@ func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.Ful return nil } +func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) + + batch := new(leveldb.Batch) + + iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + batch.Delete(append(directoryPrefix, []byte(fileName)...)) + } + iter.Release() + + err = 
store.dbs[partitionId].Write(batch, nil) + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go index e28ef7dac..dc94f2ac7 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -9,7 +9,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, 0) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDB2Store{} @@ -30,7 +30,7 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(ctx, entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } @@ -64,7 +64,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, 0) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDB2Store{} diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go deleted file mode 100644 index 9c10a5472..000000000 --- a/weed/filer2/memdb/memdb_store.go +++ /dev/null @@ -1,132 +0,0 @@ -package memdb - -import ( - "context" - "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/google/btree" - "strings" - "sync" -) - -func init() { - filer2.Stores = append(filer2.Stores, &MemDbStore{}) -} - -type MemDbStore struct { - tree *btree.BTree - treeLock sync.Mutex -} - -type entryItem struct { - *filer2.Entry -} - -func (a entryItem) Less(b btree.Item) bool { - return strings.Compare(string(a.FullPath), string(b.(entryItem).FullPath)) < 0 -} - -func (store *MemDbStore) GetName() string { - return "memory" -} - -func (store *MemDbStore) Initialize(configuration util.Configuration) (err error) { - store.tree = btree.New(8) - return nil -} - -func (store *MemDbStore) BeginTransaction(ctx context.Context) (context.Context, error) { - return ctx, nil -} -func (store *MemDbStore) CommitTransaction(ctx context.Context) error { - return nil -} -func (store *MemDbStore) RollbackTransaction(ctx context.Context) error { - return nil -} - -func (store *MemDbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - // println("inserting", entry.FullPath) - store.treeLock.Lock() - store.tree.ReplaceOrInsert(entryItem{entry}) - store.treeLock.Unlock() - return nil -} - -func (store *MemDbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - if _, err = store.FindEntry(ctx, entry.FullPath); err != nil { - return fmt.Errorf("no such file %s : %v", entry.FullPath, err) - } - store.treeLock.Lock() - store.tree.ReplaceOrInsert(entryItem{entry}) - store.treeLock.Unlock() - return nil -} - -func (store *MemDbStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { - item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}}) - if item == nil { - return nil, filer2.ErrNotFound - } - entry = item.(entryItem).Entry - return entry, nil -} - -func (store *MemDbStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { - 
store.treeLock.Lock() - store.tree.Delete(entryItem{&filer2.Entry{FullPath: fullpath}}) - store.treeLock.Unlock() - return nil -} - -func (store *MemDbStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { - - startFrom := string(fullpath) - if startFileName != "" { - startFrom = startFrom + "/" + startFileName - } - - store.tree.AscendGreaterOrEqual(entryItem{&filer2.Entry{FullPath: filer2.FullPath(startFrom)}}, - func(item btree.Item) bool { - if limit <= 0 { - return false - } - entry := item.(entryItem).Entry - // println("checking", entry.FullPath) - - if entry.FullPath == fullpath { - // skipping the current directory - // println("skipping the folder", entry.FullPath) - return true - } - - dir, name := entry.FullPath.DirAndName() - if name == startFileName { - if inclusive { - limit-- - entries = append(entries, entry) - } - return true - } - - // only iterate the same prefix - if !strings.HasPrefix(string(entry.FullPath), string(fullpath)) { - // println("breaking from", entry.FullPath) - return false - } - - if dir != string(fullpath) { - // this could be items in deeper directories - // println("skipping deeper folder", entry.FullPath) - return true - } - // now process the directory items - // println("adding entry", entry.FullPath) - limit-- - entries = append(entries, entry) - return true - }, - ) - return entries, nil -} diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go deleted file mode 100644 index 3fd806aeb..000000000 --- a/weed/filer2/memdb/memdb_store_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package memdb - -import ( - "context" - "github.com/chrislusf/seaweedfs/weed/filer2" - "testing" -) - -func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) - store := &MemDbStore{} - store.Initialize(nil) - filer.SetStore(store) - filer.DisableDirectoryCache() - - ctx := context.Background() - - fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") - - entry1 := &filer2.Entry{ - FullPath: fullpath, - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - - if err := filer.CreateEntry(ctx, entry1); err != nil { - t.Errorf("create entry %v: %v", entry1.FullPath, err) - return - } - - entry, err := filer.FindEntry(ctx, fullpath) - - if err != nil { - t.Errorf("find entry: %v", err) - return - } - - if entry.FullPath != entry1.FullPath { - t.Errorf("find wrong entry: %v", entry.FullPath) - return - } - -} - -func TestCreateFileAndList(t *testing.T) { - filer := filer2.NewFiler(nil, nil) - store := &MemDbStore{} - store.Initialize(nil) - filer.SetStore(store) - filer.DisableDirectoryCache() - - ctx := context.Background() - - entry1 := &filer2.Entry{ - FullPath: filer2.FullPath("/home/chris/this/is/one/file1.jpg"), - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - - entry2 := &filer2.Entry{ - FullPath: filer2.FullPath("/home/chris/this/is/one/file2.jpg"), - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - - filer.CreateEntry(ctx, entry1) - filer.CreateEntry(ctx, entry2) - - // checking the 2 files - entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "", false, 100) - - if err != nil { - t.Errorf("list entries: %v", err) - return - } - - if len(entries) != 2 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - if entries[0].FullPath != entry1.FullPath { - t.Errorf("find wrong entry 1: %v", 
entries[0].FullPath) - return - } - - if entries[1].FullPath != entry2.FullPath { - t.Errorf("find wrong entry 2: %v", entries[1].FullPath) - return - } - - // checking the offset - entries, err = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // checking root directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // add file3 - file3Path := filer2.FullPath("/home/chris/this/is/file3.jpg") - entry3 := &filer2.Entry{ - FullPath: file3Path, - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - filer.CreateEntry(ctx, entry3) - - // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) - if len(entries) != 2 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // delete file and count - filer.DeleteEntryMetaAndData(ctx, file3Path, false, false, false) - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - -} diff --git a/weed/filer2/mysql/mysql_store.go b/weed/filer2/mysql/mysql_store.go index e18299bd2..63d99cd9d 100644 --- a/weed/filer2/mysql/mysql_store.go +++ b/weed/filer2/mysql/mysql_store.go @@ -26,28 +26,35 @@ func (store *MysqlStore) GetName() string { return "mysql" } -func (store *MysqlStore) Initialize(configuration util.Configuration) (err error) { +func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), + configuration.GetBool(prefix+"interpolateParams"), ) } -func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int) (err error) { +func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int, + interpolateParams bool) (err error) { store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)" store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?" store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?" store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?" + store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?" store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? 
AND directory=? ORDER BY NAME ASC LIMIT ?" store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?" sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) + if interpolateParams { + sqlUrl += "&interpolateParams=true" + } + var dbErr error store.DB, dbErr = sql.Open("mysql", sqlUrl) if dbErr != nil { diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go index ffd3d1e01..27a0c2513 100644 --- a/weed/filer2/postgres/postgres_store.go +++ b/weed/filer2/postgres/postgres_store.go @@ -26,16 +26,16 @@ func (store *PostgresStore) GetName() string { return "postgres" } -func (store *PostgresStore) Initialize(configuration util.Configuration) (err error) { +func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetString("sslmode"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetString(prefix+"sslmode"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), ) } @@ -45,6 +45,7 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4" store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" + store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2" store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer2/redis/redis_cluster_store.go index 11c315391..eaaecb740 100644 --- a/weed/filer2/redis/redis_cluster_store.go +++ b/weed/filer2/redis/redis_cluster_store.go @@ -18,17 +18,25 @@ func (store *RedisClusterStore) GetName() string { return "redis_cluster" } -func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) { + + configuration.SetDefault(prefix+"useReadOnly", true) + configuration.SetDefault(prefix+"routeByLatency", true) + return store.initialize( - configuration.GetStringSlice("addresses"), - configuration.GetString("password"), + configuration.GetStringSlice(prefix+"addresses"), + configuration.GetString(prefix+"password"), + configuration.GetBool(prefix+"useReadOnly"), + configuration.GetBool(prefix+"routeByLatency"), ) } -func (store *RedisClusterStore) initialize(addresses []string, password string) (err error) { +func (store *RedisClusterStore) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) { store.Client = 
redis.NewClusterClient(&redis.ClusterOptions{ - Addrs: addresses, - Password: password, + Addrs: addresses, + Password: password, + ReadOnly: readOnly, + RouteByLatency: routeByLatency, }) return } diff --git a/weed/filer2/redis/redis_store.go b/weed/filer2/redis/redis_store.go index c56fa014c..9debdb070 100644 --- a/weed/filer2/redis/redis_store.go +++ b/weed/filer2/redis/redis_store.go @@ -18,11 +18,11 @@ func (store *RedisStore) GetName() string { return "redis" } -func (store *RedisStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("address"), - configuration.GetString("password"), - configuration.GetInt("database"), + configuration.GetString(prefix+"address"), + configuration.GetString(prefix+"password"), + configuration.GetInt(prefix+"database"), ) } diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index ce41d4d70..c5b9d9416 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -3,12 +3,15 @@ package redis import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/go-redis/redis" "sort" "strings" "time" + + "github.com/go-redis/redis" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) const ( @@ -62,7 +65,7 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2 data, err := store.Client.Get(string(fullpath)).Result() if err == redis.Nil { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { @@ -99,10 +102,29 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath file return nil } +func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + + members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() + if err != nil { + return fmt.Errorf("delete folder %s : %v", fullpath, err) + } + + for _, fileName := range members { + path := filer2.NewFullPath(string(fullpath), fileName) + _, err = store.Client.Del(string(path)).Result() + if err != nil { + return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) + } + } + + return nil +} + func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { - members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() + dirListKey := genDirectoryListKey(string(fullpath)) + members, err := store.Client.SMembers(dirListKey).Result() if err != nil { return nil, fmt.Errorf("list %s : %v", fullpath, err) } @@ -141,6 +163,13 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full if err != nil { glog.V(0).Infof("list %s : %v", path, err) } else { + if entry.TtlSec > 0 { + if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + store.Client.Del(string(path)).Result() + store.Client.SRem(dirListKey, fileName).Result() + continue + } + } entries = append(entries, entry) } } diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go index 01b87cad1..381d99144 100644 --- a/weed/filer2/stream.go +++ b/weed/filer2/stream.go @@ -26,8 +26,9 @@ func 
StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f } for _, chunkView := range chunkViews { + urlString := fileId2Url[chunkView.FileId] - _, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) { + err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.isGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) { w.Write(data) }) if err != nil { diff --git a/weed/filer2/tikv/tikv_store.go b/weed/filer2/tikv/tikv_store.go index 0e6b93c86..2a9dd6648 100644 --- a/weed/filer2/tikv/tikv_store.go +++ b/weed/filer2/tikv/tikv_store.go @@ -1,5 +1,6 @@ // +build !386 // +build !arm +// +build !windows package tikv @@ -12,6 +13,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" "github.com/pingcap/tidb/kv" @@ -30,8 +32,8 @@ func (store *TikvStore) GetName() string { return "tikv" } -func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) { - pdAddr := configuration.GetString("pdAddress") +func (store *TikvStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + pdAddr := configuration.GetString(prefix + "pdAddress") return store.initialize(pdAddr) } @@ -110,7 +112,7 @@ func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) data, err := store.getTx(ctx).Get(ctx, key) if err == kv.ErrNotExist { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) @@ -141,6 +143,38 @@ func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat return nil } +func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + + tx := store.getTx(ctx) + + iter, err := tx.Iter(directoryPrefix, nil) + if err != nil { + return fmt.Errorf("deleteFolderChildren %s: %v", fullpath, err) + } + defer iter.Close() + for iter.Valid() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + iter.Next() + continue + } + + if err = tx.Delete(genKey(string(fullpath), fileName)); err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + iter.Next() + } + + return nil +} + func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { diff --git a/weed/filer2/tikv/tikv_store_unsupported.go b/weed/filer2/tikv/tikv_store_unsupported.go index 95c8b2dad..713c84bf8 100644 --- a/weed/filer2/tikv/tikv_store_unsupported.go +++ b/weed/filer2/tikv/tikv_store_unsupported.go @@ -1,4 +1,4 @@ -// +build 386 arm +// +build 386 arm windows package tikv @@ -21,7 +21,7 @@ func (store *TikvStore) GetName() string { return "tikv" } -func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) { +func (store *TikvStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { return fmt.Errorf("not implemented for 32 bit computers") } @@ -55,6 +55,10 @@ func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat return fmt.Errorf("not implemented for 32 bit computers") } +func (store *TikvStore) DeleteFolderChildren(ctx 
context.Context, fullpath filer2.FullPath) (err error) { + return fmt.Errorf("not implemented for 32 bit computers") +} + func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { return nil, fmt.Errorf("not implemented for 32 bit computers") diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 79cf45385..483229b3f 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -3,7 +3,7 @@ package filesys import ( "context" "os" - "path" + "strings" "time" "github.com/chrislusf/seaweedfs/weed/filer2" @@ -14,9 +14,9 @@ import ( ) type Dir struct { - Path string - wfs *WFS - attributes *filer_pb.FuseAttributes + Path string + wfs *WFS + entry *filer_pb.Entry } var _ = fs.Node(&Dir{}) @@ -27,6 +27,11 @@ var _ = fs.HandleReadDirAller(&Dir{}) var _ = fs.NodeRemover(&Dir{}) var _ = fs.NodeRenamer(&Dir{}) var _ = fs.NodeSetattrer(&Dir{}) +var _ = fs.NodeGetxattrer(&Dir{}) +var _ = fs.NodeSetxattrer(&Dir{}) +var _ = fs.NodeRemovexattrer(&Dir{}) +var _ = fs.NodeListxattrer(&Dir{}) +var _ = fs.NodeForgetter(&Dir{}) func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { @@ -35,42 +40,41 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { if dir.Path == dir.wfs.option.FilerMountRootPath { dir.setRootDirAttributes(attr) + glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.Path, attr) return nil } - item := dir.wfs.listDirectoryEntriesCache.Get(dir.Path) - if item != nil && !item.Expired() { - entry := item.Value().(*filer_pb.Entry) + if err := dir.maybeLoadEntry(); err != nil { + glog.V(3).Infof("dir Attr %s,err: %+v", dir.Path, err) + return err + } - attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) - attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) - attr.Mode = os.FileMode(entry.Attributes.FileMode) - attr.Gid = entry.Attributes.Gid - attr.Uid = entry.Attributes.Uid + attr.Inode = filer2.FullPath(dir.Path).AsInode() + attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir + attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0) + attr.Crtime = time.Unix(dir.entry.Attributes.Crtime, 0) + attr.Gid = dir.entry.Attributes.Gid + attr.Uid = dir.entry.Attributes.Uid - return nil - } + glog.V(3).Infof("dir Attr %s, attr: %+v", dir.Path, attr) - entry, err := filer2.GetEntry(ctx, dir.wfs, dir.Path) - if err != nil { - glog.V(2).Infof("read dir %s attr: %v, error: %v", dir.Path, dir.attributes, err) - return err - } - dir.attributes = entry.Attributes + return nil +} - glog.V(2).Infof("dir %s: %v perm: %v", dir.Path, dir.attributes, os.FileMode(dir.attributes.FileMode)) +func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { - attr.Mode = os.FileMode(dir.attributes.FileMode) | os.ModeDir + glog.V(4).Infof("dir Getxattr %s", dir.Path) - attr.Mtime = time.Unix(dir.attributes.Mtime, 0) - attr.Ctime = time.Unix(dir.attributes.Crtime, 0) - attr.Gid = dir.attributes.Gid - attr.Uid = dir.attributes.Uid + if err := dir.maybeLoadEntry(); err != nil { + return err + } - return nil + return getxattr(dir.entry, req, resp) } func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { + attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode() + attr.Valid = time.Hour attr.Uid = dir.wfs.option.MountUid attr.Gid = dir.wfs.option.MountGid attr.Mode = dir.wfs.option.MountMode @@ -78,16 +82,25 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { attr.Ctime = dir.wfs.option.MountCtime attr.Mtime = 
dir.wfs.option.MountMtime attr.Atime = dir.wfs.option.MountMtime + attr.BlockSize = 1024 * 1024 } -func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File { - return &File{ - Name: name, - dir: dir, - wfs: dir.wfs, - entry: entry, - entryViewCache: nil, - } +func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node { + return dir.wfs.getNode(filer2.NewFullPath(dir.Path, name), func() fs.Node { + return &File{ + Name: name, + dir: dir, + wfs: dir.wfs, + entry: entry, + entryViewCache: nil, + } + }) +} + +func (dir *Dir) newDirectory(fullpath filer2.FullPath, entry *filer_pb.Entry) fs.Node { + return dir.wfs.getNode(fullpath, func() fs.Node { + return &Dir{Path: string(fullpath), wfs: dir.wfs, entry: entry} + }) } func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, @@ -109,94 +122,104 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, TtlSec: dir.wfs.option.TtlSec, }, }, + OExcl: req.Flags&fuse.OpenExclusive != 0, } - glog.V(1).Infof("create: %v", request) + glog.V(1).Infof("create %s/%s: %v", dir.Path, req.Name, req.Flags) - if request.Entry.IsDirectory { - if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) - return fuse.EIO + if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(client, request); err != nil { + if strings.Contains(err.Error(), "EEXIST") { + return fuse.EEXIST } - return nil - }); err != nil { - return nil, nil, err + return fuse.EIO } + return nil + }); err != nil { + return nil, nil, err } - - file := dir.newFile(req.Name, request.Entry) - if !request.Entry.IsDirectory { - file.isOpen = true + var node fs.Node + if request.Entry.IsDirectory { + node = dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), request.Entry) + return node, nil, nil } + + node = dir.newFile(req.Name, request.Entry) + file := node.(*File) + file.isOpen++ fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) - fh.dirtyMetadata = true return file, fh, nil } func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + newEntry := &filer_pb.Entry{ + Name: req.Name, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), + Uid: req.Uid, + Gid: req.Gid, + }, + } + + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: dir.Path, - Entry: &filer_pb.Entry{ - Name: req.Name, - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), - Uid: req.Uid, - Gid: req.Gid, - }, - }, + Entry: newEntry, } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err) - return fuse.EIO + return err } return nil }) if err == nil { - node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs} + node := dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), newEntry) return node, nil } - return nil, err + return nil, fuse.EIO } func (dir *Dir) Lookup(ctx 
context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) { - var entry *filer_pb.Entry - fullFilePath := path.Join(dir.Path, req.Name) + glog.V(4).Infof("dir Lookup %s: %s", dir.Path, req.Name) - item := dir.wfs.listDirectoryEntriesCache.Get(fullFilePath) - if item != nil && !item.Expired() { - entry = item.Value().(*filer_pb.Entry) - } + fullFilePath := filer2.NewFullPath(dir.Path, req.Name) + entry := dir.wfs.cacheGet(fullFilePath) if entry == nil { - entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath) + // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) + entry, err = filer2.GetEntry(dir.wfs, fullFilePath) if err != nil { - return nil, err + glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) + return nil, fuse.ENOENT } + dir.wfs.cacheSet(fullFilePath, entry, 5*time.Minute) + } else { + glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath) } if entry != nil { if entry.IsDirectory { - node = &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs, attributes: entry.Attributes} + node = dir.newDirectory(fullFilePath, entry) } else { node = dir.newFile(req.Name, entry) } - resp.EntryValid = time.Duration(0) + // resp.EntryValid = time.Second + resp.Attr.Inode = fullFilePath.AsInode() + resp.Attr.Valid = time.Second resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) - resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) + resp.Attr.Crtime = time.Unix(entry.Attributes.Crtime, 0) resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode) resp.Attr.Gid = entry.Attributes.Gid resp.Attr.Uid = entry.Attributes.Uid @@ -204,57 +227,32 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. return node, nil } + glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT } func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { - err = dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - paginationLimit := 1024 - remaining := dir.wfs.option.DirListingLimit - - lastEntryName := "" - - for remaining >= 0 { + glog.V(3).Infof("dir ReadDirAll %s", dir.Path) - request := &filer_pb.ListEntriesRequest{ - Directory: dir.Path, - StartFromFileName: lastEntryName, - Limit: uint32(paginationLimit), - } - - glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(ctx, request) - if err != nil { - glog.V(0).Infof("list %s: %v", dir.Path, err) - return fuse.EIO - } - - cacheTtl := estimatedCacheTtl(len(resp.Entries)) - - for _, entry := range resp.Entries { - if entry.IsDirectory { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir} - ret = append(ret, dirent) - } else { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_File} - ret = append(ret, dirent) - } - dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, cacheTtl) - lastEntryName = entry.Name - } - - remaining -= len(resp.Entries) - - if len(resp.Entries) < paginationLimit { - break - } + cacheTtl := 5 * time.Minute + readErr := filer2.ReadDirAllEntries(dir.wfs, filer2.FullPath(dir.Path), "", func(entry *filer_pb.Entry, isLast bool) { + fullpath := filer2.NewFullPath(dir.Path, entry.Name) + inode := fullpath.AsInode() + if entry.IsDirectory { + dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir} + ret = append(ret, dirent) + } else { + dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_File} + ret = append(ret, dirent) } - - return nil + dir.wfs.cacheSet(fullpath, entry, cacheTtl) }) 
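+ // ReadDirAllEntries streams the whole directory in one pass; any streaming failure is surfaced to the FUSE caller as EIO below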
+ if readErr != nil { + glog.V(0).Infof("list %s: %v", dir.Path, readErr) + return ret, fuse.EIO + } return ret, err } @@ -262,23 +260,29 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { if !req.Dir { - return dir.removeOneFile(ctx, req) + return dir.removeOneFile(req) } - return dir.removeFolder(ctx, req) + return dir.removeFolder(req) } -func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error { +func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error { - entry, err := filer2.GetEntry(ctx, dir.wfs, path.Join(dir.Path, req.Name)) + filePath := filer2.NewFullPath(dir.Path, req.Name) + entry, err := filer2.GetEntry(dir.wfs, filePath) if err != nil { return err } + if entry == nil { + return nil + } + + dir.wfs.deleteFileChunks(entry.Chunks) - dir.wfs.deleteFileChunks(ctx, entry.Chunks) + dir.wfs.cacheDelete(filePath) - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -287,22 +291,22 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro } glog.V(3).Infof("remove file: %v", request) - _, err := client.DeleteEntry(ctx, request) + _, err := client.DeleteEntry(context.Background(), request) if err != nil { - glog.V(3).Infof("remove file %s/%s: %v", dir.Path, req.Name, err) + glog.V(3).Infof("not found remove file %s/%s: %v", dir.Path, req.Name, err) return fuse.ENOENT } - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) - return nil }) } -func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { +func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error { - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + dir.wfs.cacheDelete(filer2.NewFullPath(dir.Path, req.Name)) + + return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -311,14 +315,12 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error } glog.V(3).Infof("remove directory entry: %v", request) - _, err := client.DeleteEntry(ctx, request) + _, err := client.DeleteEntry(context.Background(), request) if err != nil { - glog.V(3).Infof("remove %s/%s: %v", dir.Path, req.Name, err) + glog.V(3).Infof("not found remove %s/%s: %v", dir.Path, req.Name, err) return fuse.ENOENT } - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) - return nil }) @@ -326,66 +328,122 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - if dir.attributes == nil { - return nil + glog.V(3).Infof("%v dir setattr %+v", dir.Path, req) + + if err := dir.maybeLoadEntry(); err != nil { + return err } - glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle) if req.Valid.Mode() { - dir.attributes.FileMode = uint32(req.Mode) + dir.entry.Attributes.FileMode = uint32(req.Mode) } if req.Valid.Uid() { - dir.attributes.Uid = req.Uid + dir.entry.Attributes.Uid = req.Uid } if req.Valid.Gid() { - dir.attributes.Gid = req.Gid + dir.entry.Attributes.Gid = req.Gid } if req.Valid.Mtime() { - dir.attributes.Mtime = req.Mtime.Unix() + dir.entry.Attributes.Mtime = 
req.Mtime.Unix() + } + + dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) + + return dir.saveEntry() + +} + +func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { + + glog.V(4).Infof("dir Setxattr %s: %s", dir.Path, req.Name) + + if err := dir.maybeLoadEntry(); err != nil { + return err + } + + if err := setxattr(dir.entry, req); err != nil { + return err + } + + dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) + + return dir.saveEntry() + +} + +func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error { + + glog.V(4).Infof("dir Removexattr %s: %s", dir.Path, req.Name) + + if err := dir.maybeLoadEntry(); err != nil { + return err + } + + if err := removexattr(dir.entry, req); err != nil { + return err + } + + dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) + + return dir.saveEntry() + +} + +func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { + + glog.V(4).Infof("dir Listxattr %s", dir.Path) + + if err := dir.maybeLoadEntry(); err != nil { + return err + } + + if err := listxattr(dir.entry, req, resp); err != nil { + return err } + return nil + +} + +func (dir *Dir) Forget() { + glog.V(3).Infof("Forget dir %s", dir.Path) + + dir.wfs.forgetNode(filer2.FullPath(dir.Path)) +} + +func (dir *Dir) maybeLoadEntry() error { + if dir.entry == nil { + parentDirPath, name := filer2.FullPath(dir.Path).DirAndName() + entry, err := dir.wfs.maybeLoadEntry(parentDirPath, name) + if err != nil { + return err + } + dir.entry = entry + } + return nil +} + +func (dir *Dir) saveEntry() error { + parentDir, name := filer2.FullPath(dir.Path).DirAndName() - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + + return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: parentDir, - Entry: &filer_pb.Entry{ - Name: name, - Attributes: dir.attributes, - }, + Entry: dir.entry, } - glog.V(1).Infof("set attr directory entry: %v", request) - _, err := client.UpdateEntry(ctx, request) + glog.V(1).Infof("save dir entry: %v", request) + _, err := client.UpdateEntry(context.Background(), request) if err != nil { - glog.V(0).Infof("UpdateEntry %s: %v", dir.Path, err) + glog.V(0).Infof("UpdateEntry dir %s/%s: %v", parentDir, name, err) return fuse.EIO } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) - return nil }) - -} - -func estimatedCacheTtl(numEntries int) time.Duration { - if numEntries < 100 { - // 30 ms per entry - return 3 * time.Second - } - if numEntries < 1000 { - // 10 ms per entry - return 10 * time.Second - } - if numEntries < 10000 { - // 10 ms per entry - return 100 * time.Second - } - - // 2 ms per entry - return time.Duration(numEntries*2) * time.Millisecond } diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index 94e443649..61ed04c26 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -35,8 +35,8 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, }, } - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) return fuse.EIO } @@ -51,7 +51,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, 
func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { - if err := file.maybeLoadAttributes(ctx); err != nil { + if err := file.maybeLoadEntry(ctx); err != nil { return "", err } diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index e72a15758..9b0c0fe6e 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -2,7 +2,9 @@ package filesys import ( "context" - "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" @@ -11,8 +13,9 @@ import ( func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error { newDir := newDirectory.(*Dir) + glog.V(4).Infof("dir Rename %s/%s => %s/%s", dir.Path, req.OldName, newDir.Path, req.NewName) - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: dir.Path, @@ -21,13 +24,40 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector NewName: req.NewName, } - _, err := client.AtomicRenameEntry(ctx, request) + _, err := client.AtomicRenameEntry(context.Background(), request) if err != nil { - return fmt.Errorf("renaming %s/%s => %s/%s: %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) + glog.V(0).Infof("dir Rename %s/%s => %s/%s : %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) + return fuse.EIO } return nil }) + if err == nil { + newPath := filer2.NewFullPath(newDir.Path, req.NewName) + oldPath := filer2.NewFullPath(dir.Path, req.OldName) + dir.wfs.cacheDelete(newPath) + dir.wfs.cacheDelete(oldPath) + + oldFileNode := dir.wfs.getNode(oldPath, func() fs.Node { + return nil + }) + newDirNode := dir.wfs.getNode(filer2.FullPath(dir.Path), func() fs.Node { + return nil + }) + dir.wfs.forgetNode(newPath) + dir.wfs.forgetNode(oldPath) + if oldFileNode != nil && newDirNode != nil { + oldFile := oldFileNode.(*File) + oldFile.Name = req.NewName + oldFile.dir = newDirNode.(*Dir) + dir.wfs.getNode(newPath, func() fs.Node { + return oldFile + }) + + } + } + + return err } diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index baee412b2..7e33c97a7 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -4,8 +4,8 @@ import ( "bytes" "context" "fmt" + "io" "sync" - "sync/atomic" "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -15,113 +15,68 @@ import ( ) type ContinuousDirtyPages struct { - hasData bool - Offset int64 - Size int64 - Data []byte - f *File - lock sync.Mutex + intervals *ContinuousIntervals + f *File + lock sync.Mutex + collection string + replication string } func newDirtyPages(file *File) *ContinuousDirtyPages { return &ContinuousDirtyPages{ - Data: nil, - f: file, + intervals: &ContinuousIntervals{}, + f: file, } } func (pages *ContinuousDirtyPages) releaseResource() { - if pages.Data != nil { - pages.f.wfs.bufPool.Put(pages.Data) - pages.Data = nil - atomic.AddInt32(&counter, -1) - glog.V(3).Infof("%s/%s releasing resource %d", pages.f.dir.Path, pages.f.Name, counter) - } } var counter = int32(0) -func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err 
error) { pages.lock.Lock() defer pages.lock.Unlock() - var chunk *filer_pb.FileChunk + glog.V(3).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), offset, offset+int64(len(data))) if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) { // this is more than what buffer can hold. - return pages.flushAndSave(ctx, offset, data) - } - - if pages.Data == nil { - pages.Data = pages.f.wfs.bufPool.Get().([]byte) - atomic.AddInt32(&counter, 1) - glog.V(3).Infof("%s/%s acquire resource %d", pages.f.dir.Path, pages.f.Name, counter) + return pages.flushAndSave(offset, data) } - if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) || - pages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) { - // if the data is out of range, - // or buffer is full if adding new data, - // flush current buffer and add new data + pages.intervals.AddInterval(data, offset) - // println("offset", offset, "size", len(data), "existing offset", pages.Offset, "size", pages.Size) + var chunk *filer_pb.FileChunk + var hasSavedData bool - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s add save [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) - chunks = append(chunks, chunk) - } - } else { - glog.V(0).Infof("%s/%s add save [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) - return + if pages.intervals.TotalSize() > pages.f.wfs.option.ChunkSizeLimit { + chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage() + if hasSavedData { + chunks = append(chunks, chunk) } - pages.Offset = offset - copy(pages.Data, data) - pages.Size = int64(len(data)) - return } - if offset != pages.Offset+pages.Size { - // when this happens, debug shows the data overlapping with existing data is empty - // the data is not just append - if offset == pages.Offset && int(pages.Size) < len(data) { - // glog.V(2).Infof("pages[%d,%d) pages.Data len=%v, data len=%d, pages.Size=%d", pages.Offset, pages.Offset+pages.Size, len(pages.Data), len(data), pages.Size) - copy(pages.Data[pages.Size:], data[pages.Size:]) - } else { - if pages.Size != 0 { - glog.V(1).Infof("%s/%s add page: pages [%d, %d) write [%d, %d)", pages.f.dir.Path, pages.f.Name, pages.Offset, pages.Offset+pages.Size, offset, offset+int64(len(data))) - } - return pages.flushAndSave(ctx, offset, data) - } - } else { - copy(pages.Data[offset-pages.Offset:], data) - } - - pages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset) - return } -func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { var chunk *filer_pb.FileChunk + var newChunks []*filer_pb.FileChunk // flush existing - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s flush existing [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) - chunks = append(chunks, chunk) + if newChunks, err = pages.saveExistingPagesToStorage(); err == nil { + if newChunks != nil { + chunks = append(chunks, newChunks...) 
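
For orientation before the rest of flushAndSave: AddPage above no longer copies into one flat buffer; each write is merged into a ContinuousIntervals set (the new dirty_page_interval.go further down in this patch), and once the buffered total passes ChunkSizeLimit the largest contiguous run is flushed first. A small sketch of those semantics, assuming the fmt, io and io/ioutil imports and a made-up limit:

	func sketchDirtyBuffering(chunkSizeLimit int64) {
		c := &ContinuousIntervals{}
		c.AddInterval([]byte("hello"), 0) // one run: [0,5)
		c.AddInterval([]byte("HE"), 0)    // overlapping write; the run now reads "HEllo"
		c.AddInterval([]byte("!"), 9)     // disjoint write starts a second run: [9,10)

		// reads always see the newest buffered bytes
		buf := make([]byte, 5)
		offset, size := c.ReadData(buf, 0) // offset=0, size=5, buf holds "HEllo"
		fmt.Printf("buffered [%d,%d): %s\n", offset, offset+int64(size), buf[:size])

		// when too much is buffered, the largest run is uploaded first
		if c.TotalSize() > chunkSizeLimit {
			largest := c.RemoveLargestIntervalLinkedList() // pops the 5-byte run
			_, _ = io.Copy(ioutil.Discard, largest.ToReader())
		}
	}
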
} } else { - glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) return } - pages.Size = 0 - pages.Offset = 0 // flush the new page - if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil { + if chunk, err = pages.saveToStorage(bytes.NewReader(data), offset, int64(len(data))); err == nil { if chunk != nil { glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) chunks = append(chunks, chunk) @@ -134,40 +89,62 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6 return } -func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) FlushToStorage() (chunks []*filer_pb.FileChunk, err error) { pages.lock.Lock() defer pages.lock.Unlock() - if pages.Size == 0 { - return nil, nil - } + return pages.saveExistingPagesToStorage() +} - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - pages.Size = 0 - pages.Offset = 0 - if chunk != nil { - glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) +func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() (chunks []*filer_pb.FileChunk, err error) { + + var hasSavedData bool + var chunk *filer_pb.FileChunk + + for { + + chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage() + if !hasSavedData { + return chunks, err + } + + if err == nil { + chunks = append(chunks, chunk) + } else { + return } } - return + } -func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) { +func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *filer_pb.FileChunk, hasSavedData bool, err error) { - if pages.Size == 0 { - return nil, nil + maxList := pages.intervals.RemoveLargestIntervalLinkedList() + if maxList == nil { + return nil, false, nil } - return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset) + chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size()) + if err == nil { + hasSavedData = true + glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId) + } else { + glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err) + return + } + + return } -func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) { +func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) { var fileId, host string var auth security.EncodedJwt - if err := pages.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + dir, _ := pages.f.fullpath().DirAndName() + + if err := pages.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -175,15 +152,21 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte Collection: pages.f.wfs.option.Collection, TtlSec: pages.f.wfs.option.TtlSec, DataCenter: pages.f.wfs.option.DataCenter, + ParentPath: dir, } - resp, err := client.AssignVolume(ctx, request) + resp, err := client.AssignVolume(context.Background(), request) if err != nil { 
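
saveToStorage continues below with the error handling and the actual upload through the reworked operation.Upload, whose result now carries CipherKey and Gzip so the chunk records how its bytes must be read back. A condensed, hypothetical caller showing just that upload half, assuming the bytes and time imports alongside the operation, filer_pb and security packages; uploadOneChunk, the example URL and the fileId argument are illustrative only, while the signatures match this patch:

	func uploadOneChunk(fileUrl, fileId string, data []byte, cipher bool, auth security.EncodedJwt) (*filer_pb.FileChunk, error) {
		// fileUrl would look like "http://127.0.0.1:8080/3,01637037d6" (illustrative)
		res, err := operation.Upload(fileUrl, "chunk", cipher, bytes.NewReader(data), false, "", nil, auth)
		if err != nil {
			return nil, err
		}
		return &filer_pb.FileChunk{
			FileId:    fileId,
			Offset:    0,
			Size:      uint64(len(data)),
			Mtime:     time.Now().UnixNano(),
			ETag:      res.ETag,
			CipherKey: res.CipherKey, // non-nil only when cipher was requested
			IsGzipped: res.Gzip > 0,  // nonzero Gzip means the stored bytes are compressed
		}, nil
	}
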
glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) + host = pages.f.wfs.AdjustedUrl(host) + pages.collection, pages.replication = resp.Collection, resp.Replication return nil }); err != nil { @@ -191,8 +174,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte } fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, auth) + uploadResult, err := operation.Upload(fileUrl, pages.f.Name, pages.f.wfs.option.Cipher, reader, false, "", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) return nil, fmt.Errorf("upload data: %v", err) @@ -203,11 +185,13 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte } return &filer_pb.FileChunk{ - FileId: fileId, - Offset: offset, - Size: uint64(len(buf)), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, + FileId: fileId, + Offset: offset, + Size: uint64(size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, }, nil } @@ -218,3 +202,18 @@ func max(x, y int64) int64 { } return y } +func min(x, y int64) int64 { + if x < y { + return x + } + return y +} + +func (pages *ContinuousDirtyPages) ReadDirtyData(data []byte, startOffset int64) (offset int64, size int) { + + pages.lock.Lock() + defer pages.lock.Unlock() + + return pages.intervals.ReadData(data, startOffset) + +} diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go new file mode 100644 index 000000000..ec94c6df1 --- /dev/null +++ b/weed/filesys/dirty_page_interval.go @@ -0,0 +1,220 @@ +package filesys + +import ( + "bytes" + "io" + "math" +) + +type IntervalNode struct { + Data []byte + Offset int64 + Size int64 + Next *IntervalNode +} + +type IntervalLinkedList struct { + Head *IntervalNode + Tail *IntervalNode +} + +type ContinuousIntervals struct { + lists []*IntervalLinkedList +} + +func (list *IntervalLinkedList) Offset() int64 { + return list.Head.Offset +} +func (list *IntervalLinkedList) Size() int64 { + return list.Tail.Offset + list.Tail.Size - list.Head.Offset +} +func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) { + // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size) + list.Tail.Next = node + list.Tail = node +} +func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) { + // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size) + node.Next = list.Head + list.Head = node +} + +func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) { + t := list.Head + for { + + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart < nodeStop { + // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop) + copy(buf[nodeStart-start:], 
t.Data[nodeStart-t.Offset:nodeStop-t.Offset]) + } + + if t.Next == nil { + break + } + t = t.Next + } +} + +func (c *ContinuousIntervals) TotalSize() (total int64) { + for _, list := range c.lists { + total += list.Size() + } + return +} + +func subList(list *IntervalLinkedList, start, stop int64) *IntervalLinkedList { + var nodes []*IntervalNode + for t := list.Head; t != nil; t = t.Next { + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart >= nodeStop { + // skip non-overlapping IntervalNode + continue + } + nodes = append(nodes, &IntervalNode{ + Data: t.Data[nodeStart-t.Offset : nodeStop-t.Offset], + Offset: nodeStart, + Size: nodeStop - nodeStart, + Next: nil, + }) + } + for i := 1; i < len(nodes); i++ { + nodes[i-1].Next = nodes[i] + } + return &IntervalLinkedList{ + Head: nodes[0], + Tail: nodes[len(nodes)-1], + } +} + +func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { + + interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))} + + var newLists []*IntervalLinkedList + for _, list := range c.lists { + // if the list is entirely to the left of the new interval, keep it + if list.Tail.Offset+list.Tail.Size <= interval.Offset { + newLists = append(newLists, list) + } + // if the list is entirely to the right of the new interval, keep it + if interval.Offset+interval.Size <= list.Head.Offset { + newLists = append(newLists, list) + } + // if the new interval overwrites the right part of the list + if list.Head.Offset < interval.Offset && interval.Offset < list.Tail.Offset+list.Tail.Size { + // keep a new list holding the left part of the existing list + newLists = append(newLists, subList(list, list.Offset(), interval.Offset)) + } + // if the new interval overwrites the left part of the list + if list.Head.Offset < interval.Offset+interval.Size && interval.Offset+interval.Size < list.Tail.Offset+list.Tail.Size { + // keep a new list holding the right part of the existing list + newLists = append(newLists, subList(list, interval.Offset+interval.Size, list.Tail.Offset+list.Tail.Size)) + } + // skip anything that is fully overwritten by the new interval + } + + c.lists = newLists + // add the new interval to the lists, connecting neighbor lists + var prevList, nextList *IntervalLinkedList + + for _, list := range c.lists { + if list.Head.Offset == interval.Offset+interval.Size { + nextList = list + break + } + } + + for _, list := range c.lists { + if list.Head.Offset+list.Size() == offset { + list.addNodeToTail(interval) + prevList = list + break + } + } + + if prevList != nil && nextList != nil { + // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size) + prevList.Tail.Next = nextList.Head + prevList.Tail = nextList.Tail + c.removeList(nextList) + } else if nextList != nil { + // the scans above only append to tails, so prepend to nextList's head here + nextList.addNodeToHead(interval) + } + if prevList == nil && nextList == nil { + c.lists = append(c.lists, &IntervalLinkedList{ + Head: interval, + Tail: interval, + }) + } + + return +} + +func (c *ContinuousIntervals) RemoveLargestIntervalLinkedList() *IntervalLinkedList { + var maxSize int64 + maxIndex := -1 + for k, list := range c.lists { + if maxSize <= list.Size() { + maxSize = list.Size() + maxIndex = k + } + } + if maxSize <= 0 { + return nil + } + + t := c.lists[maxIndex] + c.lists = append(c.lists[0:maxIndex],
c.lists[maxIndex+1:]...) + return t + +} + +func (c *ContinuousIntervals) removeList(target *IntervalLinkedList) { + index := -1 + for k, list := range c.lists { + if list.Offset() == target.Offset() { + index = k + } + } + if index < 0 { + return + } + + c.lists = append(c.lists[0:index], c.lists[index+1:]...) + +} + +func (c *ContinuousIntervals) ReadData(data []byte, startOffset int64) (offset int64, size int) { + var minOffset int64 = math.MaxInt64 + var maxStop int64 + for _, list := range c.lists { + start := max(startOffset, list.Offset()) + stop := min(startOffset+int64(len(data)), list.Offset()+list.Size()) + if start <= stop { + list.ReadData(data[start-startOffset:], start, stop) + minOffset = min(minOffset, start) + maxStop = max(maxStop, stop) + } + } + + if minOffset == math.MaxInt64 { + return 0, 0 + } + + offset = minOffset + size = int(maxStop - offset) + return +} + +func (l *IntervalLinkedList) ToReader() io.Reader { + var readers []io.Reader + t := l.Head + readers = append(readers, bytes.NewReader(t.Data)) + for t.Next != nil { + t = t.Next + readers = append(readers, bytes.NewReader(t.Data)) + } + return io.MultiReader(readers...) +} diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go new file mode 100644 index 000000000..184be2f3b --- /dev/null +++ b/weed/filesys/dirty_page_interval_test.go @@ -0,0 +1,72 @@ +package filesys + +import ( + "bytes" + "testing" +) + +func TestContinuousIntervals_AddIntervalAppend(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25 + c.AddInterval(getBytes(25, 3), 0) + // _, _, 23, 23, 23, 23 + c.AddInterval(getBytes(23, 4), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 23, 23) + +} + +func TestContinuousIntervals_AddIntervalInnerOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25, 25, 25 + c.AddInterval(getBytes(25, 5), 0) + // _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 25) + +} + +func TestContinuousIntervals_AddIntervalFullOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, + c.AddInterval(getBytes(25, 1), 0) + // _, _, _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 4) + // _, _, _, 24, 24, 24, 24 + c.AddInterval(getBytes(24, 4), 3) + + // _, 22, 22 + c.AddInterval(getBytes(22, 2), 1) + + expectedData(t, c, 0, 25, 22, 22, 24, 24, 24, 24) + +} + +func expectedData(t *testing.T, c *ContinuousIntervals, offset int, data ...byte) { + start, stop := int64(offset), int64(offset+len(data)) + for _, list := range c.lists { + nodeStart, nodeStop := max(start, list.Head.Offset), min(stop, list.Head.Offset+list.Size()) + if nodeStart < nodeStop { + buf := make([]byte, nodeStop-nodeStart) + list.ReadData(buf, nodeStart, nodeStop) + if bytes.Compare(buf, data[nodeStart-start:nodeStop-start]) != 0 { + t.Errorf("expected %v actual %v", data[nodeStart-start:nodeStop-start], buf) + } + } + } +} + +func getBytes(content byte, length int) []byte { + data := make([]byte, length) + for i := 0; i < length; i++ { + data[i] = content + } + return data +} diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 1b359ebbe..69d440a73 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -3,7 +3,6 @@ package filesys import ( "context" "os" - "path/filepath" "sort" "time" @@ -20,6 +19,11 @@ var _ = fs.Node(&File{}) var _ = fs.NodeOpener(&File{}) var _ = fs.NodeFsyncer(&File{}) var _ = fs.NodeSetattrer(&File{}) +var _ = fs.NodeGetxattrer(&File{}) +var _ = fs.NodeSetxattrer(&File{}) +var _ = 
fs.NodeRemovexattrer(&File{}) +var _ = fs.NodeListxattrer(&File{}) +var _ = fs.NodeForgetter(&File{}) type File struct { Name string @@ -27,21 +31,32 @@ type File struct { wfs *WFS entry *filer_pb.Entry entryViewCache []filer2.VisibleInterval - isOpen bool + isOpen int } -func (file *File) fullpath() string { - return filepath.Join(file.dir.Path, file.Name) +func (file *File) fullpath() filer2.FullPath { + return filer2.NewFullPath(file.dir.Path, file.Name) } func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { - if err := file.maybeLoadAttributes(ctx); err != nil { - return err + glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr) + + if file.isOpen <= 0 { + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } } + attr.Inode = file.fullpath().AsInode() + attr.Valid = time.Second attr.Mode = os.FileMode(file.entry.Attributes.FileMode) attr.Size = filer2.TotalSize(file.entry.Chunks) + if file.isOpen > 0 { + attr.Size = file.entry.Attributes.FileSize + glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size) + } + attr.Crtime = time.Unix(file.entry.Attributes.Crtime, 0) attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0) attr.Gid = file.entry.Attributes.Gid attr.Uid = file.entry.Attributes.Uid @@ -52,11 +67,22 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { } +func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { + + // glog.V(4).Infof("file Getxattr %s", file.fullpath()) + + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } + + return getxattr(file.entry, req, resp) +} + func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { - glog.V(3).Infof("%v file open %+v", file.fullpath(), req) + glog.V(4).Infof("file %v open %+v", file.fullpath(), req) - file.isOpen = true + file.isOpen++ handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid) @@ -70,17 +96,28 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - if err := file.maybeLoadAttributes(ctx); err != nil { + glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) + + if err := file.maybeLoadEntry(ctx); err != nil { return err } - glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) if req.Valid.Size() { glog.V(3).Infof("%v file setattr set size=%v", file.fullpath(), req.Size) - if req.Size == 0 { + if req.Size < filer2.TotalSize(file.entry.Chunks) { // fmt.Printf("truncate %v \n", fullPath) - file.entry.Chunks = nil + var chunks []*filer_pb.FileChunk + for _, chunk := range file.entry.Chunks { + int64Size := int64(chunk.Size) + if chunk.Offset+int64Size > int64(req.Size) { + int64Size = int64(req.Size) - chunk.Offset + } + if int64Size > 0 { + chunks = append(chunks, chunk) + } + } + file.entry.Chunks = chunks file.entryViewCache = nil } file.entry.Attributes.FileSize = req.Size @@ -105,79 +142,94 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f file.entry.Attributes.Mtime = req.Mtime.Unix() } - if file.isOpen { + if file.isOpen > 0 { return nil } - return file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + file.wfs.cacheDelete(file.fullpath()) - request := &filer_pb.UpdateEntryRequest{ - Directory: 
file.dir.Path, - Entry: file.entry, - } + return file.saveEntry() - glog.V(1).Infof("set attr file entry: %v", request) - _, err := client.UpdateEntry(ctx, request) - if err != nil { - glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err) - return fuse.EIO - } +} - return nil - }) +func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { -} + glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name) -func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { - // fsync works at OS level - // write the file chunks to the filerGrpcAddress - glog.V(3).Infof("%s/%s fsync file %+v", file.dir.Path, file.Name, req) + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } + + if err := setxattr(file.entry, req); err != nil { + return err + } + + file.wfs.cacheDelete(file.fullpath()) + + return file.saveEntry() - return nil } -func (file *File) maybeLoadAttributes(ctx context.Context) error { - if file.entry == nil || !file.isOpen { - item := file.wfs.listDirectoryEntriesCache.Get(file.fullpath()) - if item != nil && !item.Expired() { - entry := item.Value().(*filer_pb.Entry) - file.setEntry(entry) - // glog.V(1).Infof("file attr read cached %v attributes", file.Name) - } else { - err := file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { +func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error { - request := &filer_pb.LookupDirectoryEntryRequest{ - Name: file.Name, - Directory: file.dir.Path, - } + glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - glog.V(3).Infof("file attr read file %v: %v", request, err) - return fuse.ENOENT - } + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } - file.setEntry(resp.Entry) + if err := removexattr(file.entry, req); err != nil { + return err + } + + file.wfs.cacheDelete(file.fullpath()) - glog.V(3).Infof("file attr %v %+v: %d", file.fullpath(), file.entry.Attributes, filer2.TotalSize(file.entry.Chunks)) + return file.saveEntry() - // file.wfs.listDirectoryEntriesCache.Set(file.fullpath(), file.entry, file.wfs.option.EntryCacheTtl) +} - return nil - }) +func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { - if err != nil { - return err - } - } + glog.V(4).Infof("file Listxattr %s", file.fullpath()) + + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } + + if err := listxattr(file.entry, req, resp); err != nil { + return err } + return nil + } -func (file *File) addChunk(chunk *filer_pb.FileChunk) { - if chunk != nil { - file.addChunks([]*filer_pb.FileChunk{chunk}) +func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { + // fsync works at OS level + // write the file chunks to the filerGrpcAddress + glog.V(3).Infof("%s/%s fsync file %+v", file.dir.Path, file.Name, req) + + return nil +} + +func (file *File) Forget() { + glog.V(3).Infof("Forget file %s/%s", file.dir.Path, file.Name) + + file.wfs.forgetNode(filer2.NewFullPath(file.dir.Path, file.Name)) + +} + +func (file *File) maybeLoadEntry(ctx context.Context) error { + if file.entry == nil || file.isOpen <= 0 { + entry, err := file.wfs.maybeLoadEntry(file.dir.Path, file.Name) + if err != nil { + return err + } + if entry != nil { + file.setEntry(entry) + } } + return nil } func (file *File) addChunks(chunks []*filer_pb.FileChunk) { @@ -203,3 +255,22 @@ func 
(file *File) setEntry(entry *filer_pb.Entry) { file.entry = entry file.entryViewCache = filer2.NonOverlappingVisibleIntervals(file.entry.Chunks) } + +func (file *File) saveEntry() error { + return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.UpdateEntryRequest{ + Directory: file.dir.Path, + Entry: file.entry, + } + + glog.V(1).Infof("save file entry: %v", request) + _, err := client.UpdateEntry(context.Background(), request) + if err != nil { + glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err) + return fuse.EIO + } + + return nil + }) +} diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 1f4754dd1..100c9eba0 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -7,10 +7,11 @@ import ( "path" "time" + "github.com/gabriel-vasile/mimetype" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gabriel-vasile/mimetype" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -50,29 +51,51 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size)) + buff := make([]byte, req.Size) + + totalRead, err := fh.readFromChunks(buff, req.Offset) + if err == nil { + dirtyOffset, dirtySize := fh.readFromDirtyPages(buff, req.Offset) + if totalRead+req.Offset < dirtyOffset+int64(dirtySize) { + totalRead = dirtyOffset + int64(dirtySize) - req.Offset + } + } + + resp.Data = buff[:totalRead] + + if err != nil { + glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) + return fuse.EIO + } + + return err +} + +func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (offset int64, size int) { + return fh.dirtyPages.ReadDirtyData(buff, startOffset) +} + +func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { + // this value should come from the filer instead of the old f if len(fh.f.entry.Chunks) == 0 { - glog.V(1).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name) - return nil + glog.V(1).Infof("empty fh %v", fh.f.fullpath()) + return 0, nil } - buff := make([]byte, req.Size) - if fh.f.entryViewCache == nil { fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks) } - chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size) - - totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, req.Offset) + chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, offset, len(buff)) - resp.Data = buff[:totalRead] + totalRead, err := filer2.ReadIntoBuffer(fh.f.wfs, fh.f.fullpath(), buff, chunkViews, offset) if err != nil { glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) } - return err + return totalRead, err } // Write to the file handle @@ -80,30 +103,32 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f // write the request to volume servers - glog.V(4).Infof("%+v/%v write fh %d: [%d,%d)", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data))) + fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize))) + // glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data))) - chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data) + chunks, err := 
fh.dirtyPages.AddPage(req.Offset, req.Data) if err != nil { - glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) - return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)), err) + glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) + return fuse.EIO } resp.Size = len(req.Data) if req.Offset == 0 { // detect mime type - var possibleExt string - fh.contentType, possibleExt = mimetype.Detect(req.Data) - if ext := path.Ext(fh.f.Name); ext != possibleExt { + detectedMIME := mimetype.Detect(req.Data) + fh.contentType = detectedMIME.String() + if ext := path.Ext(fh.f.Name); ext != detectedMIME.Extension() { fh.contentType = mime.TypeByExtension(ext) } fh.dirtyMetadata = true } - fh.f.addChunks(chunks) - if len(chunks) > 0 { + + fh.f.addChunks(chunks) + fh.dirtyMetadata = true } @@ -114,11 +139,12 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle) - fh.dirtyPages.releaseResource() - - fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + fh.f.isOpen-- - fh.f.isOpen = false + if fh.f.isOpen <= 0 { + fh.dirtyPages.releaseResource() + fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + } return nil } @@ -128,19 +154,22 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { // send the data to the OS glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req) - chunk, err := fh.dirtyPages.FlushToStorage(ctx) + chunks, err := fh.dirtyPages.FlushToStorage() if err != nil { - glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) - return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) + glog.Errorf("flush %s: %v", fh.f.fullpath(), err) + return fuse.EIO } - fh.f.addChunk(chunk) + fh.f.addChunks(chunks) + if len(chunks) > 0 { + fh.dirtyMetadata = true + } if !fh.dirtyMetadata { return nil } - return fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { if fh.f.entry.Attributes != nil { fh.f.entry.Attributes.Mime = fh.contentType @@ -149,6 +178,8 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { fh.f.entry.Attributes.Mtime = time.Now().Unix() fh.f.entry.Attributes.Crtime = time.Now().Unix() fh.f.entry.Attributes.FileMode = uint32(0777 &^ fh.f.wfs.option.Umask) + fh.f.entry.Attributes.Collection = fh.dirtyPages.collection + fh.f.entry.Attributes.Replication = fh.dirtyPages.replication } request := &filer_pb.CreateEntryRequest{ @@ -156,25 +187,36 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { Entry: fh.f.entry, } - glog.V(3).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks)) + glog.V(3).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks)) for i, chunk := range fh.f.entry.Chunks { - glog.V(3).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(3).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) } chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks) fh.f.entry.Chunks = chunks // fh.f.entryViewCache = nil - if _, err := client.CreateEntry(ctx, 
request); err != nil { - glog.Errorf("update fh: %v", err) - return fmt.Errorf("update fh: %v", err) + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err) + return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err) } - fh.f.wfs.deleteFileChunks(ctx, garbages) + fh.f.wfs.deleteFileChunks(garbages) for i, chunk := range garbages { - glog.V(3).Infof("garbage %s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(3).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) } return nil }) + + if err == nil { + fh.dirtyMetadata = false + } + + if err != nil { + glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err) + return fuse.EIO + } + + return nil } diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 1bd9b5cc9..77438b58e 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -5,16 +5,19 @@ import ( "fmt" "math" "os" + "strings" "sync" "time" + "github.com/karlseguin/ccache" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/karlseguin/ccache" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" - "google.golang.org/grpc" ) type Option struct { @@ -26,7 +29,7 @@ type Option struct { TtlSec int32 ChunkSizeLimit int64 DataCenter string - DirListingLimit int + DirListCacheLimit int64 EntryCacheTtl time.Duration Umask os.FileMode @@ -35,6 +38,10 @@ type Option struct { MountMode os.FileMode MountCtime time.Time MountMtime time.Time + + OutsideContainerClusterMode bool // whether the mount runs outside SeaweedFS containers + Cipher bool // whether encrypt data on volume server + } var _ = fs.FS(&WFS{}) @@ -44,13 +51,19 @@ type WFS struct { option *Option listDirectoryEntriesCache *ccache.Cache - // contains all open handles + // contains all open handles, protected by handlesLock + handlesLock sync.Mutex handles []*FileHandle - pathToHandleIndex map[string]int - pathToHandleLock sync.Mutex - bufPool sync.Pool + pathToHandleIndex map[filer2.FullPath]int + + bufPool sync.Pool stats statsCache + + // nodes, protected by nodesLock + nodesLock sync.Mutex + nodes map[uint64]fs.Node + root fs.Node } type statsCache struct { filer_pb.StatisticsResponse @@ -60,36 +73,46 @@ type statsCache struct { func NewSeaweedFileSystem(option *Option) *WFS { wfs := &WFS{ option: option, - listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(1024 * 8).ItemsToPrune(100)), - pathToHandleIndex: make(map[string]int), + listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)), + pathToHandleIndex: make(map[filer2.FullPath]int), bufPool: sync.Pool{ New: func() interface{} { return make([]byte, option.ChunkSizeLimit) }, }, + nodes: make(map[uint64]fs.Node), } + wfs.root = &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs} + return wfs } func (wfs *WFS) Root() (fs.Node, error) { - return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil + return wfs.root, nil } -func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, 
func(grpcConnection *grpc.ClientConn) error { + err := pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) + if err == nil { + return nil + } + return err + } func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() fullpath := file.fullpath() + glog.V(4).Infof("%s AcquireHandle uid=%d gid=%d", fullpath, uid, gid) + + wfs.handlesLock.Lock() + defer wfs.handlesLock.Unlock() index, found := wfs.pathToHandleIndex[fullpath] if found && wfs.handles[index] != nil { @@ -103,24 +126,24 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand wfs.handles[i] = fileHandle fileHandle.handle = uint64(i) wfs.pathToHandleIndex[fullpath] = i - glog.V(4).Infoln(fullpath, "reuse fileHandle id", fileHandle.handle) + glog.V(4).Infof("%s reuse fh %d", fullpath, fileHandle.handle) return } } wfs.handles = append(wfs.handles, fileHandle) fileHandle.handle = uint64(len(wfs.handles) - 1) - glog.V(2).Infoln(fullpath, "new fileHandle id", fileHandle.handle) wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle) + glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle) return } -func (wfs *WFS) ReleaseHandle(fullpath string, handleId fuse.HandleID) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() +func (wfs *WFS) ReleaseHandle(fullpath filer2.FullPath, handleId fuse.HandleID) { + wfs.handlesLock.Lock() + defer wfs.handlesLock.Unlock() - glog.V(4).Infof("%s releasing handle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) + glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) delete(wfs.pathToHandleIndex, fullpath) if int(handleId) < len(wfs.handles) { wfs.handles[int(handleId)] = nil @@ -136,7 +159,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. if wfs.stats.lastChecked < time.Now().Unix()-20 { - err := wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.StatisticsRequest{ Collection: wfs.option.Collection, @@ -145,7 +168,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. } glog.V(4).Infof("reading filer stats: %+v", request) - resp, err := client.Statistics(ctx, request) + resp, err := client.Statistics(context.Background(), request) if err != nil { glog.V(0).Infof("reading filer stats %v: %v", request, err) return err @@ -190,3 +213,56 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. 
return nil } + +func (wfs *WFS) cacheGet(path filer2.FullPath) *filer_pb.Entry { + item := wfs.listDirectoryEntriesCache.Get(string(path)) + if item != nil && !item.Expired() { + return item.Value().(*filer_pb.Entry) + } + return nil +} +func (wfs *WFS) cacheSet(path filer2.FullPath, entry *filer_pb.Entry, ttl time.Duration) { + if entry == nil { + wfs.listDirectoryEntriesCache.Delete(string(path)) + } else { + wfs.listDirectoryEntriesCache.Set(string(path), entry, ttl) + } +} +func (wfs *WFS) cacheDelete(path filer2.FullPath) { + wfs.listDirectoryEntriesCache.Delete(string(path)) +} + +func (wfs *WFS) getNode(fullpath filer2.FullPath, fn func() fs.Node) fs.Node { + wfs.nodesLock.Lock() + defer wfs.nodesLock.Unlock() + + node, found := wfs.nodes[fullpath.AsInode()] + if found { + return node + } + node = fn() + if node != nil { + wfs.nodes[fullpath.AsInode()] = node + } + return node +} + +func (wfs *WFS) forgetNode(fullpath filer2.FullPath) { + wfs.nodesLock.Lock() + defer wfs.nodesLock.Unlock() + + delete(wfs.nodes, fullpath.AsInode()) +} + +func (wfs *WFS) AdjustedUrl(hostAndPort string) string { + if !wfs.option.OutsideContainerClusterMode { + return hostAndPort + } + colonIndex := strings.Index(hostAndPort, ":") + if colonIndex < 0 { + return hostAndPort + } + filerColonIndex := strings.Index(wfs.option.FilerGrpcAddress, ":") + return fmt.Sprintf("%s:%s", wfs.option.FilerGrpcAddress[:filerColonIndex], hostAndPort[colonIndex+1:]) + +} diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index 6e586b7df..bf21b1808 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -3,14 +3,15 @@ package filesys import ( "context" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "google.golang.org/grpc" ) -func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChunk) { +func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { if len(chunks) == 0 { return } @@ -20,13 +21,13 @@ func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChu fileIds = append(fileIds, chunk.GetFileIdString()) } - wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds) + wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + wfs.deleteFileIds(wfs.option.GrpcDialOption, client, fileIds) return nil }) } -func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { +func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { var vids []string for _, fileId := range fileIds { @@ -38,7 +39,7 @@ func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client f m := make(map[string]operation.LookupResult) glog.V(4).Infof("remove file lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: vids, }) if err != nil { @@ -50,10 +51,13 @@ func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client f VolumeId: vid, Locations: nil, } - locations := resp.LocationsMap[vid] + locations, found := resp.LocationsMap[vid] + if !found { + continue + } for _, loc := range
locations.Locations { lr.Locations = append(lr.Locations, operation.Location{ - Url: loc.Url, + Url: wfs.AdjustedUrl(loc.Url), PublicUrl: loc.PublicUrl, }) } diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go new file mode 100644 index 000000000..af154a7ee --- /dev/null +++ b/weed/filesys/xattr.go @@ -0,0 +1,141 @@ +package filesys + +import ( + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/seaweedfs/fuse" +) + +func getxattr(entry *filer_pb.Entry, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { + + if entry == nil { + return fuse.ErrNoXattr + } + if entry.Extended == nil { + return fuse.ErrNoXattr + } + data, found := entry.Extended[req.Name] + if !found { + return fuse.ErrNoXattr + } + if req.Position < uint32(len(data)) { + size := req.Size + if req.Position+size >= uint32(len(data)) { + size = uint32(len(data)) - req.Position + } + if size == 0 { + resp.Xattr = data[req.Position:] + } else { + resp.Xattr = data[req.Position : req.Position+size] + } + } + + return nil + +} + +func setxattr(entry *filer_pb.Entry, req *fuse.SetxattrRequest) error { + + if entry == nil { + return fuse.EIO + } + + if entry.Extended == nil { + entry.Extended = make(map[string][]byte) + } + data, _ := entry.Extended[req.Name] + + newData := make([]byte, int(req.Position)+len(req.Xattr)) + + copy(newData, data) + + copy(newData[int(req.Position):], req.Xattr) + + entry.Extended[req.Name] = newData + + return nil + +} + +func removexattr(entry *filer_pb.Entry, req *fuse.RemovexattrRequest) error { + + if entry == nil { + return fuse.ErrNoXattr + } + + if entry.Extended == nil { + return fuse.ErrNoXattr + } + + _, found := entry.Extended[req.Name] + + if !found { + return fuse.ErrNoXattr + } + + delete(entry.Extended, req.Name) + + return nil + +} + +func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { + + if entry == nil { + return fuse.EIO + } + + for k := range entry.Extended { + resp.Append(k) + } + + size := req.Size + if req.Position+size >= uint32(len(resp.Xattr)) { + size = uint32(len(resp.Xattr)) - req.Position + } + + if size == 0 { + resp.Xattr = resp.Xattr[req.Position:] + } else { + resp.Xattr = resp.Xattr[req.Position : req.Position+size] + } + + return nil + +} + +func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) { + + fullpath := filer2.NewFullPath(dir, name) + entry = wfs.cacheGet(fullpath) + if entry != nil { + return + } + // glog.V(3).Infof("read entry cache miss %s", fullpath) + + err = wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Name: name, + Directory: dir, + } + + resp, err := filer_pb.LookupEntry(client, request) + if err != nil { + if err == filer_pb.ErrNotFound { + glog.V(3).Infof("file attr read not found file %v: %v", request, err) + return fuse.ENOENT + } + glog.V(3).Infof("attr read %v: %v", request, err) + return fuse.EIO + } + + entry = resp.Entry + wfs.cacheSet(fullpath, entry, wfs.option.EntryCacheTtl) + + return nil + }) + + return +} diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go index 4c1302abb..d881049dd 100644 --- a/weed/notification/aws_sqs/aws_sqs_pub.go +++ b/weed/notification/aws_sqs/aws_sqs_pub.go @@ -27,14 +27,14 @@ func (k *AwsSqsPub) GetName() string { return "aws_sqs" } -func (k *AwsSqsPub) Initialize(configuration 
util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go index 7f8765cc3..36211692c 100644 --- a/weed/notification/configuration.go +++ b/weed/notification/configuration.go @@ -11,7 +11,7 @@ type MessageQueue interface { // GetName gets the name to locate the configuration in filer.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error SendMessage(key string, message proto.Message) error } @@ -21,7 +21,7 @@ var ( Queue MessageQueue ) -func LoadConfiguration(config *viper.Viper) { +func LoadConfiguration(config *viper.Viper, prefix string) { if config == nil { return @@ -30,9 +30,8 @@ func LoadConfiguration(config *viper.Viper) { validateOneEnabledQueue(config) for _, queue := range MessageQueues { - if config.GetBool(queue.GetName() + ".enabled") { - viperSub := config.Sub(queue.GetName()) - if err := queue.Initialize(viperSub); err != nil { + if config.GetBool(prefix + queue.GetName() + ".enabled") { + if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification for %s: %+v", queue.GetName(), err) } diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go index ebf44ea6f..1ae102509 100644 --- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -18,12 +18,13 @@ import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/notification" - "github.com/chrislusf/seaweedfs/weed/util" "github.com/golang/protobuf/proto" "gocloud.dev/pubsub" _ "gocloud.dev/pubsub/awssnssqs" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" // _ "gocloud.dev/pubsub/azuresb" _ "gocloud.dev/pubsub/gcppubsub" _ "gocloud.dev/pubsub/natspubsub" @@ -43,8 +44,8 @@ func (k *GoCDKPubSub) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSub) Initialize(config util.Configuration) error { - k.topicURL = config.GetString("topic_url") +func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error { + k.topicURL = configuration.GetString(prefix + "topic_url") glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL) topic, err := pubsub.OpenTopic(context.Background(), k.topicURL) if err != nil { @@ -59,8 +60,7 @@ func (k *GoCDKPubSub) SendMessage(key 
string, message proto.Message) error { if err != nil { return err } - ctx := context.Background() - err = k.topic.Send(ctx, &pubsub.Message{ + err = k.topic.Send(context.Background(), &pubsub.Message{ Body: bytes, Metadata: map[string]string{"key": key}, }) diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go index 7b26bfe38..363a86eb6 100644 --- a/weed/notification/google_pub_sub/google_pub_sub.go +++ b/weed/notification/google_pub_sub/google_pub_sub.go @@ -25,13 +25,13 @@ func (k *GooglePubSub) GetName() string { return "google_pub_sub" } -func (k *GooglePubSub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go index 830709a51..8d83b5892 100644 --- a/weed/notification/kafka/kafka_queue.go +++ b/weed/notification/kafka/kafka_queue.go @@ -21,12 +21,12 @@ func (k *KafkaQueue) GetName() string { return "kafka" } -func (k *KafkaQueue) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), ) } @@ -76,7 +76,7 @@ func (k *KafkaQueue) handleError() { for { err := <-k.producer.Errors() if err != nil { - glog.Errorf("producer message error, partition:%d offset:%d key:%v valus:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic) + glog.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic) } } } diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go index dcc038dfc..1ca4786a1 100644 --- a/weed/notification/log/log_queue.go +++ b/weed/notification/log/log_queue.go @@ -18,7 +18,7 @@ func (k *LogQueue) GetName() string { return "log" } -func (k *LogQueue) Initialize(configuration util.Configuration) (err error) { +func (k *LogQueue) Initialize(configuration util.Configuration, prefix string) (err error) { return nil } diff --git a/weed/operation/assign_file_id.go 
b/weed/operation/assign_file_id.go index 2dfa44483..893bf516c 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -3,11 +3,13 @@ package operation import ( "context" "fmt" + "strings" + + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "strings" ) type VolumeAssignRequest struct { diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 6d84be76f..361c09e7e 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -4,12 +4,13 @@ import ( "context" "errors" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "google.golang.org/grpc" "net/http" "strings" "sync" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) type DeleteResult struct { @@ -94,7 +95,7 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil { err = deleteErr - } else { + } else if deleteResults != nil { resultChan <- deleteResults } @@ -107,8 +108,6 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str ret = append(ret, result...) } - glog.V(0).Infof("deleted %d items", len(ret)) - return ret, err } diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index f6b2b69e9..dccf85da4 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -1,27 +1,26 @@ package operation import ( - "context" "fmt" + "strconv" + "strings" + + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "strconv" - "strings" ) func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { - ctx := context.Background() - grpcAddress, err := toVolumeServerGrpcAddress(volumeServer) if err != nil { return err } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) return fn(client) }, grpcAddress, grpcDialOption) @@ -40,14 +39,12 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { - ctx := context.Background() - - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer) + masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(masterServer) if parseErr != nil { return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr) } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) }, masterGrpcAddress, grpcDialOption) diff --git a/weed/operation/submit.go b/weed/operation/submit.go index bdf59d966..5e4dc4374 100644 --- 
a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -1,8 +1,6 @@ package operation import ( - "bytes" - "google.golang.org/grpc" "io" "mime" "net/url" @@ -11,6 +9,8 @@ import ( "strconv" "strings" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" ) @@ -52,7 +52,7 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart } ret, err := Assign(master, grpcDialOption, ar) if err != nil { - for index, _ := range files { + for index := range files { results[index].Error = err.Error() } return results, err @@ -189,7 +189,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp cm.DeleteChunks(master, grpcDialOption) } } else { - ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt) + ret, e := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt) if e != nil { return 0, e } @@ -202,8 +202,7 @@ func upload_one_chunk(filename string, reader io.Reader, master, fileUrl string, jwt security.EncodedJwt, ) (size uint32, e error) { glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") - uploadResult, uploadError := Upload(fileUrl, filename, reader, false, - "application/octet-stream", nil, jwt) + uploadResult, uploadError := Upload(fileUrl, filename, false, reader, false, "", nil, jwt) if uploadError != nil { return 0, uploadError } @@ -215,12 +214,11 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s if e != nil { return e } - bufReader := bytes.NewReader(buf) glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...") u, _ := url.Parse(fileUrl) q := u.Query() q.Set("cm", "true") u.RawQuery = q.Encode() - _, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", nil, jwt) + _, e = UploadData(u.String(), manifest.Name, false, buf, false, "application/json", nil, jwt) return e } diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go index b53f18ce1..3cd66b5da 100644 --- a/weed/operation/tail_volume.go +++ b/weed/operation/tail_volume.go @@ -5,9 +5,10 @@ import ( "fmt" "io" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" - "google.golang.org/grpc" ) func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error { diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index c387d0230..52f8f9e2b 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -3,7 +3,7 @@ package operation import ( "bytes" "compress/flate" - "compress/gzip" + "crypto/md5" "encoding/json" "errors" "fmt" @@ -22,10 +22,14 @@ import ( ) type UploadResult struct { - Name string `json:"name,omitempty"` - Size uint32 `json:"size,omitempty"` - Error string `json:"error,omitempty"` - ETag string `json:"eTag,omitempty"` + Name string `json:"name,omitempty"` + Size uint32 `json:"size,omitempty"` + Error string `json:"error,omitempty"` + ETag string `json:"eTag,omitempty"` + CipherKey []byte `json:"cipherKey,omitempty"` + Mime string `json:"mime,omitempty"` + Gzip uint32 `json:"gzip,omitempty"` + Md5 string `json:"md5,omitempty"` } var ( @@ -41,40 +45,159 @@ func init() { var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") // Upload sends a POST request to a volume server to upload the content with 
adjustable compression level -func UploadWithLocalCompressionLevel(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) { - if compressionLevel < 1 { - compressionLevel = 1 +func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + hash := md5.New() + hash.Write(data) + uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputGzipped, mtype, pairMap, jwt) + if uploadResult != nil { + uploadResult.Md5 = fmt.Sprintf("%x", hash.Sum(nil)) } - if compressionLevel > 9 { - compressionLevel = 9 - } - return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, compressionLevel, jwt) + return } // Upload sends a POST request to a volume server to upload the content with fast compression -func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, flate.BestSpeed, jwt) +func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + hash := md5.New() + reader = io.TeeReader(reader, hash) + uploadResult, err = doUpload(uploadUrl, filename, cipher, reader, isInputGzipped, mtype, pairMap, flate.BestSpeed, jwt) + if uploadResult != nil { + uploadResult.Md5 = fmt.Sprintf("%x", hash.Sum(nil)) + } + return +} + +func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + contentIsGzipped := isInputGzipped + shouldGzipNow := false + if !isInputGzipped { + if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); mtype == "" || iAmSure && shouldBeZipped { + shouldGzipNow = true + contentIsGzipped = true + } + } + + var clearDataLen int + + // gzip if possible + // this could be double copying + clearDataLen = len(data) + if shouldGzipNow { + data, err = util.GzipData(data) + } else if isInputGzipped { + // just to get the clear data length + clearData, err := util.UnGzipData(data) + if err == nil { + clearDataLen = len(clearData) + } + } + + if cipher { + // encrypt(gzip(data)) + + // encrypt + cipherKey := util.GenCipherKey() + encryptedData, encryptionErr := util.Encrypt(data, cipherKey) + if encryptionErr != nil { + err = fmt.Errorf("encrypt input: %v", encryptionErr) + return + } + + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(encryptedData) + return + }, "", false, "", nil, jwt) + if uploadResult != nil { + uploadResult.Name = filename + uploadResult.Mime = mtype + uploadResult.CipherKey = cipherKey + } + } else { + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(data) + return + }, filename, contentIsGzipped, mtype, pairMap, jwt) + } + + if uploadResult == nil { + // a failed upload_content leaves uploadResult nil; avoid a nil dereference below + return + } + + uploadResult.Size = uint32(clearDataLen) + if contentIsGzipped { + uploadResult.Gzip = 1 + } + + return uploadResult, err } -func doUpload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap
map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) { - contentIsGzipped := isGzipped +func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + contentIsGzipped := isInputGzipped shouldGzipNow := false - if !isGzipped { - if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeZipped { + if !isInputGzipped { + if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); mtype == "" || iAmSure && shouldBeZipped { shouldGzipNow = true contentIsGzipped = true } } - return upload_content(uploadUrl, func(w io.Writer) (err error) { - if shouldGzipNow { - gzWriter, _ := gzip.NewWriterLevel(w, compression) - _, err = io.Copy(gzWriter, reader) - gzWriter.Close() - } else { - _, err = io.Copy(w, reader) + + var clearDataLen int + + // gzip if possible + // this could be double copying + data, readErr := ioutil.ReadAll(reader) + if readErr != nil { + err = fmt.Errorf("read input: %v", readErr) + return + } + clearDataLen = len(data) + if shouldGzipNow { + data, err = util.GzipData(data) + } else if isInputGzipped { + // just to get the clear data length + clearData, err := util.UnGzipData(data) + if err == nil { + clearDataLen = len(clearData) + } + } + + if cipher { + // encrypt(gzip(data)) + + // encrypt + cipherKey := util.GenCipherKey() + encryptedData, encryptionErr := util.Encrypt(data, cipherKey) + if encryptionErr != nil { + err = fmt.Errorf("encrypt input: %v", encryptionErr) + return } + + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(encryptedData) + return + }, "", false, "", nil, jwt) + if uploadResult != nil { + uploadResult.Name = filename + uploadResult.Mime = mtype + uploadResult.CipherKey = cipherKey + uploadResult.Size = uint32(clearDataLen) + } + } else { + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(data) + return + }, filename, contentIsGzipped, mtype, pairMap, jwt) + } + + if uploadResult == nil { return - }, filename, contentIsGzipped, mtype, pairMap, jwt) + } + + uploadResult.Size = uint32(clearDataLen) + if contentIsGzipped { + uploadResult.Gzip = 1 + } + + return uploadResult, err } func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { diff --git a/weed/pb/Makefile b/weed/pb/Makefile index c50410574..6680b7ca2 100644 --- a/weed/pb/Makefile +++ b/weed/pb/Makefile @@ -6,5 +6,7 @@ gen: protoc master.proto --go_out=plugins=grpc:./master_pb protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb protoc filer.proto --go_out=plugins=grpc:./filer_pb + protoc iam.proto --go_out=plugins=grpc:./iam_pb + protoc queue.proto --go_out=plugins=grpc:./queue_pb # protoc filer.proto --java_out=../../other/java/client/src/main/java cp filer.proto ../../other/java/client/src/main/proto diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 18ccca44f..8df46e917 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -12,7 +12,7 @@ service SeaweedFiler { rpc LookupDirectoryEntry (LookupDirectoryEntryRequest) returns (LookupDirectoryEntryResponse) { } - rpc ListEntries (ListEntriesRequest) returns 
(ListEntriesResponse) { + rpc ListEntries (ListEntriesRequest) returns (stream ListEntriesResponse) { } rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) { @@ -24,6 +24,9 @@ service SeaweedFiler { rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } + rpc StreamDeleteEntries (stream DeleteEntryRequest) returns (stream DeleteEntryResponse) { + } + rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { } @@ -64,7 +67,7 @@ message ListEntriesRequest { } message ListEntriesResponse { - repeated Entry entries = 1; + Entry entry = 1; } message Entry { @@ -96,6 +99,8 @@ message FileChunk { string source_file_id = 6; // to be deprecated FileId fid = 7; FileId source_fid = 8; + bytes cipher_key = 9; + bool is_gzipped = 10; } message FileId { @@ -123,9 +128,11 @@ message FuseAttributes { message CreateEntryRequest { string directory = 1; Entry entry = 2; + bool o_excl = 3; } message CreateEntryResponse { + string error = 1; } message UpdateEntryRequest { @@ -145,6 +152,7 @@ message DeleteEntryRequest { } message DeleteEntryResponse { + string error = 1; } message AtomicRenameEntryRequest { @@ -163,6 +171,7 @@ message AssignVolumeRequest { string replication = 3; int32 ttl_sec = 4; string data_center = 5; + string parent_path = 6; } message AssignVolumeResponse { @@ -171,6 +180,9 @@ message AssignVolumeResponse { string public_url = 3; int32 count = 4; string auth = 5; + string collection = 6; + string replication = 7; + string error = 8; } message LookupVolumeRequest { @@ -217,4 +229,7 @@ message GetFilerConfigurationResponse { string replication = 2; string collection = 3; uint32 max_mb = 4; + string dir_buckets = 5; + string dir_queues = 6; + bool cipher = 7; } diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 89541d6f3..9cf659ece 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -151,7 +151,7 @@ func (m *ListEntriesRequest) GetLimit() uint32 { } type ListEntriesResponse struct { - Entries []*Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + Entry *Entry `protobuf:"bytes,1,opt,name=entry" json:"entry,omitempty"` } func (m *ListEntriesResponse) Reset() { *m = ListEntriesResponse{} } @@ -159,9 +159,9 @@ func (m *ListEntriesResponse) String() string { return proto.CompactT func (*ListEntriesResponse) ProtoMessage() {} func (*ListEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } -func (m *ListEntriesResponse) GetEntries() []*Entry { +func (m *ListEntriesResponse) GetEntry() *Entry { if m != nil { - return m.Entries + return m.Entry } return nil } @@ -287,6 +287,8 @@ type FileChunk struct { SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"` Fid *FileId `protobuf:"bytes,7,opt,name=fid" json:"fid,omitempty"` SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid" json:"source_fid,omitempty"` + CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"` + IsGzipped bool `protobuf:"varint,10,opt,name=is_gzipped,json=isGzipped" json:"is_gzipped,omitempty"` } func (m *FileChunk) Reset() { *m = FileChunk{} } @@ -350,6 +352,20 @@ func (m *FileChunk) GetSourceFid() *FileId { return nil } +func (m *FileChunk) GetCipherKey() []byte { + if m != nil { + return m.CipherKey + } + return nil +} + +func (m *FileChunk) GetIsGzipped() bool { + if m != nil { + return m.IsGzipped + } + return false +} + type FileId 
struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` @@ -497,6 +513,7 @@ func (m *FuseAttributes) GetSymlinkTarget() string { type CreateEntryRequest struct { Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` + OExcl bool `protobuf:"varint,3,opt,name=o_excl,json=oExcl" json:"o_excl,omitempty"` } func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} } @@ -518,7 +535,15 @@ func (m *CreateEntryRequest) GetEntry() *Entry { return nil } +func (m *CreateEntryRequest) GetOExcl() bool { + if m != nil { + return m.OExcl + } + return false +} + type CreateEntryResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` } func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} } @@ -526,6 +551,13 @@ func (m *CreateEntryResponse) String() string { return proto.CompactT func (*CreateEntryResponse) ProtoMessage() {} func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (m *CreateEntryResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type UpdateEntryRequest struct { Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` @@ -608,6 +640,7 @@ func (m *DeleteEntryRequest) GetIgnoreRecursiveError() bool { } type DeleteEntryResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` } func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} } @@ -615,6 +648,13 @@ func (m *DeleteEntryResponse) String() string { return proto.CompactT func (*DeleteEntryResponse) ProtoMessage() {} func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (m *DeleteEntryResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type AtomicRenameEntryRequest struct { OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory" json:"old_directory,omitempty"` OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName" json:"old_name,omitempty"` @@ -669,6 +709,7 @@ type AssignVolumeRequest struct { Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"` TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"` DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` + ParentPath string `protobuf:"bytes,6,opt,name=parent_path,json=parentPath" json:"parent_path,omitempty"` } func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } @@ -711,12 +752,22 @@ func (m *AssignVolumeRequest) GetDataCenter() string { return "" } +func (m *AssignVolumeRequest) GetParentPath() string { + if m != nil { + return m.ParentPath + } + return "" +} + type AssignVolumeResponse struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"` + FileId string 
`protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` + Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"` + Collection string `protobuf:"bytes,6,opt,name=collection" json:"collection,omitempty"` + Replication string `protobuf:"bytes,7,opt,name=replication" json:"replication,omitempty"` + Error string `protobuf:"bytes,8,opt,name=error" json:"error,omitempty"` } func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } @@ -759,6 +810,27 @@ func (m *AssignVolumeResponse) GetAuth() string { return "" } +func (m *AssignVolumeResponse) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *AssignVolumeResponse) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +func (m *AssignVolumeResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type LookupVolumeRequest struct { VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` } @@ -956,6 +1028,9 @@ type GetFilerConfigurationResponse struct { Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"` + DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets" json:"dir_buckets,omitempty"` + DirQueues string `protobuf:"bytes,6,opt,name=dir_queues,json=dirQueues" json:"dir_queues,omitempty"` + Cipher bool `protobuf:"varint,7,opt,name=cipher" json:"cipher,omitempty"` } func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} } @@ -991,6 +1066,27 @@ func (m *GetFilerConfigurationResponse) GetMaxMb() uint32 { return 0 } +func (m *GetFilerConfigurationResponse) GetDirBuckets() string { + if m != nil { + return m.DirBuckets + } + return "" +} + +func (m *GetFilerConfigurationResponse) GetDirQueues() string { + if m != nil { + return m.DirQueues + } + return "" +} + +func (m *GetFilerConfigurationResponse) GetCipher() bool { + if m != nil { + return m.Cipher + } + return false +} + func init() { proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest") proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse") @@ -1036,10 +1132,11 @@ const _ = grpc.SupportPackageIsVersion4 type SeaweedFilerClient interface { LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) - ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) + ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) + StreamDeleteEntries(ctx context.Context, opts ...grpc.CallOption) 
(SeaweedFiler_StreamDeleteEntriesClient, error) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) @@ -1065,13 +1162,36 @@ func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *Looku return out, nil } -func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) { - out := new(ListEntriesResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/ListEntries", in, out, c.cc, opts...) +func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[0], c.cc, "/filer_pb.SeaweedFiler/ListEntries", opts...) if err != nil { return nil, err } - return out, nil + x := &seaweedFilerListEntriesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedFiler_ListEntriesClient interface { + Recv() (*ListEntriesResponse, error) + grpc.ClientStream +} + +type seaweedFilerListEntriesClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerListEntriesClient) Recv() (*ListEntriesResponse, error) { + m := new(ListEntriesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil } func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) { @@ -1101,6 +1221,37 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq return out, nil } +func (c *seaweedFilerClient) StreamDeleteEntries(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_StreamDeleteEntriesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], c.cc, "/filer_pb.SeaweedFiler/StreamDeleteEntries", opts...) + if err != nil { + return nil, err + } + x := &seaweedFilerStreamDeleteEntriesClient{stream} + return x, nil +} + +type SeaweedFiler_StreamDeleteEntriesClient interface { + Send(*DeleteEntryRequest) error + Recv() (*DeleteEntryResponse, error) + grpc.ClientStream +} + +type seaweedFilerStreamDeleteEntriesClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerStreamDeleteEntriesClient) Send(m *DeleteEntryRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedFilerStreamDeleteEntriesClient) Recv() (*DeleteEntryResponse, error) { + m := new(DeleteEntryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) { out := new(AtomicRenameEntryResponse) err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, c.cc, opts...) 
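As a usage sketch (not part of the change itself): with ListEntries now a server-streaming RPC, each ListEntriesResponse carries a single entry, so callers read results in a Recv loop instead of getting one repeated field. A minimal example against the regenerated client, where the example package, the listAll helper, and the visit callback are all hypothetical names:

package example // illustrative only

import (
	"context"
	"io"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// listAll drains the server-streaming ListEntries call, handing each
// received entry to the visit callback.
func listAll(client filer_pb.SeaweedFilerClient, dir string, visit func(*filer_pb.Entry)) error {
	stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{
		Directory: dir,
	})
	if err != nil {
		return err
	}
	for {
		resp, recvErr := stream.Recv()
		if recvErr == io.EOF {
			return nil // the server has finished streaming this directory
		}
		if recvErr != nil {
			return recvErr
		}
		visit(resp.Entry)
	}
}

Streaming keeps response sizes bounded for large directories, since the filer no longer has to buffer every entry into one ListEntriesResponse message.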
@@ -1159,10 +1310,11 @@ func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetF type SeaweedFilerServer interface { LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) - ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error) + ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) + StreamDeleteEntries(SeaweedFiler_StreamDeleteEntriesServer) error AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) @@ -1193,22 +1345,25 @@ func _SeaweedFiler_LookupDirectoryEntry_Handler(srv interface{}, ctx context.Con return interceptor(ctx, in, info, handler) } -func _SeaweedFiler_ListEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListEntriesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedFilerServer).ListEntries(ctx, in) +func _SeaweedFiler_ListEntries_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListEntriesRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/filer_pb.SeaweedFiler/ListEntries", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedFilerServer).ListEntries(ctx, req.(*ListEntriesRequest)) - } - return interceptor(ctx, in, info, handler) + return srv.(SeaweedFilerServer).ListEntries(m, &seaweedFilerListEntriesServer{stream}) +} + +type SeaweedFiler_ListEntriesServer interface { + Send(*ListEntriesResponse) error + grpc.ServerStream +} + +type seaweedFilerListEntriesServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerListEntriesServer) Send(m *ListEntriesResponse) error { + return x.ServerStream.SendMsg(m) } func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -1265,6 +1420,32 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_StreamDeleteEntries_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedFilerServer).StreamDeleteEntries(&seaweedFilerStreamDeleteEntriesServer{stream}) +} + +type SeaweedFiler_StreamDeleteEntriesServer interface { + Send(*DeleteEntryResponse) error + Recv() (*DeleteEntryRequest, error) + grpc.ServerStream +} + +type seaweedFilerStreamDeleteEntriesServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerStreamDeleteEntriesServer) Send(m *DeleteEntryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedFilerStreamDeleteEntriesServer) Recv() (*DeleteEntryRequest, error) { + m := new(DeleteEntryRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func _SeaweedFiler_AtomicRenameEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AtomicRenameEntryRequest) if err := dec(in); err != nil { @@ -1381,10 +1562,6 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "LookupDirectoryEntry", Handler: _SeaweedFiler_LookupDirectoryEntry_Handler, }, - { - MethodName: "ListEntries", - Handler: _SeaweedFiler_ListEntries_Handler, - }, { MethodName: "CreateEntry", Handler: _SeaweedFiler_CreateEntry_Handler, @@ -1422,113 +1599,134 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ Handler: _SeaweedFiler_GetFilerConfiguration_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListEntries", + Handler: _SeaweedFiler_ListEntries_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamDeleteEntries", + Handler: _SeaweedFiler_StreamDeleteEntries_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, Metadata: "filer.proto", } func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1608 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x49, 0x6f, 0xdc, 0x46, - 0x16, 0x36, 0x7b, 0xe7, 0xeb, 0x6e, 0x5b, 0x2a, 0xc9, 0x36, 0xdd, 0x5a, 0x46, 0xa6, 0xc6, 0x1e, - 0x19, 0x63, 0x68, 0x0c, 0x8f, 0x0f, 0xf6, 0x18, 0x03, 0xc4, 0xd6, 0x12, 0x08, 0x91, 0x17, 0x50, - 0x76, 0x90, 0x20, 0x40, 0x08, 0x8a, 0xac, 0x6e, 0x55, 0x44, 0xb2, 0x3a, 0xc5, 0xa2, 0x24, 0xe7, - 0x27, 0xe4, 0x98, 0x63, 0x80, 0x9c, 0xf3, 0x27, 0x82, 0x5c, 0x02, 0xff, 0x9d, 0x1c, 0x73, 0x0e, - 0xaa, 0x8a, 0x64, 0x17, 0x9b, 0x2d, 0xc9, 0x41, 0xe0, 0x1b, 0xeb, 0x2d, 0xdf, 0x5b, 0xea, 0x2d, - 0xd5, 0x0d, 0xdd, 0x21, 0x09, 0x31, 0xdb, 0x1c, 0x33, 0xca, 0x29, 0xea, 0xc8, 0x83, 0x3b, 0x3e, - 0xb4, 0x5f, 0xc1, 0xd2, 0x3e, 0xa5, 0xc7, 0xe9, 0x78, 0x9b, 0x30, 0xec, 0x73, 0xca, 0xde, 0xed, - 0xc4, 0x9c, 0xbd, 0x73, 0xf0, 0xb7, 0x29, 0x4e, 0x38, 0x5a, 0x06, 0x33, 0xc8, 0x19, 0x96, 0xb1, - 0x66, 0x6c, 0x98, 0xce, 0x84, 0x80, 0x10, 0x34, 0x62, 0x2f, 0xc2, 0x56, 0x4d, 0x32, 0xe4, 0xb7, - 0xbd, 0x03, 0xcb, 0xb3, 0x01, 0x93, 0x31, 0x8d, 0x13, 0x8c, 0xee, 0x40, 0x13, 0x0b, 0x82, 0x44, - 0xeb, 0x3e, 0xbc, 0xb6, 0x99, 0xbb, 0xb2, 0xa9, 0xe4, 0x14, 0xd7, 0xfe, 0xd5, 0x00, 0xb4, 0x4f, - 0x12, 0x2e, 0x88, 0x04, 0x27, 0x1f, 0xe6, 0xcf, 0x0d, 0x68, 0x8d, 0x19, 0x1e, 0x92, 0xb3, 0xcc, - 0xa3, 0xec, 0x84, 0xee, 0xc3, 0x7c, 0xc2, 0x3d, 0xc6, 0x77, 0x19, 0x8d, 0x76, 0x49, 0x88, 0x5f, - 0x0a, 0xa7, 0xeb, 0x52, 0xa4, 0xca, 0x40, 0x9b, 0x80, 0x48, 0xec, 0x87, 0x69, 0x42, 0x4e, 0xf0, - 0x41, 0xce, 0xb5, 0x1a, 0x6b, 0xc6, 0x46, 0xc7, 0x99, 0xc1, 0x41, 0x8b, 0xd0, 0x0c, 0x49, 0x44, - 0xb8, 0xd5, 0x5c, 0x33, 0x36, 0xfa, 0x8e, 0x3a, 0xd8, 0x9f, 0xc0, 0x42, 0xc9, 0xff, 0x2c, 0xfc, - 0x7b, 0xd0, 0xc6, 0x8a, 0x64, 0x19, 0x6b, 0xf5, 0x59, 0x09, 0xc8, 0xf9, 0xf6, 0x4f, 0x35, 0x68, - 0x4a, 0x52, 0x91, 0x67, 0x63, 0x92, 0x67, 0x74, 0x1b, 0x7a, 0x24, 0x71, 0x27, 0xc9, 0xa8, 0x49, - 0xff, 0xba, 0x24, 0x29, 0xf2, 0x8e, 0xfe, 0x0d, 0x2d, 0xff, 0x28, 0x8d, 0x8f, 0x13, 0xab, 0x2e, - 0x4d, 0x2d, 0x4c, 0x4c, 0x89, 0x60, 0xb7, 0x04, 0xcf, 0xc9, 0x44, 0xd0, 0x63, 0x00, 0x8f, 0x73, - 0x46, 0x0e, 0x53, 0x8e, 0x13, 0x19, 0x6d, 0xf7, 0xa1, 0xa5, 0x29, 0xa4, 0x09, 0x7e, 0x56, 0xf0, - 0x1d, 0x4d, 0x16, 0x3d, 0x81, 0x0e, 0x3e, 0xe3, 0x38, 0x0e, 0x70, 0x60, 0x35, 0xa5, 0xa1, 0x95, - 0xa9, 0x98, 0x36, 0x77, 0x32, 0xbe, 0x8a, 0xb0, 0x10, 0x1f, 0x3c, 0x85, 0x7e, 0x89, 0x85, 0xe6, - 0xa0, 0x7e, 0x8c, 0xf3, 0x9b, 0x15, 0x9f, 0x22, 0xbb, 0x27, 0x5e, 0x98, 0xaa, 0x22, 0xeb, 0x39, - 0xea, 0xf0, 0xbf, 
0xda, 0x63, 0xc3, 0xde, 0x06, 0x73, 0x37, 0x0d, 0xc3, 0x42, 0x31, 0x20, 0x2c, - 0x57, 0x0c, 0x08, 0x9b, 0x14, 0x5a, 0xed, 0xc2, 0x42, 0xfb, 0xc5, 0x80, 0xf9, 0x9d, 0x13, 0x1c, - 0xf3, 0x97, 0x94, 0x93, 0x21, 0xf1, 0x3d, 0x4e, 0x68, 0x8c, 0xee, 0x83, 0x49, 0xc3, 0xc0, 0xbd, - 0xb0, 0x52, 0x3b, 0x34, 0xcc, 0xbc, 0xbe, 0x0f, 0x66, 0x8c, 0x4f, 0xdd, 0x0b, 0xcd, 0x75, 0x62, - 0x7c, 0xaa, 0xa4, 0xd7, 0xa1, 0x1f, 0xe0, 0x10, 0x73, 0xec, 0x16, 0xb7, 0x23, 0xae, 0xae, 0xa7, - 0x88, 0x5b, 0xea, 0x3a, 0xee, 0xc2, 0x35, 0x01, 0x39, 0xf6, 0x18, 0x8e, 0xb9, 0x3b, 0xf6, 0xf8, - 0x91, 0xbc, 0x13, 0xd3, 0xe9, 0xc7, 0xf8, 0xf4, 0xb5, 0xa4, 0xbe, 0xf6, 0xf8, 0x91, 0xfd, 0x87, - 0x01, 0x66, 0x71, 0x99, 0xe8, 0x26, 0xb4, 0x85, 0x59, 0x97, 0x04, 0x59, 0x26, 0x5a, 0xe2, 0xb8, - 0x17, 0x88, 0xce, 0xa0, 0xc3, 0x61, 0x82, 0xb9, 0x74, 0xaf, 0xee, 0x64, 0x27, 0x51, 0x59, 0x09, - 0xf9, 0x4e, 0x35, 0x43, 0xc3, 0x91, 0xdf, 0x22, 0xe3, 0x11, 0x27, 0x11, 0x96, 0x06, 0xeb, 0x8e, - 0x3a, 0xa0, 0x05, 0x68, 0x62, 0x97, 0x7b, 0x23, 0x59, 0xe5, 0xa6, 0xd3, 0xc0, 0x6f, 0xbc, 0x11, - 0xfa, 0x27, 0x5c, 0x4d, 0x68, 0xca, 0x7c, 0xec, 0xe6, 0x66, 0x5b, 0x92, 0xdb, 0x53, 0xd4, 0x5d, - 0x65, 0xdc, 0x86, 0xfa, 0x90, 0x04, 0x56, 0x5b, 0x26, 0x66, 0xae, 0x5c, 0x84, 0x7b, 0x81, 0x23, - 0x98, 0xe8, 0x3f, 0x00, 0x05, 0x52, 0x60, 0x75, 0xce, 0x11, 0x35, 0x73, 0xdc, 0xc0, 0xfe, 0x02, - 0x5a, 0x19, 0xfc, 0x12, 0x98, 0x27, 0x34, 0x4c, 0xa3, 0x22, 0xec, 0xbe, 0xd3, 0x51, 0x84, 0xbd, - 0x00, 0xdd, 0x02, 0x39, 0xeb, 0x5c, 0x51, 0x55, 0x35, 0x19, 0xa4, 0xcc, 0xd0, 0x67, 0x58, 0x4e, - 0x0b, 0x9f, 0xd2, 0x63, 0xa2, 0xa2, 0x6f, 0x3b, 0xd9, 0xc9, 0xfe, 0xbd, 0x06, 0x57, 0xcb, 0xe5, - 0x2e, 0x4c, 0x48, 0x14, 0x99, 0x2b, 0x43, 0xc2, 0x48, 0xd8, 0x83, 0x52, 0xbe, 0x6a, 0x7a, 0xbe, - 0x72, 0x95, 0x88, 0x06, 0xca, 0x40, 0x5f, 0xa9, 0xbc, 0xa0, 0x01, 0x16, 0xd5, 0x9a, 0x92, 0x40, - 0x26, 0xb8, 0xef, 0x88, 0x4f, 0x41, 0x19, 0x91, 0x20, 0x1b, 0x21, 0xe2, 0x53, 0xba, 0xc7, 0x24, - 0x6e, 0x4b, 0x5d, 0x99, 0x3a, 0x89, 0x2b, 0x8b, 0x04, 0xb5, 0xad, 0xee, 0x41, 0x7c, 0xa3, 0x35, - 0xe8, 0x32, 0x3c, 0x0e, 0xb3, 0xea, 0x95, 0xe9, 0x33, 0x1d, 0x9d, 0x84, 0x56, 0x01, 0x7c, 0x1a, - 0x86, 0xd8, 0x97, 0x02, 0xa6, 0x14, 0xd0, 0x28, 0xa2, 0x72, 0x38, 0x0f, 0xdd, 0x04, 0xfb, 0x16, - 0xac, 0x19, 0x1b, 0x4d, 0xa7, 0xc5, 0x79, 0x78, 0x80, 0x7d, 0x11, 0x47, 0x9a, 0x60, 0xe6, 0xca, - 0x01, 0xd4, 0x95, 0x7a, 0x1d, 0x41, 0x90, 0xa3, 0x72, 0x05, 0x60, 0xc4, 0x68, 0x3a, 0x56, 0xdc, - 0xde, 0x5a, 0x5d, 0xcc, 0x63, 0x49, 0x91, 0xec, 0x3b, 0x70, 0x35, 0x79, 0x17, 0x85, 0x24, 0x3e, - 0x76, 0xb9, 0xc7, 0x46, 0x98, 0x5b, 0x7d, 0x55, 0xc3, 0x19, 0xf5, 0x8d, 0x24, 0xda, 0x5f, 0x02, - 0xda, 0x62, 0xd8, 0xe3, 0xf8, 0x2f, 0xac, 0x9e, 0x0f, 0xec, 0xee, 0xeb, 0xb0, 0x50, 0x82, 0x56, - 0x53, 0x58, 0x58, 0x7c, 0x3b, 0x0e, 0x3e, 0x96, 0xc5, 0x12, 0x74, 0x66, 0xf1, 0xbd, 0x01, 0x68, - 0x5b, 0x36, 0xf8, 0xdf, 0xdb, 0xaf, 0xa2, 0xe5, 0xc4, 0xdc, 0x57, 0x03, 0x24, 0xf0, 0xb8, 0x97, - 0x6d, 0xa6, 0x1e, 0x49, 0x14, 0xfe, 0xb6, 0xc7, 0xbd, 0x6c, 0x3b, 0x30, 0xec, 0xa7, 0x4c, 0x2c, - 0x2b, 0x59, 0x57, 0x72, 0x3b, 0x38, 0x39, 0x09, 0x3d, 0x82, 0x1b, 0x64, 0x14, 0x53, 0x86, 0x27, - 0x62, 0x2e, 0x66, 0x8c, 0x32, 0x59, 0x6f, 0x1d, 0x67, 0x51, 0x71, 0x0b, 0x85, 0x1d, 0xc1, 0x13, - 0xe1, 0x95, 0xc2, 0xc8, 0xc2, 0xfb, 0xd1, 0x00, 0xeb, 0x19, 0xa7, 0x11, 0xf1, 0x1d, 0x2c, 0xdc, - 0x2c, 0x05, 0xb9, 0x0e, 0x7d, 0x31, 0x4c, 0xa7, 0x03, 0xed, 0xd1, 0x30, 0x98, 0x2c, 0xab, 0x5b, - 0x20, 0xe6, 0xa9, 0xab, 0xc5, 0xdb, 0xa6, 0x61, 0x20, 0xcb, 0x68, 0x1d, 0xc4, 0xd0, 0xd3, 0xf4, - 0xd5, 0xea, 0xee, 0xc5, 0xf8, 0xb4, 0xa4, 
0x2f, 0x84, 0xa4, 0xbe, 0x9a, 0x94, 0xed, 0x18, 0x9f, - 0x0a, 0x7d, 0x7b, 0x09, 0x6e, 0xcd, 0xf0, 0x2d, 0xf3, 0xfc, 0x67, 0x03, 0x16, 0x9e, 0x25, 0x09, - 0x19, 0xc5, 0x9f, 0xcb, 0x99, 0x91, 0x3b, 0xbd, 0x08, 0x4d, 0x9f, 0xa6, 0x31, 0x97, 0xce, 0x36, - 0x1d, 0x75, 0x98, 0x6a, 0xa3, 0x5a, 0xa5, 0x8d, 0xa6, 0x1a, 0xb1, 0x5e, 0x6d, 0x44, 0xad, 0xd1, - 0x1a, 0xa5, 0x46, 0xfb, 0x07, 0x74, 0xc5, 0x75, 0xba, 0x3e, 0x8e, 0x39, 0x66, 0xd9, 0x98, 0x05, - 0x41, 0xda, 0x92, 0x14, 0xfb, 0x7b, 0x03, 0x16, 0xcb, 0x9e, 0x66, 0x6f, 0x8a, 0x73, 0xa7, 0xbe, - 0x18, 0x33, 0x2c, 0xcc, 0xdc, 0x14, 0x9f, 0xa2, 0x61, 0xc7, 0xe9, 0x61, 0x48, 0x7c, 0x57, 0x30, - 0x94, 0x7b, 0xa6, 0xa2, 0xbc, 0x65, 0xe1, 0x24, 0xe8, 0x86, 0x1e, 0x34, 0x82, 0x86, 0x97, 0xf2, - 0xa3, 0x7c, 0xf2, 0x8b, 0x6f, 0xfb, 0x11, 0x2c, 0xa8, 0x67, 0x5e, 0x39, 0x6b, 0x2b, 0x00, 0xc5, - 0x2c, 0x56, 0x2f, 0x1c, 0xd3, 0x31, 0xf3, 0x61, 0x9c, 0xd8, 0xff, 0x07, 0x73, 0x9f, 0xaa, 0x44, - 0x24, 0xe8, 0x01, 0x98, 0x61, 0x7e, 0xc8, 0x1e, 0x43, 0x68, 0xd2, 0x54, 0xb9, 0x9c, 0x33, 0x11, - 0xb2, 0x9f, 0x42, 0x27, 0x27, 0xe7, 0xb1, 0x19, 0xe7, 0xc5, 0x56, 0x9b, 0x8a, 0xcd, 0xfe, 0xcd, - 0x80, 0xc5, 0xb2, 0xcb, 0x59, 0xfa, 0xde, 0x42, 0xbf, 0x30, 0xe1, 0x46, 0xde, 0x38, 0xf3, 0xe5, - 0x81, 0xee, 0x4b, 0x55, 0xad, 0x70, 0x30, 0x79, 0xe1, 0x8d, 0x55, 0x49, 0xf5, 0x42, 0x8d, 0x34, - 0x78, 0x03, 0xf3, 0x15, 0x91, 0x19, 0xef, 0x9b, 0x7b, 0xfa, 0xfb, 0xa6, 0xf4, 0x46, 0x2b, 0xb4, - 0xf5, 0x47, 0xcf, 0x13, 0xb8, 0xa9, 0xfa, 0x6f, 0xab, 0x28, 0xba, 0x3c, 0xf7, 0xe5, 0xda, 0x34, - 0xa6, 0x6b, 0xd3, 0x1e, 0x80, 0x55, 0x55, 0xcd, 0xba, 0x60, 0x04, 0xf3, 0x07, 0xdc, 0xe3, 0x24, - 0xe1, 0xc4, 0x2f, 0x1e, 0xdb, 0x53, 0xc5, 0x6c, 0x5c, 0xb6, 0x55, 0xaa, 0xed, 0x30, 0x07, 0x75, - 0xce, 0xf3, 0x3a, 0x13, 0x9f, 0xe2, 0x16, 0x90, 0x6e, 0x29, 0xbb, 0x83, 0x8f, 0x60, 0x4a, 0xd4, - 0x03, 0xa7, 0xdc, 0x0b, 0xd5, 0xd6, 0x6e, 0xc8, 0xad, 0x6d, 0x4a, 0x8a, 0x5c, 0xdb, 0x6a, 0xb1, - 0x05, 0x8a, 0xdb, 0x54, 0x3b, 0x5d, 0x10, 0x24, 0x73, 0x05, 0x40, 0xb6, 0x94, 0xea, 0x86, 0x96, - 0xd2, 0x15, 0x94, 0x2d, 0x41, 0xb0, 0x57, 0x61, 0xf9, 0x53, 0xcc, 0xc5, 0xfb, 0x83, 0x6d, 0xd1, - 0x78, 0x48, 0x46, 0x29, 0xf3, 0xb4, 0xab, 0xb0, 0x7f, 0x30, 0x60, 0xe5, 0x1c, 0x81, 0x2c, 0x60, - 0x0b, 0xda, 0x91, 0x97, 0x70, 0xcc, 0xf2, 0x2e, 0xc9, 0x8f, 0xd3, 0xa9, 0xa8, 0x5d, 0x96, 0x8a, - 0x7a, 0x25, 0x15, 0xd7, 0xa1, 0x15, 0x79, 0x67, 0x6e, 0x74, 0x98, 0x3d, 0x30, 0x9a, 0x91, 0x77, - 0xf6, 0xe2, 0xf0, 0xe1, 0xfb, 0x36, 0xf4, 0x0e, 0xb0, 0x77, 0x8a, 0x71, 0x20, 0x1d, 0x43, 0xa3, - 0xbc, 0x21, 0xca, 0x3f, 0xd5, 0xd0, 0x9d, 0xe9, 0xca, 0x9f, 0xf9, 0xdb, 0x70, 0x70, 0xf7, 0x32, - 0xb1, 0xac, 0xb6, 0xae, 0xa0, 0x7d, 0xe8, 0x6a, 0xbf, 0x85, 0xd0, 0xb2, 0xa6, 0x58, 0xf9, 0x89, - 0x37, 0x58, 0x39, 0x87, 0xab, 0xa3, 0x69, 0x3b, 0x5d, 0x47, 0xab, 0xbe, 0x22, 0x74, 0xb4, 0x59, - 0x0f, 0x01, 0x89, 0xa6, 0xed, 0x6b, 0x1d, 0xad, 0xfa, 0x42, 0xd0, 0xd1, 0x66, 0x2d, 0x79, 0x89, - 0xa6, 0xad, 0x47, 0x1d, 0xad, 0xba, 0xfc, 0x75, 0xb4, 0x59, 0x3b, 0xf5, 0x0a, 0xfa, 0x1a, 0xe6, - 0x2b, 0x8b, 0x0b, 0xd9, 0x13, 0xad, 0xf3, 0x36, 0xee, 0x60, 0xfd, 0x42, 0x99, 0x02, 0xff, 0x15, - 0xf4, 0xf4, 0x85, 0x82, 0x34, 0x87, 0x66, 0xac, 0xc4, 0xc1, 0xea, 0x79, 0x6c, 0x1d, 0x50, 0x9f, - 0x95, 0x3a, 0xe0, 0x8c, 0x6d, 0xa1, 0x03, 0xce, 0x1a, 0xb1, 0xf6, 0x15, 0xf4, 0x15, 0xcc, 0x4d, - 0xcf, 0x2c, 0x74, 0x7b, 0x3a, 0x6d, 0x95, 0x51, 0x38, 0xb0, 0x2f, 0x12, 0x29, 0xc0, 0xf7, 0x00, - 0x26, 0xa3, 0x08, 0x2d, 0x4d, 0x74, 0x2a, 0xa3, 0x70, 0xb0, 0x3c, 0x9b, 0x59, 0x40, 0x7d, 0x03, - 0xd7, 0x67, 0xf6, 0x3b, 0xd2, 0x9a, 0xe4, 0xa2, 0x89, 0x31, 0xf8, 
0xd7, 0xa5, 0x72, 0xb9, 0xad, - 0xe7, 0xab, 0x30, 0x97, 0xa8, 0x36, 0x1e, 0x26, 0x9b, 0x7e, 0x48, 0x70, 0xcc, 0x9f, 0x83, 0xd4, - 0x78, 0xcd, 0x28, 0xa7, 0x87, 0x2d, 0xf9, 0x1f, 0xcf, 0x7f, 0xff, 0x0c, 0x00, 0x00, 0xff, 0xff, - 0x0e, 0xa9, 0xb5, 0x68, 0xf2, 0x11, 0x00, 0x00, + // 1759 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x4f, 0x6f, 0xdb, 0xc8, + 0x15, 0x0f, 0x25, 0x4b, 0x16, 0x9f, 0xa4, 0xac, 0x3d, 0x72, 0xb2, 0x8a, 0x62, 0xa7, 0x5e, 0xa6, + 0xd9, 0xba, 0x48, 0xe0, 0x06, 0xee, 0x1e, 0x76, 0xbb, 0xed, 0x21, 0x71, 0x9c, 0x45, 0xd0, 0x24, + 0x9b, 0xd2, 0x49, 0xb1, 0x45, 0x81, 0x12, 0x34, 0x39, 0x96, 0xa7, 0xa6, 0x38, 0xdc, 0x99, 0x61, + 0x6c, 0xef, 0x47, 0x29, 0xd0, 0x43, 0xbf, 0x43, 0x8f, 0x45, 0x2f, 0x45, 0x81, 0x7e, 0x8e, 0x1e, + 0x7b, 0xe8, 0x67, 0x28, 0xe6, 0x0d, 0x49, 0x0d, 0x45, 0xd9, 0xde, 0xed, 0x62, 0x6f, 0x9c, 0xf7, + 0x6f, 0xde, 0xfc, 0xde, 0x5f, 0x09, 0xfa, 0xc7, 0x2c, 0xa1, 0x62, 0x37, 0x13, 0x5c, 0x71, 0xd2, + 0xc3, 0x43, 0x90, 0x1d, 0x79, 0x5f, 0xc2, 0xdd, 0x97, 0x9c, 0x9f, 0xe6, 0xd9, 0x33, 0x26, 0x68, + 0xa4, 0xb8, 0xb8, 0x38, 0x48, 0x95, 0xb8, 0xf0, 0xe9, 0xd7, 0x39, 0x95, 0x8a, 0x6c, 0x82, 0x1b, + 0x97, 0x8c, 0xb1, 0xb3, 0xed, 0xec, 0xb8, 0xfe, 0x9c, 0x40, 0x08, 0xac, 0xa4, 0xe1, 0x8c, 0x8e, + 0x5b, 0xc8, 0xc0, 0x6f, 0xef, 0x00, 0x36, 0x97, 0x1b, 0x94, 0x19, 0x4f, 0x25, 0x25, 0x0f, 0xa0, + 0x43, 0x35, 0x01, 0xad, 0xf5, 0xf7, 0x3e, 0xd8, 0x2d, 0x5d, 0xd9, 0x35, 0x72, 0x86, 0xeb, 0xfd, + 0xdd, 0x01, 0xf2, 0x92, 0x49, 0xa5, 0x89, 0x8c, 0xca, 0x6f, 0xe7, 0xcf, 0x6d, 0xe8, 0x66, 0x82, + 0x1e, 0xb3, 0xf3, 0xc2, 0xa3, 0xe2, 0x44, 0x1e, 0xc1, 0xba, 0x54, 0xa1, 0x50, 0xcf, 0x05, 0x9f, + 0x3d, 0x67, 0x09, 0x7d, 0xad, 0x9d, 0x6e, 0xa3, 0x48, 0x93, 0x41, 0x76, 0x81, 0xb0, 0x34, 0x4a, + 0x72, 0xc9, 0xde, 0xd3, 0xc3, 0x92, 0x3b, 0x5e, 0xd9, 0x76, 0x76, 0x7a, 0xfe, 0x12, 0x0e, 0xd9, + 0x80, 0x4e, 0xc2, 0x66, 0x4c, 0x8d, 0x3b, 0xdb, 0xce, 0xce, 0xd0, 0x37, 0x07, 0xef, 0x97, 0x30, + 0xaa, 0xf9, 0xff, 0xdd, 0x9e, 0xff, 0xe7, 0x16, 0x74, 0x90, 0x50, 0x61, 0xec, 0xcc, 0x31, 0x26, + 0x1f, 0xc1, 0x80, 0xc9, 0x60, 0x0e, 0x44, 0x0b, 0x7d, 0xeb, 0x33, 0x59, 0x61, 0x4e, 0x1e, 0x42, + 0x37, 0x3a, 0xc9, 0xd3, 0x53, 0x39, 0x6e, 0x6f, 0xb7, 0x77, 0xfa, 0x7b, 0xa3, 0xf9, 0x45, 0xfa, + 0xa1, 0xfb, 0x9a, 0xe7, 0x17, 0x22, 0xe4, 0x53, 0x80, 0x50, 0x29, 0xc1, 0x8e, 0x72, 0x45, 0x25, + 0xbe, 0xb4, 0xbf, 0x37, 0xb6, 0x14, 0x72, 0x49, 0x9f, 0x54, 0x7c, 0xdf, 0x92, 0x25, 0x9f, 0x41, + 0x8f, 0x9e, 0x2b, 0x9a, 0xc6, 0x34, 0x1e, 0x77, 0xf0, 0xa2, 0xad, 0x85, 0x17, 0xed, 0x1e, 0x14, + 0x7c, 0xf3, 0xbe, 0x4a, 0x7c, 0xf2, 0x39, 0x0c, 0x6b, 0x2c, 0xb2, 0x06, 0xed, 0x53, 0x5a, 0x46, + 0x55, 0x7f, 0x6a, 0x64, 0xdf, 0x87, 0x49, 0x6e, 0x12, 0x6c, 0xe0, 0x9b, 0xc3, 0x2f, 0x5a, 0x9f, + 0x3a, 0xde, 0x33, 0x70, 0x9f, 0xe7, 0x49, 0x52, 0x29, 0xc6, 0x4c, 0x94, 0x8a, 0x31, 0x13, 0x73, + 0x94, 0x5b, 0x57, 0xa2, 0xfc, 0x37, 0x07, 0xd6, 0x0f, 0xde, 0xd3, 0x54, 0xbd, 0xe6, 0x8a, 0x1d, + 0xb3, 0x28, 0x54, 0x8c, 0xa7, 0xe4, 0x11, 0xb8, 0x3c, 0x89, 0x83, 0x2b, 0xc3, 0xd4, 0xe3, 0x49, + 0xe1, 0xf5, 0x23, 0x70, 0x53, 0x7a, 0x16, 0x5c, 0x79, 0x5d, 0x2f, 0xa5, 0x67, 0x46, 0xfa, 0x3e, + 0x0c, 0x63, 0x9a, 0x50, 0x45, 0x83, 0x2a, 0x3a, 0x3a, 0x74, 0x03, 0x43, 0xdc, 0x37, 0xe1, 0xf8, + 0x18, 0x3e, 0xd0, 0x26, 0xb3, 0x50, 0xd0, 0x54, 0x05, 0x59, 0xa8, 0x4e, 0x30, 0x26, 0xae, 0x3f, + 0x4c, 0xe9, 0xd9, 0x1b, 0xa4, 0xbe, 0x09, 0xd5, 0x89, 0xf7, 0xd7, 0x16, 0xb8, 0x55, 0x30, 0xc9, + 0x87, 0xb0, 0xaa, 0xaf, 0x0d, 0x58, 0x5c, 0x20, 0xd1, 0xd5, 0xc7, 0x17, 0xb1, 0xae, 0x0a, 
0x7e, + 0x7c, 0x2c, 0xa9, 0x42, 0xf7, 0xda, 0x7e, 0x71, 0xd2, 0x99, 0x25, 0xd9, 0x37, 0xa6, 0x10, 0x56, + 0x7c, 0xfc, 0xd6, 0x88, 0xcf, 0x14, 0x9b, 0x51, 0xbc, 0xb0, 0xed, 0x9b, 0x03, 0x19, 0x41, 0x87, + 0x06, 0x2a, 0x9c, 0x62, 0x86, 0xbb, 0xfe, 0x0a, 0x7d, 0x1b, 0x4e, 0xc9, 0x8f, 0xe1, 0xa6, 0xe4, + 0xb9, 0x88, 0x68, 0x50, 0x5e, 0xdb, 0x45, 0xee, 0xc0, 0x50, 0x9f, 0x9b, 0xcb, 0x3d, 0x68, 0x1f, + 0xb3, 0x78, 0xbc, 0x8a, 0xc0, 0xac, 0xd5, 0x93, 0xf0, 0x45, 0xec, 0x6b, 0x26, 0xf9, 0x19, 0x40, + 0x65, 0x29, 0x1e, 0xf7, 0x2e, 0x11, 0x75, 0x4b, 0xbb, 0x31, 0xd9, 0x02, 0x88, 0x58, 0x76, 0x42, + 0x45, 0xa0, 0x13, 0xc6, 0xc5, 0xe4, 0x70, 0x0d, 0xe5, 0xd7, 0xf4, 0x42, 0xb3, 0x99, 0x0c, 0xa6, + 0xdf, 0xb0, 0x2c, 0xa3, 0xf1, 0x18, 0x10, 0x61, 0x97, 0xc9, 0x2f, 0x0c, 0xc1, 0xfb, 0x0a, 0xba, + 0x85, 0x73, 0x77, 0xc1, 0x7d, 0xcf, 0x93, 0x7c, 0x56, 0x81, 0x36, 0xf4, 0x7b, 0x86, 0xf0, 0x22, + 0x26, 0x77, 0x00, 0xbb, 0x24, 0x5e, 0xd1, 0x42, 0x88, 0x10, 0x5f, 0x7d, 0xc1, 0x6d, 0xe8, 0x46, + 0x9c, 0x9f, 0x32, 0x83, 0xdd, 0xaa, 0x5f, 0x9c, 0xbc, 0xff, 0xb6, 0xe0, 0x66, 0xbd, 0x58, 0xf4, + 0x15, 0x68, 0x05, 0x91, 0x76, 0xd0, 0x0c, 0x9a, 0x3d, 0xac, 0xa1, 0xdd, 0xb2, 0xd1, 0x2e, 0x55, + 0x66, 0x3c, 0x36, 0x17, 0x0c, 0x8d, 0xca, 0x2b, 0x1e, 0x53, 0x9d, 0xeb, 0x39, 0x8b, 0x31, 0x3c, + 0x43, 0x5f, 0x7f, 0x6a, 0xca, 0x94, 0xc5, 0x45, 0xf3, 0xd1, 0x9f, 0xe8, 0x9e, 0x40, 0xbb, 0x5d, + 0x13, 0x70, 0x73, 0xd2, 0x01, 0x9f, 0x69, 0xea, 0xaa, 0x89, 0xa2, 0xfe, 0x26, 0xdb, 0xd0, 0x17, + 0x34, 0x4b, 0x8a, 0xdc, 0x47, 0xf0, 0x5d, 0xdf, 0x26, 0x91, 0x7b, 0x00, 0x11, 0x4f, 0x12, 0x1a, + 0xa1, 0x80, 0x8b, 0x02, 0x16, 0x45, 0xe7, 0x9d, 0x52, 0x49, 0x20, 0x69, 0x84, 0x50, 0x77, 0xfc, + 0xae, 0x52, 0xc9, 0x21, 0x8d, 0xf4, 0x3b, 0x72, 0x49, 0x45, 0x80, 0xed, 0xab, 0x8f, 0x7a, 0x3d, + 0x4d, 0xc0, 0x26, 0xbb, 0x05, 0x30, 0x15, 0x3c, 0xcf, 0x0c, 0x77, 0xb0, 0xdd, 0xd6, 0x9d, 0x1c, + 0x29, 0xc8, 0x7e, 0x00, 0x37, 0xe5, 0xc5, 0x2c, 0x61, 0xe9, 0x69, 0xa0, 0x42, 0x31, 0xa5, 0x6a, + 0x3c, 0x34, 0x15, 0x50, 0x50, 0xdf, 0x22, 0xd1, 0xcb, 0x80, 0xec, 0x0b, 0x1a, 0x2a, 0xfa, 0x1d, + 0x86, 0xd6, 0xb7, 0xeb, 0x0d, 0xe4, 0x16, 0x74, 0x79, 0x40, 0xcf, 0xa3, 0xa4, 0x28, 0xd1, 0x0e, + 0x3f, 0x38, 0x8f, 0x12, 0xef, 0x21, 0x8c, 0x6a, 0x37, 0x16, 0x6d, 0x7d, 0x03, 0x3a, 0x54, 0x08, + 0x5e, 0x36, 0x21, 0x73, 0xf0, 0x7e, 0x07, 0xe4, 0x5d, 0x16, 0xff, 0x10, 0xee, 0x79, 0xb7, 0x60, + 0x54, 0x33, 0x6d, 0xfc, 0xf0, 0xfe, 0xe9, 0x00, 0x79, 0x86, 0xbd, 0xe4, 0xfb, 0x8d, 0x71, 0x5d, + 0xdd, 0x7a, 0xc4, 0x98, 0x5e, 0x15, 0x87, 0x2a, 0x2c, 0x06, 0xe0, 0x80, 0x49, 0x63, 0xff, 0x59, + 0xa8, 0xc2, 0x62, 0x10, 0x09, 0x1a, 0xe5, 0x42, 0xcf, 0x44, 0x4c, 0x42, 0x1c, 0x44, 0x7e, 0x49, + 0x22, 0x9f, 0xc0, 0x6d, 0x36, 0x4d, 0xb9, 0xa0, 0x73, 0xb1, 0xc0, 0x40, 0xd5, 0x45, 0xe1, 0x0d, + 0xc3, 0xad, 0x14, 0x0e, 0x10, 0xb9, 0x87, 0x30, 0xaa, 0x3d, 0xe3, 0x4a, 0x98, 0xff, 0xe4, 0xc0, + 0xf8, 0x89, 0xe2, 0x33, 0x16, 0xf9, 0x54, 0x3b, 0x5f, 0x7b, 0xfa, 0x7d, 0x18, 0xea, 0x6e, 0xbe, + 0xf8, 0xfc, 0x01, 0x4f, 0xe2, 0xf9, 0xb4, 0xbc, 0x03, 0xba, 0xa1, 0x07, 0x16, 0x0a, 0xab, 0x3c, + 0x89, 0x31, 0x13, 0xef, 0x83, 0xee, 0xba, 0x96, 0xbe, 0xd9, 0x1b, 0x06, 0x29, 0x3d, 0xab, 0xe9, + 0x6b, 0x21, 0xd4, 0x37, 0xad, 0x7a, 0x35, 0xa5, 0x67, 0x5a, 0xdf, 0xbb, 0x0b, 0x77, 0x96, 0xf8, + 0x56, 0x84, 0xeb, 0x5f, 0x0e, 0x8c, 0x9e, 0x48, 0xc9, 0xa6, 0xe9, 0x6f, 0xb1, 0xed, 0x94, 0x4e, + 0x6f, 0x40, 0x27, 0xe2, 0x79, 0xaa, 0xd0, 0xd9, 0x8e, 0x6f, 0x0e, 0x0b, 0x95, 0xd8, 0x6a, 0x54, + 0xe2, 0x42, 0x2d, 0xb7, 0x9b, 0xb5, 0x6c, 0xd5, 0xea, 0x4a, 0xad, 0x56, 0x7f, 0x04, 0x7d, 0x1d, + 0xe4, 0x20, 0xa2, 
0xa9, 0xa2, 0xa2, 0xe8, 0xf3, 0xa0, 0x49, 0xfb, 0x48, 0xd1, 0x02, 0xf6, 0x3c, + 0x32, 0xad, 0x1e, 0xb2, 0xf9, 0x30, 0xfa, 0xb7, 0x03, 0x1b, 0xf5, 0xa7, 0x14, 0x31, 0xbb, 0x74, + 0x2e, 0xe9, 0x56, 0x26, 0x92, 0xe2, 0x1d, 0xfa, 0x53, 0x37, 0x85, 0x2c, 0x3f, 0x4a, 0x58, 0x14, + 0x68, 0x86, 0xf1, 0xdf, 0x35, 0x94, 0x77, 0x22, 0x99, 0xa3, 0xb2, 0x62, 0xa3, 0x42, 0x60, 0x25, + 0xcc, 0xd5, 0x49, 0x39, 0x9b, 0xf4, 0xf7, 0x02, 0x52, 0xdd, 0xeb, 0x90, 0x5a, 0x6d, 0x22, 0x55, + 0x65, 0x5a, 0xcf, 0xce, 0xb4, 0x4f, 0x60, 0x64, 0x96, 0xdb, 0x7a, 0xb8, 0xb6, 0x00, 0xaa, 0x39, + 0x22, 0xc7, 0x8e, 0x69, 0x66, 0xe5, 0x20, 0x91, 0xde, 0xaf, 0xc0, 0x7d, 0xc9, 0x8d, 0x5d, 0x49, + 0x1e, 0x83, 0x9b, 0x94, 0x07, 0x14, 0xed, 0xef, 0x91, 0x79, 0x8d, 0x97, 0x72, 0xfe, 0x5c, 0xc8, + 0xfb, 0x1c, 0x7a, 0x25, 0xb9, 0xc4, 0xcc, 0xb9, 0x0c, 0xb3, 0xd6, 0x02, 0x66, 0xde, 0x3f, 0x1c, + 0xd8, 0xa8, 0xbb, 0x5c, 0x84, 0xe5, 0x1d, 0x0c, 0xab, 0x2b, 0x82, 0x59, 0x98, 0x15, 0xbe, 0x3c, + 0xb6, 0x7d, 0x69, 0xaa, 0x55, 0x0e, 0xca, 0x57, 0x61, 0x66, 0x72, 0x79, 0x90, 0x58, 0xa4, 0xc9, + 0x5b, 0x58, 0x6f, 0x88, 0x2c, 0xd9, 0xec, 0x7e, 0x6a, 0x6f, 0x76, 0xb5, 0xed, 0xb4, 0xd2, 0xb6, + 0xd7, 0xbd, 0xcf, 0xe0, 0x43, 0xd3, 0x0e, 0xf6, 0xab, 0x18, 0x96, 0xd8, 0xd7, 0x43, 0xed, 0x2c, + 0x86, 0xda, 0x9b, 0xc0, 0xb8, 0xa9, 0x5a, 0x94, 0xdf, 0x14, 0xd6, 0x0f, 0x55, 0xa8, 0x98, 0x54, + 0x2c, 0xaa, 0x7e, 0x62, 0x2c, 0xe4, 0x86, 0x73, 0xdd, 0x44, 0x6c, 0xd6, 0xe1, 0x1a, 0xb4, 0x95, + 0x2a, 0xf3, 0x57, 0x7f, 0xea, 0x28, 0x10, 0xfb, 0xa6, 0x22, 0x06, 0x3f, 0xc0, 0x55, 0x3a, 0x1f, + 0x14, 0x57, 0x61, 0x62, 0x36, 0x8e, 0x15, 0xdc, 0x38, 0x5c, 0xa4, 0xe0, 0xca, 0x61, 0x86, 0x72, + 0x6c, 0xb8, 0x1d, 0xb3, 0x8f, 0x68, 0x02, 0x32, 0xb7, 0x00, 0xb0, 0x54, 0x4d, 0x95, 0x75, 0x8d, + 0xae, 0xa6, 0xec, 0x6b, 0x82, 0x77, 0x0f, 0x36, 0xbf, 0xa0, 0x4a, 0xef, 0x4e, 0x62, 0x9f, 0xa7, + 0xc7, 0x6c, 0x9a, 0x8b, 0xd0, 0x0a, 0x85, 0xf7, 0x1f, 0x07, 0xb6, 0x2e, 0x11, 0x28, 0x1e, 0x3c, + 0x86, 0xd5, 0x59, 0x28, 0x15, 0x15, 0x65, 0x95, 0x94, 0xc7, 0x45, 0x28, 0x5a, 0xd7, 0x41, 0xd1, + 0x6e, 0x40, 0x71, 0x0b, 0xba, 0xb3, 0xf0, 0x3c, 0x98, 0x1d, 0x15, 0xcb, 0x51, 0x67, 0x16, 0x9e, + 0xbf, 0x3a, 0xc2, 0xce, 0xc6, 0x44, 0x70, 0x94, 0x47, 0xa7, 0x54, 0xc9, 0xaa, 0xb3, 0x31, 0xf1, + 0xd4, 0x50, 0xf4, 0xa3, 0xb5, 0xc0, 0xd7, 0x39, 0xcd, 0xa9, 0x2c, 0x7a, 0x85, 0x1e, 0x8e, 0xbf, + 0x41, 0x02, 0x2e, 0x53, 0xb8, 0x59, 0x62, 0x97, 0xe8, 0xf9, 0xc5, 0x69, 0xef, 0x2f, 0x3d, 0x18, + 0x1c, 0xd2, 0xf0, 0x8c, 0xd2, 0x18, 0x1f, 0x4c, 0xa6, 0x65, 0xa1, 0xd5, 0x7f, 0xf8, 0x92, 0x07, + 0x8b, 0x15, 0xb5, 0xf4, 0x97, 0xf6, 0xe4, 0xe3, 0xeb, 0xc4, 0x8a, 0x9c, 0xbd, 0x41, 0x5e, 0x43, + 0xdf, 0xfa, 0x65, 0x49, 0x36, 0x2d, 0xc5, 0xc6, 0x0f, 0xe6, 0xc9, 0xd6, 0x25, 0xdc, 0xd2, 0xda, + 0x63, 0x87, 0xbc, 0x84, 0xbe, 0xb5, 0xd2, 0xd8, 0xf6, 0x9a, 0xbb, 0x95, 0x6d, 0x6f, 0xc9, 0x1e, + 0xe4, 0xdd, 0xd0, 0xd6, 0xac, 0xc5, 0xc4, 0xb6, 0xd6, 0x5c, 0x85, 0x6c, 0x6b, 0xcb, 0xb6, 0x19, + 0xb4, 0x66, 0xed, 0x01, 0xb6, 0xb5, 0xe6, 0x96, 0x63, 0x5b, 0x5b, 0xb2, 0x3c, 0x78, 0x37, 0xc8, + 0x57, 0x30, 0x3a, 0x54, 0x82, 0x86, 0xb3, 0x39, 0x7b, 0x01, 0xc1, 0xff, 0xc3, 0xea, 0x8e, 0xf3, + 0xd8, 0x21, 0x7f, 0x80, 0xf5, 0xc6, 0x94, 0x27, 0xde, 0x5c, 0xf3, 0xb2, 0xf5, 0x64, 0x72, 0xff, + 0x4a, 0x99, 0xca, 0xf3, 0x2f, 0x61, 0x60, 0x0f, 0x57, 0x62, 0x39, 0xb5, 0x64, 0x7f, 0x98, 0xdc, + 0xbb, 0x8c, 0x6d, 0x1b, 0xb4, 0xfb, 0xbb, 0x6d, 0x70, 0xc9, 0x84, 0xb3, 0x0d, 0x2e, 0x1b, 0x0b, + 0xde, 0x0d, 0xf2, 0x7b, 0x58, 0x5b, 0xec, 0xb3, 0xe4, 0xa3, 0x45, 0xe8, 0x1a, 0xed, 0x7b, 0xe2, + 0x5d, 0x25, 0x52, 0x19, 0x7f, 0x01, 0x30, 
0x6f, 0x9f, 0xe4, 0xee, 0x5c, 0xa7, 0xd1, 0xbe, 0x27, 0x9b, 0xcb, 0x99, 0x95, 0xa9, 0x3f, 0xc2, + 0xad, 0xa5, 0x3d, 0x8a, 0x58, 0x05, 0x78, 0x55, 0x97, 0x9b, 0xfc, 0xe4, 0x5a, 0xb9, 0xf2, 0xae, + 0xa7, 0xf7, 0x60, 0x4d, 0x9a, 0x16, 0x71, 0x2c, 0x77, 0xa3, 0x84, 0xd1, 0x54, 0x3d, 0x05, 0xd4, + 0x78, 0x23, 0xb8, 0xe2, 0x47, 0x5d, 0xfc, 0x37, 0xee, 0xe7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, + 0x35, 0x0b, 0x9e, 0x2e, 0x9c, 0x13, 0x00, 0x00, } diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go index 5c40332e6..96ab2154f 100644 --- a/weed/pb/filer_pb/filer_pb_helper.go +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -1,6 +1,12 @@ package filer_pb import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/needle" ) @@ -67,3 +73,33 @@ func AfterEntryDeserialization(chunks []*FileChunk) { } } + +func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error { + resp, err := client.CreateEntry(context.Background(), request) + if err != nil { + glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err) + return fmt.Errorf("CreateEntry: %v", err) + } + if resp.Error != "" { + glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error) + return fmt.Errorf("CreateEntry: %v", resp.Error) + } + return nil +} + +func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { + resp, err := client.LookupDirectoryEntry(context.Background(), request) + if err != nil { + if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { + return nil, ErrNotFound + } + glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err) + return nil, fmt.Errorf("LookupEntry: %v", err) + } + if resp.Entry == nil { + return nil, ErrNotFound + } + return resp, nil +} + +var ErrNotFound = errors.New("filer: no entry is found in filer store") diff --git a/weed/util/grpc_client_server.go b/weed/pb/grpc_client_server.go similarity index 52% rename from weed/util/grpc_client_server.go rename to weed/pb/grpc_client_server.go index 31497ad35..4b5f9eff3 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/pb/grpc_client_server.go @@ -1,4 +1,4 @@ -package util +package pb import ( "context" @@ -11,6 +11,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/keepalive" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" ) var ( @@ -29,7 +32,8 @@ func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { Time: 10 * time.Second, // wait time before ping if no activity Timeout: 20 * time.Second, // ping timeout }), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: 60 * time.Second, // min time a client should wait before sending a ping + MinTime: 60 * time.Second, // min time a client should wait before sending a ping + PermitWithoutStream: true, })) for _, opt := range opts { if opt != nil { @@ -46,8 +50,9 @@ func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*gr options = append(options, // grpc.WithInsecure(), grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 30 * time.Second, // client ping server if no activity for this long - Timeout: 20 * time.Second, + Time: 30 * time.Second, // client ping server if no activity for this long + Timeout: 20 * time.Second, + PermitWithoutStream: true, })) for _, opt :=
range opts { if opt != nil { @@ -57,17 +62,24 @@ func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*gr return grpc.DialContext(ctx, address, options...) } -func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { +func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { grpcClientsLock.Lock() existingConnection, found := grpcClients[address] if found { grpcClientsLock.Unlock() - return fn(existingConnection) + err := fn(existingConnection) + if err != nil { + grpcClientsLock.Lock() + delete(grpcClients, address) + grpcClientsLock.Unlock() + existingConnection.Close() + } + return err } - grpcConnection, err := GrpcDial(ctx, address, opts...) + grpcConnection, err := GrpcDial(context.Background(), address, opts...) if err != nil { grpcClientsLock.Unlock() return fmt.Errorf("fail to dial %s: %v", address, err) @@ -118,3 +130,53 @@ func ServerToGrpcAddress(server string) (serverGrpcAddress string) { return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort) } + +func WithMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { + + masterGrpcAddress, parseErr := ParseServerToGrpcAddress(master) + if parseErr != nil { + return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) + } + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := master_pb.NewSeaweedClient(grpcConnection) + return fn(client) + }, masterGrpcAddress, grpcDialOption) + +} + +func WithFilerClient(filer string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress, parseErr := ParseServerToGrpcAddress(filer) + if parseErr != nil { + return fmt.Errorf("failed to parse filer grpc %v: %v", filer, parseErr) + } + + return WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, fn) + +} + +func WithGrpcFilerClient(filerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, filerGrpcAddress, grpcDialOption) + +} + +func ParseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) { + hostnameAndPort := strings.Split(filer, ":") + if len(hostnameAndPort) != 2 { + return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort) + } + + filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) + if parseErr != nil { + return "", fmt.Errorf("filer port parse error: %v", parseErr) + } + + filerGrpcPort := int(filerPort) + 10000 + + return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil +} diff --git a/weed/pb/iam.proto b/weed/pb/iam.proto new file mode 100644 index 000000000..2eef22dd9 --- /dev/null +++ b/weed/pb/iam.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package iam_pb; + +option java_package = "seaweedfs.client"; +option java_outer_classname = "IamProto"; + +////////////////////////////////////////////////// + +service SeaweedIdentityAccessManagement { + +} + +////////////////////////////////////////////////// + +message S3ApiConfiguration { + repeated Identity identities = 1; +} + +message Identity { + string name = 1; + repeated Credential credentials = 2; + repeated string actions = 3; +} + +message Credential { + string access_key = 1; + string 
secret_key = 2; + // uint64 expiration = 3; + // bool is_disabled = 4; +} + +/* +message Policy { + repeated Statement statements = 1; +} + +message Statement { + repeated Action action = 1; + repeated Resource resource = 2; +} + +message Action { + string action = 1; +} +message Resource { + string bucket = 1; + // string path = 2; +} +*/ \ No newline at end of file diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go new file mode 100644 index 000000000..b7d7b038b --- /dev/null +++ b/weed/pb/iam_pb/iam.pb.go @@ -0,0 +1,174 @@ +// Code generated by protoc-gen-go. +// source: iam.proto +// DO NOT EDIT! + +/* +Package iam_pb is a generated protocol buffer package. + +It is generated from these files: + iam.proto + +It has these top-level messages: + S3ApiConfiguration + Identity + Credential +*/ +package iam_pb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type S3ApiConfiguration struct { + Identities []*Identity `protobuf:"bytes,1,rep,name=identities" json:"identities,omitempty"` +} + +func (m *S3ApiConfiguration) Reset() { *m = S3ApiConfiguration{} } +func (m *S3ApiConfiguration) String() string { return proto.CompactTextString(m) } +func (*S3ApiConfiguration) ProtoMessage() {} +func (*S3ApiConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *S3ApiConfiguration) GetIdentities() []*Identity { + if m != nil { + return m.Identities + } + return nil +} + +type Identity struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials" json:"credentials,omitempty"` + Actions []string `protobuf:"bytes,3,rep,name=actions" json:"actions,omitempty"` +} + +func (m *Identity) Reset() { *m = Identity{} } +func (m *Identity) String() string { return proto.CompactTextString(m) } +func (*Identity) ProtoMessage() {} +func (*Identity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Identity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Identity) GetCredentials() []*Credential { + if m != nil { + return m.Credentials + } + return nil +} + +func (m *Identity) GetActions() []string { + if m != nil { + return m.Actions + } + return nil +} + +type Credential struct { + AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey" json:"access_key,omitempty"` + SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey" json:"secret_key,omitempty"` +} + +func (m *Credential) Reset() { *m = Credential{} } +func (m *Credential) String() string { return proto.CompactTextString(m) } +func (*Credential) ProtoMessage() {} +func (*Credential) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Credential) GetAccessKey() string { + if m != nil { + return m.AccessKey + } + return "" +} + +func (m *Credential) GetSecretKey() string { + if m != nil { + return m.SecretKey + } + return "" +} + 
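As an aside on the shape of the new IAM messages (a sketch, not code from this change): an S3ApiConfiguration is a flat list of identities, each pairing static credentials with allowed action names. The identity name, keys, and actions below are hypothetical values, and the SeaweedIdentityAccessManagement service itself is still empty at this point:

package example // illustrative only

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
)

// demoIamConfig builds a one-identity configuration in memory and
// walks it with the generated getters.
func demoIamConfig() {
	conf := &iam_pb.S3ApiConfiguration{
		Identities: []*iam_pb.Identity{{
			Name: "admin", // hypothetical identity name
			Credentials: []*iam_pb.Credential{{
				AccessKey: "some_access_key_1", // hypothetical key pair
				SecretKey: "some_secret_key_1",
			}},
			Actions: []string{"Read", "Write"}, // hypothetical action names
		}},
	}
	for _, ident := range conf.GetIdentities() {
		fmt.Printf("%s: %d credential(s), actions %v\n",
			ident.GetName(), len(ident.GetCredentials()), ident.GetActions())
	}
}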
+func init() { + proto.RegisterType((*S3ApiConfiguration)(nil), "iam_pb.S3ApiConfiguration") + proto.RegisterType((*Identity)(nil), "iam_pb.Identity") + proto.RegisterType((*Credential)(nil), "iam_pb.Credential") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for SeaweedIdentityAccessManagement service + +type SeaweedIdentityAccessManagementClient interface { +} + +type seaweedIdentityAccessManagementClient struct { + cc *grpc.ClientConn +} + +func NewSeaweedIdentityAccessManagementClient(cc *grpc.ClientConn) SeaweedIdentityAccessManagementClient { + return &seaweedIdentityAccessManagementClient{cc} +} + +// Server API for SeaweedIdentityAccessManagement service + +type SeaweedIdentityAccessManagementServer interface { +} + +func RegisterSeaweedIdentityAccessManagementServer(s *grpc.Server, srv SeaweedIdentityAccessManagementServer) { + s.RegisterService(&_SeaweedIdentityAccessManagement_serviceDesc, srv) +} + +var _SeaweedIdentityAccessManagement_serviceDesc = grpc.ServiceDesc{ + ServiceName: "iam_pb.SeaweedIdentityAccessManagement", + HandlerType: (*SeaweedIdentityAccessManagementServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: "iam.proto", +} + +func init() { proto.RegisterFile("iam.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 250 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x90, 0x41, 0x4b, 0xc3, 0x40, + 0x10, 0x85, 0x69, 0x23, 0xb5, 0x99, 0x5e, 0xca, 0x9c, 0xf6, 0xa0, 0x18, 0x73, 0xca, 0x29, 0x48, + 0xeb, 0x1f, 0xa8, 0x05, 0xa1, 0x16, 0x41, 0xd2, 0x1f, 0x50, 0xa6, 0xdb, 0x69, 0x19, 0xec, 0x6e, + 0x42, 0x76, 0x45, 0xf2, 0xef, 0x25, 0xbb, 0x46, 0x7b, 0xdb, 0x7d, 0xdf, 0x7b, 0xb3, 0x3b, 0x0f, + 0x52, 0x21, 0x53, 0x36, 0x6d, 0xed, 0x6b, 0x9c, 0x08, 0x99, 0x7d, 0x73, 0xc8, 0x5f, 0x01, 0x77, + 0xcb, 0x55, 0x23, 0xeb, 0xda, 0x9e, 0xe4, 0xfc, 0xd5, 0x92, 0x97, 0xda, 0xe2, 0x13, 0x80, 0x1c, + 0xd9, 0x7a, 0xf1, 0xc2, 0x4e, 0x8d, 0xb2, 0xa4, 0x98, 0x2d, 0xe6, 0x65, 0x8c, 0x94, 0x9b, 0x48, + 0xba, 0xea, 0xca, 0x93, 0x5b, 0x98, 0x0e, 0x3a, 0x22, 0xdc, 0x58, 0x32, 0xac, 0x46, 0xd9, 0xa8, + 0x48, 0xab, 0x70, 0xc6, 0x67, 0x98, 0xe9, 0x96, 0x83, 0x83, 0x2e, 0x4e, 0x8d, 0xc3, 0x48, 0x1c, + 0x46, 0xae, 0xff, 0x50, 0x75, 0x6d, 0x43, 0x05, 0xb7, 0xa4, 0xfb, 0x1f, 0x39, 0x95, 0x64, 0x49, + 0x91, 0x56, 0xc3, 0x35, 0x7f, 0x03, 0xf8, 0x0f, 0xe1, 0x3d, 0x00, 0x69, 0xcd, 0xce, 0xed, 0x3f, + 0xb9, 0xfb, 0x7d, 0x37, 0x8d, 0xca, 0x96, 0xbb, 0x1e, 0x3b, 0xd6, 0x2d, 0xfb, 0x80, 0xc7, 0x11, + 0x47, 0x65, 0xcb, 0xdd, 0xe2, 0x11, 0x1e, 0x76, 0x4c, 0xdf, 0xcc, 0xc7, 0x61, 0x85, 0x55, 0x88, + 0xbe, 0x93, 0xa5, 0x33, 0x1b, 0xb6, 0xfe, 0xe5, 0x0e, 0xe6, 0x2e, 0x5a, 0x4e, 0xae, 0xd4, 0x17, + 0xe9, 0xb5, 0xe9, 0x86, 0xcc, 0x47, 0x5f, 0xe6, 0x61, 0x12, 0x3a, 0x5d, 0xfe, 0x04, 0x00, 0x00, + 0xff, 0xff, 0x83, 0x4f, 0x61, 0x03, 0x60, 0x01, 0x00, 0x00, +} diff --git a/weed/pb/master.proto b/weed/pb/master.proto index f03b3a9ab..4310b2602 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -25,6 +25,8 @@ service Seaweed { } rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) { } + rpc ListMasterClients (ListMasterClientsRequest) returns (ListMasterClientsResponse) { + } } 
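A usage sketch for the new ListMasterClients RPC (not part of the change; it assumes the regenerated master_pb client and an already-dialed master_pb.SeaweedClient, and the "filer" client type is an illustrative filter value). The request and response messages are defined near the end of this proto file:

package example // illustrative only

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// printMasterClients asks the master for the gRPC addresses of its
// currently connected clients of one type.
func printMasterClients(client master_pb.SeaweedClient) error {
	resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{
		ClientType: "filer", // hypothetical filter value
	})
	if err != nil {
		return err
	}
	for _, addr := range resp.GrpcAddresses {
		fmt.Println("client gRPC address:", addr)
	}
	return nil
}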
////////////////////////////////////////////////// @@ -58,6 +60,7 @@ message HeartbeatResponse { string leader = 2; string metrics_address = 3; uint32 metrics_interval_seconds = 4; + repeated StorageBackend storage_backends = 5; } message VolumeInformationMessage { @@ -73,6 +76,8 @@ message VolumeInformationMessage { uint32 ttl = 10; uint32 compact_revision = 11; int64 modified_at_second = 12; + string remote_storage_name = 13; + string remote_storage_key = 14; } message VolumeShortInformationMessage { @@ -89,6 +94,12 @@ message VolumeEcShardInformationMessage { uint32 ec_index_bits = 3; } +message StorageBackend { + string type = 1; + string id = 2; + map properties = 3; +} + message Empty { } @@ -103,6 +114,7 @@ message SuperBlockExtra { message KeepConnectedRequest { string name = 1; + uint32 grpc_port = 2; } message VolumeLocation { @@ -201,6 +213,7 @@ message DataNodeInfo { uint64 active_volume_count = 5; repeated VolumeInformationMessage volume_infos = 6; repeated VolumeEcShardInformationMessage ec_shard_infos = 7; + uint64 remote_volume_count = 8; } message RackInfo { string id = 1; @@ -209,6 +222,7 @@ message RackInfo { uint64 free_volume_count = 4; uint64 active_volume_count = 5; repeated DataNodeInfo data_node_infos = 6; + uint64 remote_volume_count = 7; } message DataCenterInfo { string id = 1; @@ -217,6 +231,7 @@ message DataCenterInfo { uint64 free_volume_count = 4; uint64 active_volume_count = 5; repeated RackInfo rack_infos = 6; + uint64 remote_volume_count = 7; } message TopologyInfo { string id = 1; @@ -225,6 +240,7 @@ message TopologyInfo { uint64 free_volume_count = 4; uint64 active_volume_count = 5; repeated DataCenterInfo data_center_infos = 6; + uint64 remote_volume_count = 7; } message VolumeListRequest { } @@ -251,3 +267,10 @@ message GetMasterConfigurationResponse { string metrics_address = 1; uint32 metrics_interval_seconds = 2; } + +message ListMasterClientsRequest { + string client_type = 1; +} +message ListMasterClientsResponse { + repeated string grpc_addresses = 1; +} diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index e9fe8164c..c33e2b768 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -14,6 +14,7 @@ It has these top-level messages: VolumeInformationMessage VolumeShortInformationMessage VolumeEcShardInformationMessage + StorageBackend Empty SuperBlockExtra KeepConnectedRequest @@ -41,6 +42,8 @@ It has these top-level messages: LookupEcVolumeResponse GetMasterConfigurationRequest GetMasterConfigurationResponse + ListMasterClientsRequest + ListMasterClientsResponse */ package master_pb @@ -204,10 +207,11 @@ func (m *Heartbeat) GetHasNoEcShards() bool { } type HeartbeatResponse struct { - VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit" json:"volume_size_limit,omitempty"` - Leader string `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"` - MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"` - MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` + VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit" json:"volume_size_limit,omitempty"` + Leader string `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"` + MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"` + 
MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` + StorageBackends []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends" json:"storage_backends,omitempty"` } func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} } @@ -243,19 +247,28 @@ func (m *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 { return 0 } +func (m *HeartbeatResponse) GetStorageBackends() []*StorageBackend { + if m != nil { + return m.StorageBackends + } + return nil +} + type VolumeInformationMessage struct { - Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Size uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` - DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount" json:"delete_count,omitempty"` - DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount" json:"deleted_byte_count,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` - Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"` - Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` - CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"` - ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond" json:"modified_at_second,omitempty"` + Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + Size uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` + FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` + DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount" json:"delete_count,omitempty"` + DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount" json:"deleted_byte_count,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` + CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"` + ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond" json:"modified_at_second,omitempty"` + RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName" json:"remote_storage_name,omitempty"` + RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey" json:"remote_storage_key,omitempty"` } func (m *VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} } @@ -347,6 +360,20 @@ func (m *VolumeInformationMessage) GetModifiedAtSecond() int64 { return 0 } +func (m 
*VolumeInformationMessage) GetRemoteStorageName() string { + if m != nil { + return m.RemoteStorageName + } + return "" +} + +func (m *VolumeInformationMessage) GetRemoteStorageKey() string { + if m != nil { + return m.RemoteStorageKey + } + return "" +} + type VolumeShortInformationMessage struct { Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` @@ -427,13 +454,45 @@ func (m *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 { return 0 } +type StorageBackend struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + Properties map[string]string `protobuf:"bytes,3,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *StorageBackend) Reset() { *m = StorageBackend{} } +func (m *StorageBackend) String() string { return proto.CompactTextString(m) } +func (*StorageBackend) ProtoMessage() {} +func (*StorageBackend) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *StorageBackend) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *StorageBackend) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *StorageBackend) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + type Empty struct { } func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } type SuperBlockExtra struct { ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding" json:"erasure_coding,omitempty"` @@ -442,7 +501,7 @@ type SuperBlockExtra struct { func (m *SuperBlockExtra) Reset() { *m = SuperBlockExtra{} } func (m *SuperBlockExtra) String() string { return proto.CompactTextString(m) } func (*SuperBlockExtra) ProtoMessage() {} -func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding { if m != nil { @@ -461,7 +520,7 @@ func (m *SuperBlockExtra_ErasureCoding) Reset() { *m = SuperBlockExtra_E func (m *SuperBlockExtra_ErasureCoding) String() string { return proto.CompactTextString(m) } func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {} func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{6, 0} + return fileDescriptor0, []int{7, 0} } func (m *SuperBlockExtra_ErasureCoding) GetData() uint32 { @@ -486,13 +545,14 @@ func (m *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 { } type KeepConnectedRequest struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort" json:"grpc_port,omitempty"` } func (m *KeepConnectedRequest) Reset() { *m = KeepConnectedRequest{} } func (m *KeepConnectedRequest) String() string { return proto.CompactTextString(m) } func (*KeepConnectedRequest) ProtoMessage() {} -func (*KeepConnectedRequest) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{7} } +func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *KeepConnectedRequest) GetName() string { if m != nil { @@ -501,6 +561,13 @@ func (m *KeepConnectedRequest) GetName() string { return "" } +func (m *KeepConnectedRequest) GetGrpcPort() uint32 { + if m != nil { + return m.GrpcPort + } + return 0 +} + type VolumeLocation struct { Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` @@ -512,7 +579,7 @@ type VolumeLocation struct { func (m *VolumeLocation) Reset() { *m = VolumeLocation{} } func (m *VolumeLocation) String() string { return proto.CompactTextString(m) } func (*VolumeLocation) ProtoMessage() {} -func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *VolumeLocation) GetUrl() string { if m != nil { @@ -557,7 +624,7 @@ type LookupVolumeRequest struct { func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } func (*LookupVolumeRequest) ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } func (m *LookupVolumeRequest) GetVolumeIds() []string { if m != nil { @@ -580,7 +647,7 @@ type LookupVolumeResponse struct { func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } func (m *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation { if m != nil { @@ -599,7 +666,7 @@ func (m *LookupVolumeResponse_VolumeIdLocation) Reset() { *m = LookupVol func (m *LookupVolumeResponse_VolumeIdLocation) String() string { return proto.CompactTextString(m) } func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {} func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{10, 0} + return fileDescriptor0, []int{11, 0} } func (m *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string { @@ -631,7 +698,7 @@ type Location struct { func (m *Location) Reset() { *m = Location{} } func (m *Location) String() string { return proto.CompactTextString(m) } func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *Location) GetUrl() string { if m != nil { @@ -662,7 +729,7 @@ type AssignRequest struct { func (m *AssignRequest) Reset() { *m = AssignRequest{} } func (m *AssignRequest) String() string { return proto.CompactTextString(m) } func (*AssignRequest) ProtoMessage() {} -func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *AssignRequest) GetCount() uint64 { if m != nil { @@ -739,7 +806,7 @@ type AssignResponse 
struct { func (m *AssignResponse) Reset() { *m = AssignResponse{} } func (m *AssignResponse) String() string { return proto.CompactTextString(m) } func (*AssignResponse) ProtoMessage() {} -func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *AssignResponse) GetFid() string { if m != nil { @@ -792,7 +859,7 @@ type StatisticsRequest struct { func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } func (m *StatisticsRequest) GetReplication() string { if m != nil { @@ -827,7 +894,7 @@ type StatisticsResponse struct { func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } func (*StatisticsResponse) ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *StatisticsResponse) GetReplication() string { if m != nil { @@ -879,7 +946,7 @@ type StorageType struct { func (m *StorageType) Reset() { *m = StorageType{} } func (m *StorageType) String() string { return proto.CompactTextString(m) } func (*StorageType) ProtoMessage() {} -func (*StorageType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*StorageType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *StorageType) GetReplication() string { if m != nil { @@ -902,7 +969,7 @@ type Collection struct { func (m *Collection) Reset() { *m = Collection{} } func (m *Collection) String() string { return proto.CompactTextString(m) } func (*Collection) ProtoMessage() {} -func (*Collection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*Collection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *Collection) GetName() string { if m != nil { @@ -919,7 +986,7 @@ type CollectionListRequest struct { func (m *CollectionListRequest) Reset() { *m = CollectionListRequest{} } func (m *CollectionListRequest) String() string { return proto.CompactTextString(m) } func (*CollectionListRequest) ProtoMessage() {} -func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *CollectionListRequest) GetIncludeNormalVolumes() bool { if m != nil { @@ -942,7 +1009,7 @@ type CollectionListResponse struct { func (m *CollectionListResponse) Reset() { *m = CollectionListResponse{} } func (m *CollectionListResponse) String() string { return proto.CompactTextString(m) } func (*CollectionListResponse) ProtoMessage() {} -func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *CollectionListResponse) GetCollections() []*Collection { if m != nil { @@ -958,7 +1025,7 @@ type CollectionDeleteRequest struct { func (m *CollectionDeleteRequest) Reset() { *m = CollectionDeleteRequest{} } func (m 
*CollectionDeleteRequest) String() string { return proto.CompactTextString(m) } func (*CollectionDeleteRequest) ProtoMessage() {} -func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } func (m *CollectionDeleteRequest) GetName() string { if m != nil { @@ -973,7 +1040,7 @@ type CollectionDeleteResponse struct { func (m *CollectionDeleteResponse) Reset() { *m = CollectionDeleteResponse{} } func (m *CollectionDeleteResponse) String() string { return proto.CompactTextString(m) } func (*CollectionDeleteResponse) ProtoMessage() {} -func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } // // volume related @@ -986,12 +1053,13 @@ type DataNodeInfo struct { ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos" json:"volume_infos,omitempty"` EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos" json:"ec_shard_infos,omitempty"` + RemoteVolumeCount uint64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` } func (m *DataNodeInfo) Reset() { *m = DataNodeInfo{} } func (m *DataNodeInfo) String() string { return proto.CompactTextString(m) } func (*DataNodeInfo) ProtoMessage() {} -func (*DataNodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*DataNodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } func (m *DataNodeInfo) GetId() string { if m != nil { @@ -1042,6 +1110,13 @@ func (m *DataNodeInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage { return nil } +func (m *DataNodeInfo) GetRemoteVolumeCount() uint64 { + if m != nil { + return m.RemoteVolumeCount + } + return 0 +} + type RackInfo struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` @@ -1049,12 +1124,13 @@ type RackInfo struct { FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` DataNodeInfos []*DataNodeInfo `protobuf:"bytes,6,rep,name=data_node_infos,json=dataNodeInfos" json:"data_node_infos,omitempty"` + RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` } func (m *RackInfo) Reset() { *m = RackInfo{} } func (m *RackInfo) String() string { return proto.CompactTextString(m) } func (*RackInfo) ProtoMessage() {} -func (*RackInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*RackInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func (m *RackInfo) GetId() string { if m != nil { @@ -1098,6 +1174,13 @@ func (m *RackInfo) GetDataNodeInfos() []*DataNodeInfo { return nil } +func (m *RackInfo) GetRemoteVolumeCount() uint64 { + if m != nil { + return m.RemoteVolumeCount + } + return 0 +} + type DataCenterInfo struct { Id string 
`protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` @@ -1105,12 +1188,13 @@ type DataCenterInfo struct { FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` RackInfos []*RackInfo `protobuf:"bytes,6,rep,name=rack_infos,json=rackInfos" json:"rack_infos,omitempty"` + RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` } func (m *DataCenterInfo) Reset() { *m = DataCenterInfo{} } func (m *DataCenterInfo) String() string { return proto.CompactTextString(m) } func (*DataCenterInfo) ProtoMessage() {} -func (*DataCenterInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +func (*DataCenterInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } func (m *DataCenterInfo) GetId() string { if m != nil { @@ -1154,6 +1238,13 @@ func (m *DataCenterInfo) GetRackInfos() []*RackInfo { return nil } +func (m *DataCenterInfo) GetRemoteVolumeCount() uint64 { + if m != nil { + return m.RemoteVolumeCount + } + return 0 +} + type TopologyInfo struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` @@ -1161,12 +1252,13 @@ type TopologyInfo struct { FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` DataCenterInfos []*DataCenterInfo `protobuf:"bytes,6,rep,name=data_center_infos,json=dataCenterInfos" json:"data_center_infos,omitempty"` + RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` } func (m *TopologyInfo) Reset() { *m = TopologyInfo{} } func (m *TopologyInfo) String() string { return proto.CompactTextString(m) } func (*TopologyInfo) ProtoMessage() {} -func (*TopologyInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*TopologyInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *TopologyInfo) GetId() string { if m != nil { @@ -1210,13 +1302,20 @@ func (m *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo { return nil } +func (m *TopologyInfo) GetRemoteVolumeCount() uint64 { + if m != nil { + return m.RemoteVolumeCount + } + return 0 +} + type VolumeListRequest struct { } func (m *VolumeListRequest) Reset() { *m = VolumeListRequest{} } func (m *VolumeListRequest) String() string { return proto.CompactTextString(m) } func (*VolumeListRequest) ProtoMessage() {} -func (*VolumeListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*VolumeListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } type VolumeListResponse struct { TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo" json:"topology_info,omitempty"` @@ -1226,7 +1325,7 @@ type VolumeListResponse struct { func (m *VolumeListResponse) Reset() { *m = VolumeListResponse{} } func (m *VolumeListResponse) String() string { return proto.CompactTextString(m) } func 
(*VolumeListResponse) ProtoMessage() {} -func (*VolumeListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (*VolumeListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } func (m *VolumeListResponse) GetTopologyInfo() *TopologyInfo { if m != nil { @@ -1249,7 +1348,7 @@ type LookupEcVolumeRequest struct { func (m *LookupEcVolumeRequest) Reset() { *m = LookupEcVolumeRequest{} } func (m *LookupEcVolumeRequest) String() string { return proto.CompactTextString(m) } func (*LookupEcVolumeRequest) ProtoMessage() {} -func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } func (m *LookupEcVolumeRequest) GetVolumeId() uint32 { if m != nil { @@ -1266,7 +1365,7 @@ type LookupEcVolumeResponse struct { func (m *LookupEcVolumeResponse) Reset() { *m = LookupEcVolumeResponse{} } func (m *LookupEcVolumeResponse) String() string { return proto.CompactTextString(m) } func (*LookupEcVolumeResponse) ProtoMessage() {} -func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } func (m *LookupEcVolumeResponse) GetVolumeId() uint32 { if m != nil { @@ -1293,7 +1392,7 @@ func (m *LookupEcVolumeResponse_EcShardIdLocation) Reset() { func (m *LookupEcVolumeResponse_EcShardIdLocation) String() string { return proto.CompactTextString(m) } func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {} func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{29, 0} + return fileDescriptor0, []int{30, 0} } func (m *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 { @@ -1316,7 +1415,7 @@ type GetMasterConfigurationRequest struct { func (m *GetMasterConfigurationRequest) Reset() { *m = GetMasterConfigurationRequest{} } func (m *GetMasterConfigurationRequest) String() string { return proto.CompactTextString(m) } func (*GetMasterConfigurationRequest) ProtoMessage() {} -func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } type GetMasterConfigurationResponse struct { MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"` @@ -1326,7 +1425,7 @@ type GetMasterConfigurationResponse struct { func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } func (*GetMasterConfigurationResponse) ProtoMessage() {} -func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *GetMasterConfigurationResponse) GetMetricsAddress() string { if m != nil { @@ -1342,12 +1441,45 @@ func (m *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 { return 0 } +type ListMasterClientsRequest struct { + ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType" json:"client_type,omitempty"` +} + +func (m *ListMasterClientsRequest) Reset() { *m = ListMasterClientsRequest{} } +func (m *ListMasterClientsRequest) String() string { 
return proto.CompactTextString(m) } +func (*ListMasterClientsRequest) ProtoMessage() {} +func (*ListMasterClientsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *ListMasterClientsRequest) GetClientType() string { + if m != nil { + return m.ClientType + } + return "" +} + +type ListMasterClientsResponse struct { + GrpcAddresses []string `protobuf:"bytes,1,rep,name=grpc_addresses,json=grpcAddresses" json:"grpc_addresses,omitempty"` +} + +func (m *ListMasterClientsResponse) Reset() { *m = ListMasterClientsResponse{} } +func (m *ListMasterClientsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMasterClientsResponse) ProtoMessage() {} +func (*ListMasterClientsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *ListMasterClientsResponse) GetGrpcAddresses() []string { + if m != nil { + return m.GrpcAddresses + } + return nil +} + func init() { proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat") proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse") proto.RegisterType((*VolumeInformationMessage)(nil), "master_pb.VolumeInformationMessage") proto.RegisterType((*VolumeShortInformationMessage)(nil), "master_pb.VolumeShortInformationMessage") proto.RegisterType((*VolumeEcShardInformationMessage)(nil), "master_pb.VolumeEcShardInformationMessage") + proto.RegisterType((*StorageBackend)(nil), "master_pb.StorageBackend") proto.RegisterType((*Empty)(nil), "master_pb.Empty") proto.RegisterType((*SuperBlockExtra)(nil), "master_pb.SuperBlockExtra") proto.RegisterType((*SuperBlockExtra_ErasureCoding)(nil), "master_pb.SuperBlockExtra.ErasureCoding") @@ -1378,6 +1510,8 @@ func init() { proto.RegisterType((*LookupEcVolumeResponse_EcShardIdLocation)(nil), "master_pb.LookupEcVolumeResponse.EcShardIdLocation") proto.RegisterType((*GetMasterConfigurationRequest)(nil), "master_pb.GetMasterConfigurationRequest") proto.RegisterType((*GetMasterConfigurationResponse)(nil), "master_pb.GetMasterConfigurationResponse") + proto.RegisterType((*ListMasterClientsRequest)(nil), "master_pb.ListMasterClientsRequest") + proto.RegisterType((*ListMasterClientsResponse)(nil), "master_pb.ListMasterClientsResponse") } // Reference imports to suppress errors if they are not otherwise used. @@ -1401,6 +1535,7 @@ type SeaweedClient interface { VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) + ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) } type seaweedClient struct { @@ -1545,6 +1680,15 @@ func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMaste return out, nil } +func (c *seaweedClient) ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) { + out := new(ListMasterClientsResponse) + err := grpc.Invoke(ctx, "/master_pb.Seaweed/ListMasterClients", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // Server API for Seaweed service type SeaweedServer interface { @@ -1558,6 +1702,7 @@ type SeaweedServer interface { VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) + ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error) } func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) { @@ -1760,6 +1905,24 @@ func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _Seaweed_ListMasterClients_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMasterClientsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).ListMasterClients(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/ListMasterClients", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).ListMasterClients(ctx, req.(*ListMasterClientsRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Seaweed_serviceDesc = grpc.ServiceDesc{ ServiceName: "master_pb.Seaweed", HandlerType: (*SeaweedServer)(nil), @@ -1796,6 +1959,10 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ MethodName: "GetMasterConfiguration", Handler: _Seaweed_GetMasterConfiguration_Handler, }, + { + MethodName: "ListMasterClients", + Handler: _Seaweed_ListMasterClients_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -1817,127 +1984,142 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("master.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1943 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x59, 0x4b, 0x6f, 0x1b, 0xc9, - 0x11, 0xf6, 0x90, 0x14, 0x45, 0x16, 0x1f, 0x22, 0x5b, 0xb2, 0x96, 0xe6, 0xc6, 0x16, 0x3d, 0x1b, - 0x60, 0xb5, 0xce, 0x46, 0xd9, 0x68, 0x17, 0x48, 0x80, 0x24, 0x58, 0xd8, 0xb2, 0x76, 0x23, 0xd8, - 0xf2, 0xda, 0x43, 0xc7, 0x0b, 0x04, 0x08, 0x26, 0xcd, 0x99, 0x96, 0x34, 0xd0, 0xbc, 0x32, 0xdd, - 0x94, 0xc9, 0xcd, 0x31, 0xb9, 0x05, 0xc8, 0x25, 0x87, 0x9c, 0x72, 0xcf, 0x3d, 0xb7, 0x1c, 0x72, - 0xc9, 0x8f, 0xc8, 0x39, 0x7f, 0x21, 0xd7, 0x20, 0x40, 0xd0, 0xaf, 0x99, 0x1e, 0x92, 0x92, 0xac, - 0x05, 0x7c, 0xf0, 0x6d, 0xa6, 0xaa, 0xba, 0xba, 0xe6, 0xab, 0xae, 0xaa, 0xaf, 0x49, 0x68, 0x47, - 0x98, 0x32, 0x92, 0xed, 0xa5, 0x59, 0xc2, 0x12, 0xd4, 0x94, 0x6f, 0x6e, 0x3a, 0xb1, 0xff, 0x50, - 0x87, 0xe6, 0xcf, 0x09, 0xce, 0xd8, 0x84, 0x60, 0x86, 0xba, 0x50, 0x09, 0xd2, 0x81, 0x35, 0xb2, - 0x76, 0x9b, 0x4e, 0x25, 0x48, 0x11, 0x82, 0x5a, 0x9a, 0x64, 0x6c, 0x50, 0x19, 0x59, 0xbb, 0x1d, - 0x47, 0x3c, 0xa3, 0xbb, 0x00, 0xe9, 0x74, 0x12, 0x06, 0x9e, 0x3b, 0xcd, 0xc2, 0x41, 0x55, 0xd8, - 0x36, 0xa5, 0xe4, 0x17, 0x59, 0x88, 0x76, 0xa1, 0x17, 0xe1, 0x99, 0x7b, 0x91, 0x84, 0xd3, 0x88, - 0xb8, 0x5e, 0x32, 0x8d, 0xd9, 0xa0, 0x26, 0x96, 0x77, 0x23, 0x3c, 0x7b, 0x25, 0xc4, 0x07, 0x5c, - 0x8a, 0x46, 0x3c, 0xaa, 0x99, 0x7b, 0x12, 0x84, 0xc4, 0x3d, 0x27, 0xf3, 0xc1, 0xda, 0xc8, 0xda, - 0xad, 0x39, 0x10, 0xe1, 0xd9, 0x17, 0x41, 0x48, 0x9e, 0x90, 0x39, 0xda, 0x81, 0x96, 0x8f, 0x19, - 0x76, 0x3d, 
0x12, 0x33, 0x92, 0x0d, 0xea, 0x62, 0x2f, 0xe0, 0xa2, 0x03, 0x21, 0xe1, 0xf1, 0x65, - 0xd8, 0x3b, 0x1f, 0xac, 0x0b, 0x8d, 0x78, 0xe6, 0xf1, 0x61, 0x3f, 0x0a, 0x62, 0x57, 0x44, 0xde, - 0x10, 0x5b, 0x37, 0x85, 0xe4, 0x39, 0x0f, 0xff, 0x67, 0xb0, 0x2e, 0x63, 0xa3, 0x83, 0xe6, 0xa8, - 0xba, 0xdb, 0xda, 0xff, 0x60, 0x2f, 0x47, 0x63, 0x4f, 0x86, 0x77, 0x14, 0x9f, 0x24, 0x59, 0x84, - 0x59, 0x90, 0xc4, 0xc7, 0x84, 0x52, 0x7c, 0x4a, 0x1c, 0xbd, 0x06, 0x1d, 0x41, 0x2b, 0x26, 0xaf, - 0x5d, 0xed, 0x02, 0x84, 0x8b, 0xdd, 0x25, 0x17, 0xe3, 0xb3, 0x24, 0x63, 0x2b, 0xfc, 0x40, 0x4c, - 0x5e, 0xbf, 0x52, 0xae, 0x5e, 0xc0, 0x86, 0x4f, 0x42, 0xc2, 0x88, 0x9f, 0xbb, 0x6b, 0xdd, 0xd0, - 0x5d, 0x57, 0x39, 0xd0, 0x2e, 0xbf, 0x0b, 0xdd, 0x33, 0x4c, 0xdd, 0x38, 0xc9, 0x3d, 0xb6, 0x47, - 0xd6, 0x6e, 0xc3, 0x69, 0x9f, 0x61, 0xfa, 0x2c, 0xd1, 0x56, 0x5f, 0x42, 0x93, 0x78, 0x2e, 0x3d, - 0xc3, 0x99, 0x4f, 0x07, 0x3d, 0xb1, 0xe5, 0x83, 0xa5, 0x2d, 0x0f, 0xbd, 0x31, 0x37, 0x58, 0xb1, - 0x69, 0x83, 0x48, 0x15, 0x45, 0xcf, 0xa0, 0xc3, 0xc1, 0x28, 0x9c, 0xf5, 0x6f, 0xec, 0x8c, 0xa3, - 0x79, 0xa8, 0xfd, 0xbd, 0x82, 0xbe, 0x46, 0xa4, 0xf0, 0x89, 0x6e, 0xec, 0x53, 0xc3, 0x9a, 0xfb, - 0xfd, 0x10, 0x7a, 0x0a, 0x96, 0xc2, 0xed, 0xa6, 0x00, 0xa6, 0x23, 0x80, 0xd1, 0x86, 0xf6, 0xdf, - 0x2d, 0xe8, 0xe7, 0xd5, 0xe0, 0x10, 0x9a, 0x26, 0x31, 0x25, 0xe8, 0x01, 0xf4, 0xd5, 0x71, 0xa6, - 0xc1, 0x37, 0xc4, 0x0d, 0x83, 0x28, 0x60, 0xa2, 0x48, 0x6a, 0xce, 0x86, 0x54, 0x8c, 0x83, 0x6f, - 0xc8, 0x53, 0x2e, 0x46, 0xdb, 0x50, 0x0f, 0x09, 0xf6, 0x49, 0x26, 0x6a, 0xa6, 0xe9, 0xa8, 0x37, - 0xf4, 0x21, 0x6c, 0x44, 0x84, 0x65, 0x81, 0x47, 0x5d, 0xec, 0xfb, 0x19, 0xa1, 0x54, 0x95, 0x4e, - 0x57, 0x89, 0x1f, 0x4a, 0x29, 0xfa, 0x31, 0x0c, 0xb4, 0x61, 0xc0, 0xcf, 0xf8, 0x05, 0x0e, 0x5d, - 0x4a, 0xbc, 0x24, 0xf6, 0xa9, 0xaa, 0xa3, 0x6d, 0xa5, 0x3f, 0x52, 0xea, 0xb1, 0xd4, 0xda, 0x7f, - 0xa9, 0xc2, 0xe0, 0xb2, 0x03, 0x2c, 0x2a, 0xdb, 0x17, 0x41, 0x77, 0x9c, 0x4a, 0xe0, 0xf3, 0xca, - 0xe1, 0x1f, 0x23, 0xa2, 0xac, 0x39, 0xe2, 0x19, 0xdd, 0x03, 0xf0, 0x92, 0x30, 0x24, 0x1e, 0x5f, - 0xa8, 0xc2, 0x33, 0x24, 0xbc, 0xb2, 0x44, 0xb1, 0x16, 0x45, 0x5d, 0x73, 0x9a, 0x5c, 0x22, 0xeb, - 0xf9, 0x3e, 0xb4, 0x25, 0xf0, 0xca, 0x40, 0xd6, 0x73, 0x4b, 0xca, 0xa4, 0xc9, 0xc7, 0x80, 0x74, - 0x82, 0x27, 0xf3, 0xdc, 0xb0, 0x2e, 0x0c, 0x7b, 0x4a, 0xf3, 0x68, 0xae, 0xad, 0xdf, 0x87, 0x66, - 0x46, 0xb0, 0xef, 0x26, 0x71, 0x38, 0x17, 0x25, 0xde, 0x70, 0x1a, 0x5c, 0xf0, 0x55, 0x1c, 0xce, - 0xd1, 0xf7, 0xa0, 0x9f, 0x91, 0x34, 0x0c, 0x3c, 0xec, 0xa6, 0x21, 0xf6, 0x48, 0x44, 0x62, 0x5d, - 0xed, 0x3d, 0xa5, 0x78, 0xae, 0xe5, 0x68, 0x00, 0xeb, 0x17, 0x24, 0xa3, 0xfc, 0xb3, 0x9a, 0xc2, - 0x44, 0xbf, 0xa2, 0x1e, 0x54, 0x19, 0x0b, 0x07, 0x20, 0xa4, 0xfc, 0x11, 0x7d, 0x04, 0x3d, 0x2f, - 0x89, 0x52, 0xec, 0x31, 0x37, 0x23, 0x17, 0x81, 0x58, 0xd4, 0x12, 0xea, 0x0d, 0x25, 0x77, 0x94, - 0x98, 0x7f, 0x4e, 0x94, 0xf8, 0xc1, 0x49, 0x40, 0x7c, 0x17, 0x33, 0x95, 0x26, 0x51, 0x72, 0x55, - 0xa7, 0xa7, 0x35, 0x0f, 0x99, 0x4c, 0x90, 0xfd, 0x57, 0x0b, 0xee, 0x5e, 0x59, 0xce, 0x4b, 0x49, - 0xba, 0x2e, 0x21, 0x6f, 0x0b, 0x03, 0x7b, 0x0a, 0x3b, 0xd7, 0x14, 0xd9, 0x35, 0xb1, 0x56, 0x96, - 0x62, 0xb5, 0xa1, 0x43, 0x3c, 0x37, 0x88, 0x7d, 0x32, 0x73, 0x27, 0x01, 0x93, 0xc7, 0xbf, 0xe3, - 0xb4, 0x88, 0x77, 0xc4, 0x65, 0x8f, 0x02, 0x46, 0xed, 0x75, 0x58, 0x3b, 0x8c, 0x52, 0x36, 0xb7, - 0xff, 0x61, 0xc1, 0xc6, 0x78, 0x9a, 0x92, 0xec, 0x51, 0x98, 0x78, 0xe7, 0x87, 0x33, 0x96, 0x61, - 0xf4, 0x15, 0x74, 0x49, 0x86, 0xe9, 0x34, 0xe3, 0xc7, 0xc6, 0x0f, 0xe2, 0x53, 0xb1, 0x79, 0xb9, - 0x5b, 0x2e, 0xac, 0xd9, 0x3b, 0x94, 
0x0b, 0x0e, 0x84, 0xbd, 0xd3, 0x21, 0xe6, 0xeb, 0xf0, 0x97, - 0xd0, 0x29, 0xe9, 0x79, 0x4d, 0xf0, 0xd9, 0xa2, 0x3e, 0x4a, 0x3c, 0xf3, 0x7a, 0x4e, 0x71, 0x16, - 0xb0, 0xb9, 0x9a, 0x81, 0xea, 0x8d, 0xd7, 0x82, 0xea, 0x09, 0x81, 0xcf, 0xbf, 0xa5, 0xca, 0xa7, - 0x8c, 0x94, 0x1c, 0xf9, 0xd4, 0x7e, 0x00, 0x5b, 0x4f, 0x08, 0x49, 0x0f, 0x92, 0x38, 0x26, 0x1e, - 0x23, 0xbe, 0x43, 0x7e, 0x33, 0x25, 0x94, 0xf1, 0x2d, 0x62, 0x1c, 0x11, 0x35, 0x62, 0xc5, 0xb3, - 0xfd, 0x67, 0x0b, 0xba, 0x12, 0xed, 0xa7, 0x89, 0x27, 0x30, 0xe6, 0x19, 0xe1, 0xc3, 0x55, 0x5a, - 0xf1, 0xc7, 0x85, 0xa9, 0x5b, 0x59, 0x9c, 0xba, 0x77, 0xa0, 0x21, 0xc6, 0x52, 0x11, 0xcc, 0x3a, - 0x9f, 0x34, 0x81, 0x4f, 0x8b, 0xb2, 0xf4, 0xa5, 0xba, 0x26, 0xd4, 0x2d, 0x3d, 0x39, 0xb8, 0x49, - 0xd1, 0xb4, 0xd6, 0xcc, 0xa6, 0x65, 0xbf, 0x84, 0xcd, 0xa7, 0x49, 0x72, 0x3e, 0x4d, 0x65, 0x78, - 0xfa, 0x23, 0xca, 0xdf, 0x6e, 0x8d, 0xaa, 0x3c, 0x96, 0xfc, 0xdb, 0xaf, 0x3b, 0x09, 0xf6, 0x7f, - 0x2c, 0xd8, 0x2a, 0xbb, 0x55, 0x7d, 0xf6, 0xd7, 0xb0, 0x99, 0xfb, 0x75, 0x43, 0x85, 0x85, 0xdc, - 0xa0, 0xb5, 0xff, 0x89, 0x91, 0xe6, 0x55, 0xab, 0xf5, 0xec, 0xf6, 0x35, 0x88, 0x4e, 0xff, 0x62, - 0x41, 0x42, 0x87, 0x33, 0xe8, 0x2d, 0x9a, 0xf1, 0x2e, 0x93, 0xef, 0xaa, 0x10, 0x6f, 0xe8, 0x95, - 0xe8, 0x87, 0xd0, 0x2c, 0x02, 0xa9, 0x88, 0x40, 0x36, 0x4b, 0x81, 0xa8, 0xbd, 0x0a, 0x2b, 0xb4, - 0x05, 0x6b, 0x24, 0xcb, 0x92, 0x4c, 0xd5, 0xab, 0x7c, 0xb1, 0x7f, 0x02, 0x8d, 0x6f, 0x9d, 0x5d, - 0xfb, 0x6f, 0x15, 0xe8, 0x3c, 0xa4, 0x34, 0x38, 0x8d, 0x75, 0x0a, 0xb6, 0x60, 0x4d, 0xf6, 0x4e, - 0x39, 0x86, 0xe4, 0x0b, 0x1a, 0x41, 0x4b, 0x95, 0xbd, 0x01, 0xbd, 0x29, 0xba, 0xb6, 0xa3, 0xa8, - 0x56, 0x50, 0x93, 0xa1, 0xf1, 0x76, 0xb8, 0xc0, 0xc1, 0xd6, 0x2e, 0xe5, 0x60, 0x75, 0x83, 0x83, - 0xbd, 0x0f, 0x4d, 0xb1, 0x28, 0x4e, 0x7c, 0xa2, 0xc8, 0x59, 0x83, 0x0b, 0x9e, 0x25, 0x3e, 0x41, - 0xfb, 0xb0, 0x1d, 0x91, 0x28, 0xc9, 0xe6, 0x6e, 0x84, 0x53, 0x97, 0x53, 0x40, 0x31, 0x56, 0xa3, - 0x89, 0x6a, 0x5d, 0x48, 0x6a, 0x8f, 0x71, 0x7a, 0x8c, 0x67, 0x7c, 0xb2, 0x1e, 0x4f, 0xd0, 0x3e, - 0xdc, 0xfe, 0x3a, 0x0b, 0x18, 0x9e, 0x84, 0xa4, 0x4c, 0x2d, 0x65, 0x2b, 0xdb, 0xd4, 0x4a, 0x83, - 0x5f, 0xda, 0x7f, 0xb2, 0xa0, 0xab, 0x51, 0x53, 0x27, 0xac, 0x07, 0xd5, 0x93, 0x3c, 0xcb, 0xfc, - 0x51, 0xe7, 0xa2, 0x72, 0x59, 0x2e, 0x96, 0xf8, 0x6d, 0x8e, 0x7c, 0xcd, 0x44, 0x3e, 0x4f, 0xfa, - 0x9a, 0x91, 0x74, 0x0e, 0x0d, 0x9e, 0xb2, 0x33, 0x0d, 0x0d, 0x7f, 0xb6, 0x4f, 0xa1, 0x3f, 0x66, - 0x98, 0x05, 0x94, 0x05, 0x1e, 0xd5, 0xe9, 0x5c, 0x48, 0x9c, 0x75, 0x5d, 0xe2, 0x2a, 0x97, 0x25, - 0xae, 0x9a, 0x27, 0xce, 0xfe, 0xa7, 0x05, 0xc8, 0xdc, 0x49, 0x41, 0xf0, 0x16, 0xb6, 0xe2, 0x90, - 0xb1, 0x84, 0x71, 0xa2, 0xc2, 0x29, 0x85, 0x22, 0x06, 0x42, 0xc2, 0xd3, 0xc7, 0x4f, 0xc3, 0x94, - 0x12, 0x5f, 0x6a, 0x25, 0x2b, 0x68, 0x70, 0x81, 0x50, 0x96, 0x49, 0x45, 0x7d, 0x81, 0x54, 0xd8, - 0x0f, 0xa1, 0x35, 0x66, 0x49, 0x86, 0x4f, 0xc9, 0xcb, 0x79, 0xfa, 0x26, 0xd1, 0xab, 0xe8, 0x2a, - 0x05, 0x10, 0x23, 0x80, 0x83, 0x22, 0xfa, 0x55, 0x1d, 0xf8, 0xb7, 0x70, 0xbb, 0xb0, 0x78, 0x1a, - 0x50, 0xa6, 0xf3, 0xf2, 0x19, 0x6c, 0x07, 0xb1, 0x17, 0x4e, 0x7d, 0xe2, 0xc6, 0x7c, 0x00, 0x86, - 0x39, 0xaf, 0xb6, 0x04, 0x1d, 0xd9, 0x52, 0xda, 0x67, 0x42, 0xa9, 0xf9, 0xf5, 0xc7, 0x80, 0xf4, - 0x2a, 0xe2, 0xe5, 0x2b, 0x2a, 0x62, 0x45, 0x4f, 0x69, 0x0e, 0x3d, 0x65, 0x6d, 0xbf, 0x80, 0xed, - 0xc5, 0xcd, 0x55, 0xaa, 0x7e, 0x04, 0xad, 0x02, 0x76, 0xdd, 0x07, 0x6f, 0x1b, 0xed, 0xa7, 0x58, - 0xe7, 0x98, 0x96, 0xf6, 0xf7, 0xe1, 0xbd, 0x42, 0xf5, 0x58, 0x34, 0xfa, 0xab, 0x06, 0xd0, 0x10, - 0x06, 0xcb, 0xe6, 0x32, 0x06, 0xfb, 0xdf, 0x15, 0x68, 0x3f, 
0x56, 0x95, 0xcb, 0x59, 0x80, 0x31, - 0xf7, 0x9b, 0x62, 0xee, 0xdf, 0x87, 0x76, 0xa9, 0x20, 0x25, 0xa1, 0x6c, 0x5d, 0x18, 0x17, 0xbd, - 0x55, 0x57, 0xc2, 0xaa, 0x30, 0x5b, 0xbc, 0x12, 0x3e, 0x80, 0xfe, 0x49, 0x46, 0xc8, 0xf2, 0xed, - 0xb1, 0xe6, 0x6c, 0x70, 0x85, 0x69, 0xbb, 0x07, 0x9b, 0xd8, 0x63, 0xc1, 0xc5, 0x82, 0xb5, 0x3c, - 0x5f, 0x7d, 0xa9, 0x32, 0xed, 0xbf, 0xc8, 0x03, 0x0d, 0xe2, 0x93, 0x84, 0x0e, 0xea, 0x6f, 0x7e, - 0xfb, 0x53, 0x5f, 0xc3, 0x35, 0x14, 0x3d, 0x87, 0xae, 0xbe, 0x45, 0x28, 0x4f, 0xeb, 0x37, 0xbe, - 0xa1, 0xb4, 0x49, 0xa1, 0xa2, 0xf6, 0xef, 0x2b, 0xd0, 0x70, 0xb0, 0x77, 0xfe, 0x6e, 0xe3, 0xfb, - 0x39, 0x6c, 0xe4, 0x3d, 0xbf, 0x04, 0xf1, 0x7b, 0x06, 0x30, 0xe6, 0x51, 0x72, 0x3a, 0xbe, 0xf1, - 0x46, 0xed, 0xff, 0x59, 0xd0, 0x7d, 0x9c, 0xcf, 0x95, 0x77, 0x1b, 0x8c, 0x7d, 0x00, 0x3e, 0x08, - 0x4b, 0x38, 0x98, 0xc4, 0x41, 0xa7, 0xdb, 0x69, 0x66, 0xea, 0x89, 0xda, 0x7f, 0xac, 0x40, 0xfb, - 0x65, 0x92, 0x26, 0x61, 0x72, 0x3a, 0x7f, 0xb7, 0xbf, 0xfe, 0x10, 0xfa, 0x06, 0x67, 0x28, 0x81, - 0x70, 0x67, 0xe1, 0x30, 0x14, 0xc9, 0x76, 0x36, 0xfc, 0xd2, 0x3b, 0xb5, 0x37, 0xa1, 0xaf, 0x78, - 0x71, 0xd1, 0x92, 0xed, 0xdf, 0x59, 0x80, 0x4c, 0xa9, 0xea, 0x95, 0x3f, 0x85, 0x0e, 0x53, 0xd8, - 0x89, 0xfd, 0xd4, 0xe5, 0xc0, 0x3c, 0x7b, 0x26, 0xb6, 0x4e, 0x9b, 0x99, 0x48, 0xff, 0x00, 0xb6, - 0x96, 0x6e, 0xf8, 0x9c, 0x90, 0x48, 0x84, 0xfb, 0x0b, 0x97, 0xfc, 0xe3, 0x89, 0xfd, 0x19, 0xdc, - 0x96, 0x24, 0x54, 0xf7, 0x71, 0xdd, 0x5f, 0x97, 0xd8, 0x64, 0xa7, 0x60, 0x93, 0xf6, 0x7f, 0x2d, - 0xd8, 0x5e, 0x5c, 0xa6, 0xe2, 0xbf, 0x6a, 0x1d, 0xc2, 0x80, 0x54, 0xbf, 0x31, 0x79, 0xb1, 0xa4, - 0xa3, 0x9f, 0x2e, 0xf1, 0xe2, 0x45, 0xdf, 0x7b, 0xba, 0x0f, 0x15, 0xd4, 0xb8, 0x47, 0xcb, 0x02, - 0x3a, 0xc4, 0xd0, 0x5f, 0x32, 0xe3, 0xb7, 0x0a, 0xbd, 0xaf, 0x8a, 0x69, 0x5d, 0x2d, 0xfc, 0x16, - 0xc4, 0xd8, 0xde, 0x81, 0xbb, 0x5f, 0x12, 0x76, 0x2c, 0x6c, 0x0e, 0x92, 0xf8, 0x24, 0x38, 0x9d, - 0x66, 0xd2, 0xa8, 0x48, 0xed, 0xbd, 0xcb, 0x2c, 0x14, 0x4c, 0x2b, 0x7e, 0x46, 0xb1, 0x6e, 0xfc, - 0x33, 0x4a, 0xe5, 0xaa, 0x9f, 0x51, 0xf6, 0xff, 0x55, 0x87, 0xf5, 0x31, 0xc1, 0xaf, 0x09, 0xf1, - 0xd1, 0x11, 0x74, 0xc6, 0x24, 0xf6, 0x8b, 0x1f, 0x48, 0xb7, 0x8c, 0x6f, 0xcc, 0xa5, 0xc3, 0xef, - 0xac, 0x92, 0xe6, 0x23, 0xf4, 0xd6, 0xae, 0xf5, 0x89, 0x85, 0x5e, 0x40, 0xa7, 0x74, 0x23, 0x44, - 0x3b, 0xc6, 0xa2, 0x55, 0x77, 0xc5, 0xe1, 0x9d, 0xa5, 0x81, 0xa2, 0x51, 0xcd, 0x5d, 0xb6, 0xcd, - 0x9b, 0x10, 0xba, 0x77, 0xe9, 0x15, 0x49, 0x3a, 0xdc, 0xb9, 0xe6, 0x0a, 0x65, 0xdf, 0x42, 0x9f, - 0x43, 0x5d, 0x52, 0x66, 0x34, 0x30, 0x8c, 0x4b, 0x77, 0x8f, 0x52, 0x5c, 0x65, 0x7e, 0x6d, 0xdf, - 0x42, 0x4f, 0x00, 0x0a, 0xd2, 0x89, 0x4c, 0x60, 0x96, 0x58, 0xef, 0xf0, 0xee, 0x25, 0xda, 0xdc, - 0xd9, 0xd7, 0xd0, 0x2d, 0x53, 0x23, 0x34, 0x5a, 0xc9, 0x7e, 0x8c, 0xfe, 0x30, 0xbc, 0x7f, 0x85, - 0x45, 0xee, 0xf8, 0x57, 0xd0, 0x5b, 0x64, 0x3c, 0xc8, 0x5e, 0xb9, 0xb0, 0xc4, 0x9e, 0x86, 0x1f, - 0x5c, 0x69, 0x63, 0x82, 0x50, 0xb4, 0xa8, 0x12, 0x08, 0x4b, 0xfd, 0xac, 0x04, 0xc2, 0x72, 0x5f, - 0x93, 0x20, 0x94, 0xeb, 0xba, 0x04, 0xc2, 0xca, 0x2e, 0x54, 0x02, 0x61, 0x75, 0x53, 0xb0, 0x6f, - 0xa1, 0x04, 0xb6, 0x57, 0x57, 0x1b, 0x32, 0x7f, 0x52, 0xb9, 0xb2, 0x64, 0x87, 0x1f, 0xbd, 0x81, - 0xa5, 0xde, 0x70, 0x52, 0x17, 0xff, 0x3e, 0x7c, 0xfa, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x44, - 0x05, 0x7a, 0xc2, 0x8d, 0x18, 0x00, 0x00, + // 2183 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xf7, 0x92, 0x94, 0x48, 0x3e, 0x7e, 0x8f, 0x64, 0x85, 0x66, 0x62, 0x8b, 0xde, 0xa4, 
0x88, + 0xec, 0xa6, 0x6a, 0xaa, 0x04, 0x68, 0xd0, 0x34, 0x08, 0x2c, 0x59, 0x71, 0x05, 0x5b, 0x8a, 0xbd, + 0x72, 0x1d, 0xa0, 0x40, 0xb1, 0x19, 0xee, 0x8e, 0xa4, 0x85, 0xf6, 0xab, 0xbb, 0x43, 0x59, 0x4c, + 0x2f, 0x05, 0x7a, 0xec, 0xa9, 0xe8, 0xa1, 0xff, 0x42, 0x2f, 0x3d, 0xb5, 0x97, 0x5e, 0x72, 0xe9, + 0x7f, 0xd4, 0x6b, 0x2e, 0xc5, 0x7c, 0xed, 0xce, 0x2e, 0x49, 0x29, 0x0a, 0x90, 0x83, 0x6f, 0xbb, + 0xef, 0xbd, 0x79, 0xf3, 0xf6, 0xf7, 0xe6, 0xbd, 0xf9, 0x3d, 0x12, 0xda, 0x01, 0x4e, 0x29, 0x49, + 0xb6, 0xe3, 0x24, 0xa2, 0x11, 0x6a, 0x8a, 0x37, 0x3b, 0x9e, 0x98, 0x7f, 0x59, 0x85, 0xe6, 0x6f, + 0x08, 0x4e, 0xe8, 0x84, 0x60, 0x8a, 0xba, 0x50, 0xf1, 0xe2, 0xa1, 0x31, 0x36, 0xb6, 0x9a, 0x56, + 0xc5, 0x8b, 0x11, 0x82, 0x5a, 0x1c, 0x25, 0x74, 0x58, 0x19, 0x1b, 0x5b, 0x1d, 0x8b, 0x3f, 0xa3, + 0xbb, 0x00, 0xf1, 0x74, 0xe2, 0x7b, 0x8e, 0x3d, 0x4d, 0xfc, 0x61, 0x95, 0xdb, 0x36, 0x85, 0xe4, + 0xb7, 0x89, 0x8f, 0xb6, 0xa0, 0x1f, 0xe0, 0x4b, 0xfb, 0x22, 0xf2, 0xa7, 0x01, 0xb1, 0x9d, 0x68, + 0x1a, 0xd2, 0x61, 0x8d, 0x2f, 0xef, 0x06, 0xf8, 0xf2, 0x15, 0x17, 0xef, 0x31, 0x29, 0x1a, 0xb3, + 0xa8, 0x2e, 0xed, 0x13, 0xcf, 0x27, 0xf6, 0x39, 0x99, 0x0d, 0x57, 0xc6, 0xc6, 0x56, 0xcd, 0x82, + 0x00, 0x5f, 0x7e, 0xe1, 0xf9, 0xe4, 0x29, 0x99, 0xa1, 0x4d, 0x68, 0xb9, 0x98, 0x62, 0xdb, 0x21, + 0x21, 0x25, 0xc9, 0x70, 0x95, 0xef, 0x05, 0x4c, 0xb4, 0xc7, 0x25, 0x2c, 0xbe, 0x04, 0x3b, 0xe7, + 0xc3, 0x3a, 0xd7, 0xf0, 0x67, 0x16, 0x1f, 0x76, 0x03, 0x2f, 0xb4, 0x79, 0xe4, 0x0d, 0xbe, 0x75, + 0x93, 0x4b, 0x9e, 0xb3, 0xf0, 0x3f, 0x83, 0xba, 0x88, 0x2d, 0x1d, 0x36, 0xc7, 0xd5, 0xad, 0xd6, + 0xce, 0xbb, 0xdb, 0x19, 0x1a, 0xdb, 0x22, 0xbc, 0x83, 0xf0, 0x24, 0x4a, 0x02, 0x4c, 0xbd, 0x28, + 0x3c, 0x24, 0x69, 0x8a, 0x4f, 0x89, 0xa5, 0xd6, 0xa0, 0x03, 0x68, 0x85, 0xe4, 0xb5, 0xad, 0x5c, + 0x00, 0x77, 0xb1, 0x35, 0xe7, 0xe2, 0xf8, 0x2c, 0x4a, 0xe8, 0x02, 0x3f, 0x10, 0x92, 0xd7, 0xaf, + 0xa4, 0xab, 0x17, 0xd0, 0x73, 0x89, 0x4f, 0x28, 0x71, 0x33, 0x77, 0xad, 0x1b, 0xba, 0xeb, 0x4a, + 0x07, 0xca, 0xe5, 0x7b, 0xd0, 0x3d, 0xc3, 0xa9, 0x1d, 0x46, 0x99, 0xc7, 0xf6, 0xd8, 0xd8, 0x6a, + 0x58, 0xed, 0x33, 0x9c, 0x1e, 0x45, 0xca, 0xea, 0x09, 0x34, 0x89, 0x63, 0xa7, 0x67, 0x38, 0x71, + 0xd3, 0x61, 0x9f, 0x6f, 0xf9, 0x70, 0x6e, 0xcb, 0x7d, 0xe7, 0x98, 0x19, 0x2c, 0xd8, 0xb4, 0x41, + 0x84, 0x2a, 0x45, 0x47, 0xd0, 0x61, 0x60, 0xe4, 0xce, 0x06, 0x37, 0x76, 0xc6, 0xd0, 0xdc, 0x57, + 0xfe, 0x5e, 0xc1, 0x40, 0x21, 0x92, 0xfb, 0x44, 0x37, 0xf6, 0xa9, 0x60, 0xcd, 0xfc, 0xbe, 0x0f, + 0x7d, 0x09, 0x4b, 0xee, 0x76, 0x8d, 0x03, 0xd3, 0xe1, 0xc0, 0x28, 0x43, 0xf3, 0x4f, 0x15, 0x18, + 0x64, 0xd5, 0x60, 0x91, 0x34, 0x8e, 0xc2, 0x94, 0xa0, 0x87, 0x30, 0x90, 0xc7, 0x39, 0xf5, 0xbe, + 0x21, 0xb6, 0xef, 0x05, 0x1e, 0xe5, 0x45, 0x52, 0xb3, 0x7a, 0x42, 0x71, 0xec, 0x7d, 0x43, 0x9e, + 0x31, 0x31, 0xda, 0x80, 0x55, 0x9f, 0x60, 0x97, 0x24, 0xbc, 0x66, 0x9a, 0x96, 0x7c, 0x43, 0xef, + 0x43, 0x2f, 0x20, 0x34, 0xf1, 0x9c, 0xd4, 0xc6, 0xae, 0x9b, 0x90, 0x34, 0x95, 0xa5, 0xd3, 0x95, + 0xe2, 0x47, 0x42, 0x8a, 0x3e, 0x81, 0xa1, 0x32, 0xf4, 0xd8, 0x19, 0xbf, 0xc0, 0xbe, 0x9d, 0x12, + 0x27, 0x0a, 0xdd, 0x54, 0xd6, 0xd1, 0x86, 0xd4, 0x1f, 0x48, 0xf5, 0xb1, 0xd0, 0xa2, 0xc7, 0xd0, + 0x4f, 0x69, 0x94, 0xe0, 0x53, 0x62, 0x4f, 0xb0, 0x73, 0x4e, 0xd8, 0x8a, 0x15, 0x0e, 0xde, 0x1d, + 0x0d, 0xbc, 0x63, 0x61, 0xb2, 0x2b, 0x2c, 0xac, 0x5e, 0x5a, 0x78, 0x4f, 0xcd, 0xef, 0xaa, 0x30, + 0x5c, 0x56, 0x06, 0xbc, 0x3f, 0xb8, 0xfc, 0xd3, 0x3b, 0x56, 0xc5, 0x73, 0x59, 0xfd, 0x31, 0x48, + 0xf8, 0xb7, 0xd6, 0x2c, 0xfe, 0x8c, 0xee, 0x01, 0x38, 0x91, 0xef, 0x13, 0x87, 0x2d, 0x94, 0x1f, + 0xa9, 0x49, 0x58, 
0x7d, 0xf2, 0x92, 0xcf, 0x5b, 0x43, 0xcd, 0x6a, 0x32, 0x89, 0xe8, 0x0a, 0xf7, + 0xa1, 0x2d, 0xd2, 0x27, 0x0d, 0x44, 0x57, 0x68, 0x09, 0x99, 0x30, 0xf9, 0x00, 0x90, 0x3a, 0x26, + 0x93, 0x59, 0x66, 0xb8, 0xca, 0x0d, 0xfb, 0x52, 0xb3, 0x3b, 0x53, 0xd6, 0x6f, 0x43, 0x33, 0x21, + 0xd8, 0xb5, 0xa3, 0xd0, 0x9f, 0xf1, 0x46, 0xd1, 0xb0, 0x1a, 0x4c, 0xf0, 0x65, 0xe8, 0xcf, 0xd0, + 0x4f, 0x61, 0x90, 0x90, 0xd8, 0xf7, 0x1c, 0x6c, 0xc7, 0x3e, 0x76, 0x48, 0x40, 0x42, 0xd5, 0x33, + 0xfa, 0x52, 0xf1, 0x5c, 0xc9, 0xd1, 0x10, 0xea, 0x17, 0x24, 0x49, 0xd9, 0x67, 0x35, 0xb9, 0x89, + 0x7a, 0x45, 0x7d, 0xa8, 0x52, 0xea, 0x0f, 0x81, 0x4b, 0xd9, 0x23, 0x7a, 0x00, 0x7d, 0x27, 0x0a, + 0x62, 0xec, 0x50, 0x3b, 0x21, 0x17, 0x1e, 0x5f, 0xd4, 0xe2, 0xea, 0x9e, 0x94, 0x5b, 0x52, 0xcc, + 0x3e, 0x27, 0x88, 0x5c, 0xef, 0xc4, 0x23, 0xae, 0x8d, 0xa9, 0x4c, 0x36, 0x2f, 0xdc, 0xaa, 0xd5, + 0x57, 0x9a, 0x47, 0x54, 0xa4, 0x19, 0x6d, 0xc3, 0x5a, 0x42, 0x82, 0x88, 0x12, 0x5b, 0x25, 0x3b, + 0xc4, 0x01, 0x19, 0x76, 0x38, 0xce, 0x03, 0xa1, 0x92, 0x39, 0x3e, 0xc2, 0x01, 0x61, 0xde, 0x4b, + 0xf6, 0xac, 0xd7, 0x76, 0xb9, 0x79, 0xbf, 0x60, 0xfe, 0x94, 0xcc, 0xcc, 0x7f, 0x18, 0x70, 0xf7, + 0xca, 0x96, 0x33, 0x77, 0x04, 0xae, 0x4b, 0xf7, 0x8f, 0x85, 0xb0, 0x39, 0x85, 0xcd, 0x6b, 0x1a, + 0xc1, 0x35, 0xb1, 0x56, 0xe6, 0x62, 0x35, 0xa1, 0x43, 0x1c, 0xdb, 0x0b, 0x5d, 0x72, 0x69, 0x4f, + 0x3c, 0x2a, 0x4a, 0xb4, 0x63, 0xb5, 0x88, 0x73, 0xc0, 0x64, 0xbb, 0x1e, 0x4d, 0xcd, 0x6f, 0x0d, + 0xe8, 0x16, 0x6b, 0x88, 0x55, 0x01, 0x9d, 0xc5, 0x44, 0xde, 0x9b, 0xfc, 0x59, 0x6e, 0x5d, 0x91, + 0x37, 0xa9, 0x8b, 0x0e, 0x00, 0xe2, 0x24, 0x8a, 0x49, 0x42, 0x3d, 0xc2, 0xfc, 0xb2, 0xb2, 0x7c, + 0xb0, 0xb4, 0x2c, 0xb7, 0x9f, 0x67, 0xb6, 0xfb, 0x21, 0x4d, 0x66, 0x96, 0xb6, 0x78, 0xf4, 0x19, + 0xf4, 0x4a, 0x6a, 0x86, 0x0e, 0xcb, 0xaa, 0x08, 0x80, 0x3d, 0xa2, 0x75, 0x58, 0xb9, 0xc0, 0xfe, + 0x94, 0xc8, 0x10, 0xc4, 0xcb, 0xaf, 0x2a, 0x9f, 0x18, 0x66, 0x1d, 0x56, 0xf6, 0x83, 0x98, 0xce, + 0xd8, 0x97, 0xf4, 0x8e, 0xa7, 0x31, 0x49, 0x76, 0xfd, 0xc8, 0x39, 0xdf, 0xbf, 0xa4, 0x09, 0x46, + 0x5f, 0x42, 0x97, 0x24, 0x38, 0x9d, 0x26, 0xac, 0xaa, 0x5c, 0x2f, 0x3c, 0xe5, 0x3e, 0x8b, 0x57, + 0x52, 0x69, 0xcd, 0xf6, 0xbe, 0x58, 0xb0, 0xc7, 0xed, 0xad, 0x0e, 0xd1, 0x5f, 0x47, 0xbf, 0x83, + 0x4e, 0x41, 0xcf, 0xc0, 0x62, 0x17, 0xb8, 0xcc, 0x0a, 0x7f, 0x66, 0x4d, 0x33, 0xc6, 0x89, 0x47, + 0x67, 0x92, 0x68, 0xc8, 0x37, 0xd6, 0x2a, 0x64, 0xe3, 0xf5, 0x5c, 0x01, 0x5a, 0xc7, 0x6a, 0x0a, + 0xc9, 0x81, 0x9b, 0x9a, 0x4f, 0x60, 0xfd, 0x29, 0x21, 0xf1, 0x5e, 0x14, 0x86, 0xc4, 0xa1, 0xc4, + 0xb5, 0xc8, 0x1f, 0xa6, 0x24, 0xa5, 0x6c, 0x0b, 0x5e, 0x13, 0x32, 0x1f, 0xec, 0x99, 0x75, 0x81, + 0xd3, 0x24, 0x76, 0x6c, 0x8d, 0xce, 0x34, 0x98, 0x80, 0x71, 0x02, 0xf3, 0xef, 0x06, 0x74, 0xc5, + 0x59, 0x7a, 0x16, 0x39, 0xfc, 0x04, 0x31, 0x44, 0x19, 0xbd, 0x91, 0x88, 0x4e, 0x13, 0xbf, 0xc4, + 0x7b, 0x2a, 0x65, 0xde, 0x73, 0x07, 0x1a, 0x9c, 0x18, 0xe4, 0x91, 0xd6, 0xd9, 0x5d, 0xef, 0xb9, + 0x69, 0xde, 0xd2, 0x5c, 0xa1, 0xae, 0x71, 0x75, 0x4b, 0xdd, 0xdd, 0xcc, 0x24, 0xbf, 0x36, 0x56, + 0xf4, 0x6b, 0xc3, 0x7c, 0x09, 0x6b, 0xcf, 0xa2, 0xe8, 0x7c, 0x1a, 0x8b, 0xf0, 0xd4, 0x17, 0x16, + 0x81, 0x31, 0xc6, 0x55, 0x16, 0x4b, 0x06, 0xcc, 0x75, 0xe7, 0xdc, 0xfc, 0x9f, 0x01, 0xeb, 0x45, + 0xb7, 0xf2, 0xa6, 0xfb, 0x1a, 0xd6, 0x32, 0xbf, 0xb6, 0x2f, 0xb1, 0x10, 0x1b, 0xb4, 0x76, 0x3e, + 0xd4, 0xce, 0xc0, 0xa2, 0xd5, 0x8a, 0x3d, 0xb9, 0x0a, 0x44, 0x6b, 0x70, 0x51, 0x92, 0xa4, 0xa3, + 0x4b, 0xe8, 0x97, 0xcd, 0x58, 0x6e, 0xb2, 0x5d, 0x25, 0xe2, 0x0d, 0xb5, 0x12, 0xfd, 0x02, 0x9a, + 0x79, 0x20, 0x15, 0x1e, 0xc8, 0x5a, 0x21, 
0x10, 0xb9, 0x57, 0x6e, 0xc5, 0xce, 0x3e, 0x49, 0x92, + 0x28, 0x91, 0xdd, 0x48, 0xbc, 0x98, 0x9f, 0x42, 0xe3, 0x07, 0x67, 0xd7, 0xfc, 0x57, 0x05, 0x3a, + 0x8f, 0xd2, 0xd4, 0x3b, 0x0d, 0x55, 0x0a, 0xd6, 0x61, 0x45, 0xdc, 0x3b, 0x82, 0x08, 0x88, 0x17, + 0x34, 0x86, 0x96, 0x6c, 0x6a, 0x1a, 0xf4, 0xba, 0xe8, 0xda, 0x7e, 0x29, 0x1b, 0x5d, 0x4d, 0x84, + 0xc6, 0xae, 0x92, 0x12, 0x0b, 0x5e, 0x59, 0xca, 0x82, 0x57, 0x35, 0x16, 0xfc, 0x36, 0x34, 0xf9, + 0xa2, 0x30, 0x72, 0x89, 0xa4, 0xc7, 0x0d, 0x26, 0x38, 0x8a, 0x5c, 0x82, 0x76, 0x60, 0x23, 0x20, + 0x41, 0x94, 0xcc, 0xec, 0x00, 0xc7, 0x36, 0x23, 0xe1, 0x9c, 0xd8, 0x04, 0x13, 0xd9, 0x98, 0x91, + 0xd0, 0x1e, 0xe2, 0xf8, 0x10, 0x5f, 0x32, 0x6e, 0x73, 0x38, 0x41, 0x3b, 0x70, 0xfb, 0xab, 0xc4, + 0xa3, 0x78, 0xe2, 0x93, 0x22, 0xb9, 0x17, 0x8d, 0x7a, 0x4d, 0x29, 0x35, 0x86, 0x6f, 0xfe, 0xcd, + 0x80, 0xae, 0x42, 0x4d, 0x9e, 0xb0, 0x3e, 0x54, 0x4f, 0xb2, 0x2c, 0xb3, 0x47, 0x95, 0x8b, 0xca, + 0xb2, 0x5c, 0xcc, 0x4d, 0x18, 0x19, 0xf2, 0x35, 0x1d, 0xf9, 0x2c, 0xe9, 0x2b, 0x5a, 0xd2, 0x19, + 0x34, 0x78, 0x4a, 0xcf, 0x14, 0x34, 0xec, 0xd9, 0x3c, 0x85, 0xc1, 0x31, 0xc5, 0xd4, 0x4b, 0xa9, + 0xe7, 0xa4, 0x2a, 0x9d, 0xa5, 0xc4, 0x19, 0xd7, 0x25, 0xae, 0xb2, 0x2c, 0x71, 0xd5, 0x2c, 0x71, + 0xe6, 0x7f, 0x0d, 0x40, 0xfa, 0x4e, 0x12, 0x82, 0x1f, 0x61, 0x2b, 0x06, 0x19, 0x8d, 0x28, 0xa3, + 0x8a, 0x8c, 0x8e, 0x49, 0x52, 0xc5, 0x25, 0x2c, 0x7d, 0xec, 0x34, 0x4c, 0x53, 0xe2, 0x0a, 0xad, + 0x60, 0x54, 0x0d, 0x26, 0xe0, 0xca, 0x22, 0x21, 0x5b, 0x2d, 0x11, 0x32, 0xf3, 0x11, 0xb4, 0xe4, + 0xe5, 0xf4, 0x92, 0x5d, 0x6c, 0xd7, 0x47, 0x2f, 0xa3, 0xab, 0xe4, 0x40, 0x8c, 0x01, 0xf6, 0xf2, + 0xe8, 0x17, 0xb4, 0x67, 0xf3, 0x8f, 0x70, 0x3b, 0xb7, 0x78, 0xe6, 0xa5, 0x54, 0xe5, 0xe5, 0x63, + 0xd8, 0xf0, 0x42, 0xc7, 0x9f, 0xba, 0xc4, 0x0e, 0xd9, 0xf5, 0xee, 0x67, 0x93, 0x8d, 0xc1, 0xa9, + 0xdc, 0xba, 0xd4, 0x1e, 0x71, 0xa5, 0x9a, 0x70, 0x3e, 0x00, 0xa4, 0x56, 0x11, 0x27, 0x5b, 0x51, + 0xe1, 0x2b, 0xfa, 0x52, 0xb3, 0xef, 0x48, 0x6b, 0xf3, 0x05, 0x6c, 0x94, 0x37, 0x97, 0xa9, 0xfa, + 0x25, 0xb4, 0x72, 0xd8, 0x55, 0x1f, 0xbc, 0xad, 0xb5, 0x9f, 0x7c, 0x9d, 0xa5, 0x5b, 0x9a, 0x3f, + 0x83, 0xb7, 0x72, 0xd5, 0x63, 0xde, 0xe8, 0xaf, 0xb8, 0x9d, 0xcc, 0x11, 0x0c, 0xe7, 0xcd, 0x45, + 0x0c, 0xe6, 0x5f, 0xab, 0xd0, 0x7e, 0x2c, 0x2b, 0x97, 0x71, 0x1c, 0x8d, 0xd5, 0x08, 0x6a, 0x71, + 0x1f, 0xda, 0x85, 0x82, 0x14, 0x64, 0xbc, 0x75, 0xa1, 0x8d, 0xda, 0x8b, 0x86, 0xf2, 0x2a, 0x37, + 0x2b, 0x0f, 0xe5, 0x0f, 0x61, 0x70, 0x92, 0x10, 0x32, 0x3f, 0xbf, 0xd7, 0xac, 0x1e, 0x53, 0xe8, + 0xb6, 0xdb, 0xb0, 0x86, 0x1d, 0xea, 0x5d, 0x94, 0xac, 0xc5, 0xf9, 0x1a, 0x08, 0x95, 0x6e, 0xff, + 0x45, 0x16, 0xa8, 0x17, 0x9e, 0x44, 0xe9, 0x70, 0xf5, 0xfb, 0xcf, 0xdf, 0xf2, 0x6b, 0x98, 0x26, + 0x45, 0xcf, 0xa1, 0xab, 0xe6, 0x38, 0xe9, 0xa9, 0x7e, 0xe3, 0x19, 0xb1, 0x4d, 0x72, 0x55, 0xaa, + 0x91, 0xea, 0xc2, 0x97, 0x34, 0xc4, 0x97, 0x08, 0x95, 0xde, 0xd8, 0xfe, 0x5d, 0x81, 0x86, 0x85, + 0x9d, 0xf3, 0x37, 0x3b, 0x1f, 0x9f, 0x43, 0x2f, 0xbb, 0x23, 0x0a, 0x29, 0x79, 0x4b, 0x03, 0x52, + 0x3f, 0x7a, 0x56, 0xc7, 0xd5, 0xde, 0x96, 0xc2, 0x56, 0x5f, 0x06, 0xdb, 0x3f, 0x2b, 0xd0, 0x7d, + 0x9c, 0xdd, 0x5b, 0x6f, 0x36, 0x78, 0x3b, 0x00, 0xec, 0xa2, 0x2d, 0xe0, 0xa6, 0x13, 0x13, 0x75, + 0x3c, 0xac, 0x66, 0x22, 0x9f, 0x6e, 0x8e, 0xd7, 0xb7, 0x15, 0x68, 0xbf, 0x8c, 0xe2, 0xc8, 0x8f, + 0x4e, 0x67, 0x6f, 0x36, 0x5a, 0xfb, 0x30, 0xd0, 0x38, 0x4c, 0x01, 0xb4, 0x3b, 0xa5, 0xc3, 0x96, + 0x1f, 0x0e, 0xab, 0xe7, 0x16, 0xde, 0x6f, 0x0e, 0xe0, 0x1a, 0x0c, 0x24, 0xaf, 0xcf, 0xaf, 0x14, + 0xf3, 0xcf, 0x06, 0x20, 0x5d, 0x2a, 0x7b, 0xfd, 0xaf, 0xa1, 0x43, 
0x25, 0xd6, 0x3c, 0x3e, 0x39, + 0xf9, 0xe8, 0xb5, 0xa0, 0xe7, 0xc2, 0x6a, 0x53, 0x3d, 0x33, 0x3f, 0x87, 0xf5, 0xb9, 0xdf, 0x88, + 0x18, 0xa1, 0x12, 0x19, 0x19, 0x94, 0x7e, 0x26, 0x3a, 0x9c, 0x98, 0x1f, 0xc3, 0x6d, 0x41, 0xa2, + 0xd5, 0x3d, 0xa4, 0xee, 0x87, 0x39, 0x36, 0xdc, 0xc9, 0xd9, 0xb0, 0xf9, 0x9d, 0x01, 0x1b, 0xe5, + 0x65, 0x32, 0xfe, 0xab, 0xd6, 0x21, 0x0c, 0x48, 0xf6, 0x4b, 0x9d, 0xd7, 0x0b, 0x3a, 0xfd, 0xd1, + 0x1c, 0xaf, 0x2f, 0xfb, 0xde, 0x56, 0x7d, 0x34, 0xa7, 0xf6, 0xfd, 0xb4, 0x28, 0x48, 0x47, 0x18, + 0x06, 0x73, 0x66, 0x6c, 0x2a, 0x52, 0xfb, 0xca, 0x98, 0xea, 0x72, 0xe1, 0x0f, 0x20, 0xf6, 0xe6, + 0x26, 0xdc, 0x7d, 0x42, 0xe8, 0x21, 0xb7, 0xd9, 0x8b, 0xc2, 0x13, 0xef, 0x74, 0x9a, 0x08, 0xa3, + 0x3c, 0xb5, 0xf7, 0x96, 0x59, 0x48, 0x98, 0x16, 0xfc, 0x10, 0x67, 0xdc, 0xf8, 0x87, 0xb8, 0xca, + 0x55, 0x3f, 0xc4, 0x99, 0x9f, 0xc2, 0x90, 0x9d, 0x2c, 0x19, 0x85, 0xef, 0x91, 0x90, 0x66, 0x3c, + 0x73, 0x13, 0x5a, 0x0e, 0x97, 0xd8, 0xda, 0x4f, 0x06, 0x20, 0x44, 0x8c, 0x5f, 0x99, 0xbb, 0x70, + 0x67, 0xc1, 0x62, 0x19, 0xfc, 0x4f, 0xa0, 0xcb, 0xa7, 0x58, 0x19, 0x39, 0x51, 0xb3, 0x5f, 0x87, + 0x49, 0x1f, 0x29, 0xe1, 0xce, 0x7f, 0xea, 0x50, 0x3f, 0x26, 0xf8, 0x35, 0x21, 0x2e, 0x3a, 0x80, + 0xce, 0x31, 0x09, 0xdd, 0xfc, 0x37, 0xfe, 0x75, 0x0d, 0xe4, 0x4c, 0x3a, 0x7a, 0x67, 0x91, 0x34, + 0xe3, 0x20, 0xb7, 0xb6, 0x8c, 0x0f, 0x0d, 0xf4, 0x02, 0x3a, 0x85, 0x79, 0x1b, 0x6d, 0x6a, 0x8b, + 0x16, 0x4d, 0xe2, 0xa3, 0x3b, 0x73, 0x37, 0xb2, 0x4a, 0x6b, 0xe6, 0xb2, 0xad, 0x8f, 0x92, 0xe8, + 0xde, 0xd2, 0x19, 0x53, 0x38, 0xdc, 0xbc, 0x66, 0x06, 0x35, 0x6f, 0xa1, 0xcf, 0x61, 0x55, 0xcc, + 0x1c, 0x68, 0xa8, 0x19, 0x17, 0x86, 0xb7, 0x42, 0x5c, 0xc5, 0x01, 0xc5, 0xbc, 0x85, 0x9e, 0x02, + 0xe4, 0xac, 0x1d, 0xbd, 0x53, 0xf8, 0x91, 0xa6, 0x34, 0x36, 0x8c, 0xee, 0x2e, 0xd1, 0x66, 0xce, + 0xbe, 0x82, 0x6e, 0x91, 0x5b, 0xa2, 0xf1, 0x42, 0xfa, 0xa8, 0x35, 0xa8, 0xd1, 0xfd, 0x2b, 0x2c, + 0x32, 0xc7, 0xbf, 0x87, 0x7e, 0x99, 0x32, 0x22, 0x73, 0xe1, 0xc2, 0x02, 0xfd, 0x1c, 0xbd, 0x7b, + 0xa5, 0x8d, 0x0e, 0x42, 0xde, 0x23, 0x0b, 0x20, 0xcc, 0x35, 0xd4, 0x02, 0x08, 0xf3, 0x8d, 0x55, + 0x80, 0x50, 0x6c, 0x2c, 0x05, 0x10, 0x16, 0xb6, 0xc1, 0x02, 0x08, 0x8b, 0xbb, 0x92, 0x79, 0x0b, + 0x45, 0xb0, 0xb1, 0xb8, 0xdc, 0x91, 0xfe, 0x83, 0xd5, 0x95, 0x3d, 0x63, 0xf4, 0xe0, 0x7b, 0x58, + 0x66, 0x1b, 0x7e, 0x0d, 0x83, 0xb9, 0xea, 0x44, 0x3a, 0xa4, 0xcb, 0x0a, 0x7f, 0xf4, 0xde, 0xd5, + 0x46, 0x6a, 0x87, 0xc9, 0x2a, 0xff, 0x8b, 0xee, 0xa3, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x6a, + 0xee, 0xfc, 0x90, 0xb2, 0x1b, 0x00, 0x00, } diff --git a/weed/pb/proto_read_write_test.go b/weed/pb/proto_read_write_test.go new file mode 100644 index 000000000..7f6444ab5 --- /dev/null +++ b/weed/pb/proto_read_write_test.go @@ -0,0 +1,43 @@ +package pb + +import ( + "fmt" + "testing" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/golang/protobuf/jsonpb" +) + +func TestJsonpMarshalUnmarshal(t *testing.T) { + + tv := &volume_server_pb.RemoteFile{ + BackendType: "aws", + BackendId: "", + FileSize: 12, + } + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + if text, err := m.MarshalToString(tv); err != nil { + fmt.Printf("marshal eror: %v\n", err) + } else { + fmt.Printf("marshalled: %s\n", text) + } + + rawJson := `{ + "backendType":"aws", + "backendId":"temp", + "FileSize":12 + }` + + tv1 := &volume_server_pb.RemoteFile{} + if err := jsonpb.UnmarshalString(rawJson, tv1); err != nil { + fmt.Printf("unmarshal error: %v\n", err) + } + + fmt.Printf("unmarshalled: %+v\n", tv1) + +} diff --git 
a/weed/pb/queue.proto b/weed/pb/queue.proto new file mode 100644 index 000000000..39b6ee05a --- /dev/null +++ b/weed/pb/queue.proto @@ -0,0 +1,66 @@ +syntax = "proto3"; + +package queue_pb; + +option java_package = "seaweedfs.client"; +option java_outer_classname = "QueueProto"; + +////////////////////////////////////////////////// + +service SeaweedQueue { + + rpc StreamWrite (stream WriteMessageRequest) returns (stream WriteMessageResponse) { + } + + rpc StreamRead (ReadMessageRequest) returns (stream ReadMessageResponse) { + } + + rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) { + } + + rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) { + } + +} + +////////////////////////////////////////////////// + + +message WriteMessageRequest { + string topic = 1; + int64 event_ns = 2; + bytes partition_key = 3; + bytes data = 4; +} + +message WriteMessageResponse { + string error = 1; + int64 ack_ns = 2; +} + +message ReadMessageRequest { + string topic = 1; + int64 start_ns = 2; +} + +message ReadMessageResponse { + string error = 1; + int64 event_ns = 2; + bytes data = 3; +} + +message ConfigureTopicRequest { + string topic = 1; + int64 ttl_seconds = 2; + int32 partition_count = 3; +} +message ConfigureTopicResponse { + string error = 1; +} + +message DeleteTopicRequest { + string topic = 1; +} +message DeleteTopicResponse { + string error = 1; +} diff --git a/weed/pb/queue_pb/queue.pb.go b/weed/pb/queue_pb/queue.pb.go new file mode 100644 index 000000000..8ec4d62aa --- /dev/null +++ b/weed/pb/queue_pb/queue.pb.go @@ -0,0 +1,516 @@ +// Code generated by protoc-gen-go. +// source: queue.proto +// DO NOT EDIT! + +/* +Package queue_pb is a generated protocol buffer package. + +It is generated from these files: + queue.proto + +It has these top-level messages: + WriteMessageRequest + WriteMessageResponse + ReadMessageRequest + ReadMessageResponse + ConfigureTopicRequest + ConfigureTopicResponse + DeleteTopicRequest + DeleteTopicResponse +*/ +package queue_pb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type WriteMessageRequest struct { + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + EventNs int64 `protobuf:"varint,2,opt,name=event_ns,json=eventNs" json:"event_ns,omitempty"` + PartitionKey []byte `protobuf:"bytes,3,opt,name=partition_key,json=partitionKey,proto3" json:"partition_key,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *WriteMessageRequest) Reset() { *m = WriteMessageRequest{} } +func (m *WriteMessageRequest) String() string { return proto.CompactTextString(m) } +func (*WriteMessageRequest) ProtoMessage() {} +func (*WriteMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *WriteMessageRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *WriteMessageRequest) GetEventNs() int64 { + if m != nil { + return m.EventNs + } + return 0 +} + +func (m *WriteMessageRequest) GetPartitionKey() []byte { + if m != nil { + return m.PartitionKey + } + return nil +} + +func (m *WriteMessageRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type WriteMessageResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + AckNs int64 `protobuf:"varint,2,opt,name=ack_ns,json=ackNs" json:"ack_ns,omitempty"` +} + +func (m *WriteMessageResponse) Reset() { *m = WriteMessageResponse{} } +func (m *WriteMessageResponse) String() string { return proto.CompactTextString(m) } +func (*WriteMessageResponse) ProtoMessage() {} +func (*WriteMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *WriteMessageResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *WriteMessageResponse) GetAckNs() int64 { + if m != nil { + return m.AckNs + } + return 0 +} + +type ReadMessageRequest struct { + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + StartNs int64 `protobuf:"varint,2,opt,name=start_ns,json=startNs" json:"start_ns,omitempty"` +} + +func (m *ReadMessageRequest) Reset() { *m = ReadMessageRequest{} } +func (m *ReadMessageRequest) String() string { return proto.CompactTextString(m) } +func (*ReadMessageRequest) ProtoMessage() {} +func (*ReadMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ReadMessageRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *ReadMessageRequest) GetStartNs() int64 { + if m != nil { + return m.StartNs + } + return 0 +} + +type ReadMessageResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + EventNs int64 `protobuf:"varint,2,opt,name=event_ns,json=eventNs" json:"event_ns,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *ReadMessageResponse) Reset() { *m = ReadMessageResponse{} } +func (m *ReadMessageResponse) String() string { return proto.CompactTextString(m) } +func (*ReadMessageResponse) ProtoMessage() {} +func (*ReadMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ReadMessageResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *ReadMessageResponse) GetEventNs() int64 { + if m != nil { + return m.EventNs + } + return 0 +} + +func (m *ReadMessageResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + 
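For context, here is a minimal client sketch against the generated queue_pb API above: it wires up the unary ConfigureTopic call and the bidirectional StreamWrite stream. The broker address, topic name, and payload are hypothetical placeholders; this change defines only the protocol and generated stubs, not a running broker.

package main

import (
    "context"
    "log"
    "time"

    "github.com/chrislusf/seaweedfs/weed/pb/queue_pb"
    "google.golang.org/grpc"
)

func main() {
    // "localhost:17777" is a placeholder; no broker endpoint is specified in this change.
    conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := queue_pb.NewSeaweedQueueClient(conn)

    // ConfigureTopic is a plain unary RPC: create or update the topic before writing.
    if _, err := client.ConfigureTopic(context.Background(), &queue_pb.ConfigureTopicRequest{
        Topic:          "events",
        TtlSeconds:     3600,
        PartitionCount: 4,
    }); err != nil {
        log.Fatal(err)
    }

    // StreamWrite is bidirectional: each Send is answered by an ack carrying ack_ns.
    stream, err := client.StreamWrite(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    if err := stream.Send(&queue_pb.WriteMessageRequest{
        Topic:        "events",
        EventNs:      time.Now().UnixNano(),
        PartitionKey: []byte("user-42"),
        Data:         []byte("hello"),
    }); err != nil {
        log.Fatal(err)
    }
    ack, err := stream.Recv()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("acked at %d ns, error=%q", ack.AckNs, ack.Error)
}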
+type ConfigureTopicRequest struct { + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + TtlSeconds int64 `protobuf:"varint,2,opt,name=ttl_seconds,json=ttlSeconds" json:"ttl_seconds,omitempty"` + PartitionCount int32 `protobuf:"varint,3,opt,name=partition_count,json=partitionCount" json:"partition_count,omitempty"` +} + +func (m *ConfigureTopicRequest) Reset() { *m = ConfigureTopicRequest{} } +func (m *ConfigureTopicRequest) String() string { return proto.CompactTextString(m) } +func (*ConfigureTopicRequest) ProtoMessage() {} +func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ConfigureTopicRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *ConfigureTopicRequest) GetTtlSeconds() int64 { + if m != nil { + return m.TtlSeconds + } + return 0 +} + +func (m *ConfigureTopicRequest) GetPartitionCount() int32 { + if m != nil { + return m.PartitionCount + } + return 0 +} + +type ConfigureTopicResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` +} + +func (m *ConfigureTopicResponse) Reset() { *m = ConfigureTopicResponse{} } +func (m *ConfigureTopicResponse) String() string { return proto.CompactTextString(m) } +func (*ConfigureTopicResponse) ProtoMessage() {} +func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ConfigureTopicResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type DeleteTopicRequest struct { + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` +} + +func (m *DeleteTopicRequest) Reset() { *m = DeleteTopicRequest{} } +func (m *DeleteTopicRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTopicRequest) ProtoMessage() {} +func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *DeleteTopicRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +type DeleteTopicResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` +} + +func (m *DeleteTopicResponse) Reset() { *m = DeleteTopicResponse{} } +func (m *DeleteTopicResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteTopicResponse) ProtoMessage() {} +func (*DeleteTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *DeleteTopicResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func init() { + proto.RegisterType((*WriteMessageRequest)(nil), "queue_pb.WriteMessageRequest") + proto.RegisterType((*WriteMessageResponse)(nil), "queue_pb.WriteMessageResponse") + proto.RegisterType((*ReadMessageRequest)(nil), "queue_pb.ReadMessageRequest") + proto.RegisterType((*ReadMessageResponse)(nil), "queue_pb.ReadMessageResponse") + proto.RegisterType((*ConfigureTopicRequest)(nil), "queue_pb.ConfigureTopicRequest") + proto.RegisterType((*ConfigureTopicResponse)(nil), "queue_pb.ConfigureTopicResponse") + proto.RegisterType((*DeleteTopicRequest)(nil), "queue_pb.DeleteTopicRequest") + proto.RegisterType((*DeleteTopicResponse)(nil), "queue_pb.DeleteTopicResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for SeaweedQueue service + +type SeaweedQueueClient interface { + StreamWrite(ctx context.Context, opts ...grpc.CallOption) (SeaweedQueue_StreamWriteClient, error) + StreamRead(ctx context.Context, in *ReadMessageRequest, opts ...grpc.CallOption) (SeaweedQueue_StreamReadClient, error) + ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) + DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) +} + +type seaweedQueueClient struct { + cc *grpc.ClientConn +} + +func NewSeaweedQueueClient(cc *grpc.ClientConn) SeaweedQueueClient { + return &seaweedQueueClient{cc} +} + +func (c *seaweedQueueClient) StreamWrite(ctx context.Context, opts ...grpc.CallOption) (SeaweedQueue_StreamWriteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SeaweedQueue_serviceDesc.Streams[0], c.cc, "/queue_pb.SeaweedQueue/StreamWrite", opts...) + if err != nil { + return nil, err + } + x := &seaweedQueueStreamWriteClient{stream} + return x, nil +} + +type SeaweedQueue_StreamWriteClient interface { + Send(*WriteMessageRequest) error + Recv() (*WriteMessageResponse, error) + grpc.ClientStream +} + +type seaweedQueueStreamWriteClient struct { + grpc.ClientStream +} + +func (x *seaweedQueueStreamWriteClient) Send(m *WriteMessageRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedQueueStreamWriteClient) Recv() (*WriteMessageResponse, error) { + m := new(WriteMessageResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedQueueClient) StreamRead(ctx context.Context, in *ReadMessageRequest, opts ...grpc.CallOption) (SeaweedQueue_StreamReadClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SeaweedQueue_serviceDesc.Streams[1], c.cc, "/queue_pb.SeaweedQueue/StreamRead", opts...) + if err != nil { + return nil, err + } + x := &seaweedQueueStreamReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedQueue_StreamReadClient interface { + Recv() (*ReadMessageResponse, error) + grpc.ClientStream +} + +type seaweedQueueStreamReadClient struct { + grpc.ClientStream +} + +func (x *seaweedQueueStreamReadClient) Recv() (*ReadMessageResponse, error) { + m := new(ReadMessageResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedQueueClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) { + out := new(ConfigureTopicResponse) + err := grpc.Invoke(ctx, "/queue_pb.SeaweedQueue/ConfigureTopic", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedQueueClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) { + out := new(DeleteTopicResponse) + err := grpc.Invoke(ctx, "/queue_pb.SeaweedQueue/DeleteTopic", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for SeaweedQueue service + +type SeaweedQueueServer interface { + StreamWrite(SeaweedQueue_StreamWriteServer) error + StreamRead(*ReadMessageRequest, SeaweedQueue_StreamReadServer) error + ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) + DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) +} + +func RegisterSeaweedQueueServer(s *grpc.Server, srv SeaweedQueueServer) { + s.RegisterService(&_SeaweedQueue_serviceDesc, srv) +} + +func _SeaweedQueue_StreamWrite_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedQueueServer).StreamWrite(&seaweedQueueStreamWriteServer{stream}) +} + +type SeaweedQueue_StreamWriteServer interface { + Send(*WriteMessageResponse) error + Recv() (*WriteMessageRequest, error) + grpc.ServerStream +} + +type seaweedQueueStreamWriteServer struct { + grpc.ServerStream +} + +func (x *seaweedQueueStreamWriteServer) Send(m *WriteMessageResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedQueueStreamWriteServer) Recv() (*WriteMessageRequest, error) { + m := new(WriteMessageRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedQueue_StreamRead_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadMessageRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SeaweedQueueServer).StreamRead(m, &seaweedQueueStreamReadServer{stream}) +} + +type SeaweedQueue_StreamReadServer interface { + Send(*ReadMessageResponse) error + grpc.ServerStream +} + +type seaweedQueueStreamReadServer struct { + grpc.ServerStream +} + +func (x *seaweedQueueStreamReadServer) Send(m *ReadMessageResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SeaweedQueue_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigureTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedQueueServer).ConfigureTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queue_pb.SeaweedQueue/ConfigureTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedQueueServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedQueue_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedQueueServer).DeleteTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queue_pb.SeaweedQueue/DeleteTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedQueueServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SeaweedQueue_serviceDesc = grpc.ServiceDesc{ + ServiceName: "queue_pb.SeaweedQueue", + HandlerType: (*SeaweedQueueServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ConfigureTopic", + Handler: _SeaweedQueue_ConfigureTopic_Handler, + }, + { + MethodName: "DeleteTopic", + Handler: _SeaweedQueue_DeleteTopic_Handler, + }, + }, + 
Streams: []grpc.StreamDesc{ + { + StreamName: "StreamWrite", + Handler: _SeaweedQueue_StreamWrite_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamRead", + Handler: _SeaweedQueue_StreamRead_Handler, + ServerStreams: true, + }, + }, + Metadata: "queue.proto", +} + +func init() { proto.RegisterFile("queue.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 429 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xae, 0x9b, 0xa6, 0x94, 0x49, 0x28, 0x68, 0xd2, 0xa2, 0x10, 0xd1, 0x36, 0x5a, 0x0e, 0x44, + 0x20, 0x59, 0x15, 0xbc, 0x41, 0x03, 0x27, 0x68, 0x04, 0x0e, 0x08, 0x89, 0x8b, 0xb5, 0xb5, 0xa7, + 0x95, 0x15, 0xb3, 0xeb, 0xee, 0x8e, 0xa9, 0x7a, 0xe2, 0x2d, 0x79, 0x1e, 0xe4, 0xb5, 0x5c, 0xdb, + 0x34, 0xb1, 0x7a, 0xf3, 0xcc, 0x78, 0xe7, 0xfb, 0xd9, 0x6f, 0x61, 0x70, 0x9d, 0x53, 0x4e, 0x7e, + 0x66, 0x34, 0x6b, 0xdc, 0x73, 0x45, 0x98, 0x5d, 0x88, 0x3f, 0x30, 0xfa, 0x61, 0x12, 0xa6, 0x73, + 0xb2, 0x56, 0x5e, 0x51, 0x40, 0xd7, 0x39, 0x59, 0xc6, 0x03, 0xe8, 0xb3, 0xce, 0x92, 0x68, 0xec, + 0x4d, 0xbd, 0xd9, 0xe3, 0xa0, 0x2c, 0xf0, 0x05, 0xec, 0xd1, 0x6f, 0x52, 0x1c, 0x2a, 0x3b, 0xde, + 0x9e, 0x7a, 0xb3, 0x5e, 0xf0, 0xc8, 0xd5, 0x0b, 0x8b, 0xaf, 0xe0, 0x49, 0x26, 0x0d, 0x27, 0x9c, + 0x68, 0x15, 0xae, 0xe8, 0x76, 0xdc, 0x9b, 0x7a, 0xb3, 0x61, 0x30, 0xbc, 0x6b, 0x7e, 0xa2, 0x5b, + 0x44, 0xd8, 0x89, 0x25, 0xcb, 0xf1, 0x8e, 0x9b, 0xb9, 0x6f, 0x31, 0x87, 0x83, 0x36, 0x01, 0x9b, + 0x69, 0x65, 0xa9, 0x60, 0x40, 0xc6, 0x68, 0x53, 0x31, 0x70, 0x05, 0x1e, 0xc2, 0xae, 0x8c, 0x56, + 0x35, 0x7e, 0x5f, 0x46, 0xab, 0x85, 0x15, 0x1f, 0x01, 0x03, 0x92, 0xf1, 0x43, 0x45, 0x58, 0x96, + 0xa6, 0x29, 0xc2, 0xd5, 0x0b, 0x2b, 0x7e, 0xc2, 0xa8, 0xb5, 0xa6, 0x93, 0x4a, 0x87, 0x19, 0x95, + 0xce, 0x5e, 0x43, 0xe7, 0x0d, 0x1c, 0xce, 0xb5, 0xba, 0x4c, 0xae, 0x72, 0x43, 0xdf, 0x0a, 0x22, + 0xdd, 0x2c, 0x4f, 0x60, 0xc0, 0x9c, 0x86, 0x96, 0x22, 0xad, 0xe2, 0x0a, 0x00, 0x98, 0xd3, 0x65, + 0xd9, 0xc1, 0xd7, 0xf0, 0xb4, 0x36, 0x3c, 0xd2, 0xb9, 0x62, 0x07, 0xd7, 0x0f, 0xf6, 0xef, 0xda, + 0xf3, 0xa2, 0x2b, 0x7c, 0x78, 0xfe, 0x3f, 0x70, 0x97, 0x2e, 0xf1, 0x06, 0xf0, 0x03, 0xa5, 0xc4, + 0x0f, 0x60, 0x29, 0xde, 0xc2, 0xa8, 0xf5, 0x6f, 0xd7, 0xe2, 0x77, 0x7f, 0xb7, 0x61, 0xb8, 0x24, + 0x79, 0x43, 0x14, 0x7f, 0x2d, 0xe2, 0x87, 0x01, 0x0c, 0x96, 0x6c, 0x48, 0xfe, 0x72, 0x01, 0xc0, + 0x23, 0xbf, 0x4a, 0xa5, 0xbf, 0x26, 0x92, 0x93, 0xe3, 0x4d, 0xe3, 0x12, 0x54, 0x6c, 0xcd, 0xbc, + 0x53, 0x0f, 0xcf, 0x01, 0xca, 0x9d, 0xc5, 0x45, 0xe2, 0xcb, 0xfa, 0xcc, 0xfd, 0x7c, 0x4c, 0x8e, + 0x36, 0x4c, 0xab, 0x85, 0xa7, 0x1e, 0x7e, 0x87, 0xfd, 0xb6, 0x79, 0x78, 0x52, 0x1f, 0x5a, 0x7b, + 0x9f, 0x93, 0xe9, 0xe6, 0x1f, 0xaa, 0xc5, 0xf8, 0x19, 0x06, 0x0d, 0xdf, 0x9a, 0x34, 0xef, 0x5b, + 0xdf, 0xa4, 0xb9, 0xc6, 0x6c, 0xb1, 0x75, 0x76, 0x0c, 0xcf, 0x6c, 0xe9, 0xeb, 0xa5, 0xf5, 0xa3, + 0x34, 0x21, 0xc5, 0x67, 0xe0, 0x2c, 0xfe, 0x52, 0xbc, 0xf6, 0x8b, 0x5d, 0xf7, 0xe8, 0xdf, 0xff, + 0x0b, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x3e, 0x14, 0xd8, 0x03, 0x04, 0x00, 0x00, +} diff --git a/weed/pb/volume_info.go b/weed/pb/volume_info.go new file mode 100644 index 000000000..c4f733f5c --- /dev/null +++ b/weed/pb/volume_info.go @@ -0,0 +1,76 @@ +package pb + +import ( + "bytes" + "fmt" + "io/ioutil" + + _ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend" + "github.com/chrislusf/seaweedfs/weed/util" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/glog" + 
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" +) + +// MaybeLoadVolumeInfo load the file data as *volume_server_pb.VolumeInfo, the returned volumeInfo will not be nil +func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool, error) { + + volumeInfo := &volume_server_pb.VolumeInfo{} + + glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName) + if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead { + if !exists { + return volumeInfo, false, nil + } + if !canRead { + glog.Warningf("can not read %s", fileName) + return volumeInfo, false, fmt.Errorf("can not read %s", fileName) + } + return volumeInfo, false, nil + } + + glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName) + tierData, readErr := ioutil.ReadFile(fileName) + if readErr != nil { + glog.Warningf("fail to read %s : %v", fileName, readErr) + return volumeInfo, false, fmt.Errorf("fail to read %s : %v", fileName, readErr) + } + + glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) + if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil { + glog.Warningf("unmarshal error: %v", err) + return volumeInfo, false, fmt.Errorf("unmarshal error: %v", err) + } + + if len(volumeInfo.GetFiles()) == 0 { + return volumeInfo, false, nil + } + + return volumeInfo, true, nil +} + +func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) error { + + if exists, _, canWrite, _, _ := util.CheckFile(fileName); exists && !canWrite { + return fmt.Errorf("%s not writable", fileName) + } + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, marshalErr := m.MarshalToString(volumeInfo) + if marshalErr != nil { + return fmt.Errorf("marshal to %s: %v", fileName, marshalErr) + } + + writeErr := ioutil.WriteFile(fileName, []byte(text), 0755) + if writeErr != nil { + return fmt.Errorf("fail to write %s : %v", fileName, writeErr) + } + + return nil +} diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 3a5874c02..ce53fdc96 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -8,6 +8,10 @@ service VolumeServer { //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
    rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) {
    }
+
+    rpc FileGet (FileGetRequest) returns (stream FileGetResponse) {
+    }
+
    rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) {
    }
    rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (VacuumVolumeCompactResponse) {
@@ -35,6 +39,8 @@ service VolumeServer {
    }
    rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) {
    }
+    rpc VolumeConfigure (VolumeConfigureRequest) returns (VolumeConfigureResponse) {
+    }

    // copy the .idx .dat files, and mount this volume
    rpc VolumeCopy (VolumeCopyRequest) returns (VolumeCopyResponse) {
@@ -66,8 +72,19 @@
    }
    rpc VolumeEcBlobDelete (VolumeEcBlobDeleteRequest) returns (VolumeEcBlobDeleteResponse) {
    }
+    rpc VolumeEcShardsToVolume (VolumeEcShardsToVolumeRequest) returns (VolumeEcShardsToVolumeResponse) {
+    }

-    // query
+    // tiered storage
+    rpc VolumeTierMoveDatToRemote (VolumeTierMoveDatToRemoteRequest) returns (stream VolumeTierMoveDatToRemoteResponse) {
+    }
+    rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) {
+    }
+
+    rpc VolumeServerStatus (VolumeServerStatusRequest) returns (VolumeServerStatusResponse) {
+    }
+
+    // query
    rpc Query (QueryRequest) returns (stream QueriedStripe) {
    }

@@ -90,6 +107,22 @@ message DeleteResult {
    uint32 version = 5;
}

+message FileGetRequest {
+    string file_id = 1;
+    bool accept_gzip = 2;
+}
+message FileGetResponse {
+    bytes data = 1;
+    uint32 content_length = 2;
+    string content_type = 3;
+    uint64 last_modified = 4;
+    string filename = 5;
+    string etag = 6;
+    bool is_gzipped = 7;
+    map<string, string> headers = 8;
+    int32 errorCode = 9;
+}
+
message Empty {
}

@@ -181,6 +214,14 @@ message VolumeMarkReadonlyRequest {
message VolumeMarkReadonlyResponse {
}

+message VolumeConfigureRequest {
+    uint32 volume_id = 1;
+    string replication = 2;
+}
+message VolumeConfigureResponse {
+    string error = 1;
+}
+
message VolumeCopyRequest {
    uint32 volume_id = 1;
    string collection = 2;
@@ -199,6 +240,7 @@ message CopyFileRequest {
    uint64 stop_offset = 4;
    string collection = 5;
    bool is_ec_volume = 6;
+    bool ignore_source_file_not_found = 7;
}
message CopyFileResponse {
    bytes file_content = 1;
@@ -245,6 +287,8 @@ message VolumeEcShardsCopyRequest {
    repeated uint32 shard_ids = 3;
    bool copy_ecx_file = 4;
    string source_data_node = 5;
+    bool copy_ecj_file = 6;
+    bool copy_vif_file = 7;
}
message VolumeEcShardsCopyResponse {
}
@@ -293,6 +337,13 @@ message VolumeEcBlobDeleteRequest {
message VolumeEcBlobDeleteResponse {
}

+message VolumeEcShardsToVolumeRequest {
+    uint32 volume_id = 1;
+    string collection = 2;
+}
+message VolumeEcShardsToVolumeResponse {
+}
+
message ReadVolumeFileStatusRequest {
    uint32 volume_id = 1;
}
@@ -312,6 +363,8 @@ message DiskStatus {
    uint64 all = 2;
    uint64 used = 3;
    uint64 free = 4;
+    float percent_free = 5;
+    float percent_used = 6;
}

message MemStatus {
@@ -324,6 +377,52 @@
    uint64 stack = 7;
}

+// tiered storage on volume servers
+message RemoteFile {
+    string backend_type = 1;
+    string backend_id = 2;
+    string key = 3;
+    uint64 offset = 4;
+    uint64 file_size = 5;
+    uint64 modified_time = 6;
+    string extension = 7;
+}
+message VolumeInfo {
+    repeated RemoteFile files = 1;
+    uint32 version = 2;
+    string replication = 3;
+}
+
+message VolumeTierMoveDatToRemoteRequest {
+    uint32 volume_id = 1;
+    string collection = 2;
+    string destination_backend_name = 3;
+    bool keep_local_dat_file = 4;
+}
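The RemoteFile and VolumeInfo messages above are exactly what the new weed/pb/volume_info.go helpers (earlier in this diff) persist as jsonpb text beside a volume. A minimal sketch of that round trip follows, assuming a hypothetical /data/1.vif path; the .vif naming is only suggested by the copy_vif_file flag elsewhere in this change, and the backend id is made up.

package main

import (
    "log"

    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
    // Describe a volume .dat file that has been offloaded to a remote backend.
    vi := &volume_server_pb.VolumeInfo{
        Version:     3,
        Replication: "001",
        Files: []*volume_server_pb.RemoteFile{{
            BackendType: "aws",     // the s3 backend is registered via a blank import in volume_info.go
            BackendId:   "default", // hypothetical backend id
            Key:         "1.dat",
            FileSize:    1024,
        }},
    }

    // SaveVolumeInfo writes the VolumeInfo as indented jsonpb text.
    if err := pb.SaveVolumeInfo("/data/1.vif", vi); err != nil {
        log.Fatal(err)
    }

    // MaybeLoadVolumeInfo never returns a nil VolumeInfo; the bool reports
    // whether a non-empty remote file list was actually loaded.
    loaded, found, err := pb.MaybeLoadVolumeInfo("/data/1.vif")
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("found=%v files=%d", found, len(loaded.GetFiles()))
}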
+message VolumeTierMoveDatToRemoteResponse { + int64 processed = 1; + float processedPercentage = 2; +} + +message VolumeTierMoveDatFromRemoteRequest { + uint32 volume_id = 1; + string collection = 2; + bool keep_remote_dat_file = 3; +} +message VolumeTierMoveDatFromRemoteResponse { + int64 processed = 1; + float processedPercentage = 2; +} + +message VolumeServerStatusRequest { + +} +message VolumeServerStatusResponse { + repeated DiskStatus disk_statuses = 1; + MemStatus memory_status = 2; +} + +// select on volume servers message QueryRequest { repeated string selections = 1; repeated string from_file_ids = 2; @@ -338,17 +437,17 @@ message QueryRequest { // NONE | GZIP | BZIP2 string compression_type = 1; message CSVInput { - string file_header_info = 1; // Valid values: NONE | USE | IGNORE - string record_delimiter = 2; // Default: \n - string field_delimiter = 3; // Default: , - string quote_charactoer = 4; // Default: " + string file_header_info = 1; // Valid values: NONE | USE | IGNORE + string record_delimiter = 2; // Default: \n + string field_delimiter = 3; // Default: , + string quote_charactoer = 4; // Default: " string quote_escape_character = 5; // Default: " - string comments = 6; // Default: # + string comments = 6; // Default: # // If true, records might contain record delimiters within quote characters - bool allow_quoted_record_delimiter = 7; // default False. + bool allow_quoted_record_delimiter = 7; // default False. } message JSONInput { - string type = 1; // Valid values: DOCUMENT | LINES + string type = 1; // Valid values: DOCUMENT | LINES } message ParquetInput { } @@ -361,10 +460,10 @@ message QueryRequest { message OutputSerialization { message CSVOutput { - string quote_fields = 1; // Valid values: ALWAYS | ASNEEDED - string record_delimiter = 2; // Default: \n - string field_delimiter = 3; // Default: , - string quote_charactoer = 4; // Default: " + string quote_fields = 1; // Valid values: ALWAYS | ASNEEDED + string record_delimiter = 2; // Default: \n + string field_delimiter = 3; // Default: , + string quote_charactoer = 4; // Default: " string quote_escape_character = 5; // Default: " } message JSONOutput { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 24907cdba..56baa0cf7 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -12,6 +12,8 @@ It has these top-level messages: BatchDeleteRequest BatchDeleteResponse DeleteResult + FileGetRequest + FileGetResponse Empty VacuumVolumeCheckRequest VacuumVolumeCheckResponse @@ -37,6 +39,8 @@ It has these top-level messages: VolumeDeleteResponse VolumeMarkReadonlyRequest VolumeMarkReadonlyResponse + VolumeConfigureRequest + VolumeConfigureResponse VolumeCopyRequest VolumeCopyResponse CopyFileRequest @@ -61,10 +65,20 @@ It has these top-level messages: VolumeEcShardReadResponse VolumeEcBlobDeleteRequest VolumeEcBlobDeleteResponse + VolumeEcShardsToVolumeRequest + VolumeEcShardsToVolumeResponse ReadVolumeFileStatusRequest ReadVolumeFileStatusResponse DiskStatus MemStatus + RemoteFile + VolumeInfo + VolumeTierMoveDatToRemoteRequest + VolumeTierMoveDatToRemoteResponse + VolumeTierMoveDatFromRemoteRequest + VolumeTierMoveDatFromRemoteResponse + VolumeServerStatusRequest + VolumeServerStatusResponse QueryRequest QueriedStripe */ @@ -170,13 +184,117 @@ func (m *DeleteResult) GetVersion() uint32 { return 0 } +type FileGetRequest struct { + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" 
json:"file_id,omitempty"` + AcceptGzip bool `protobuf:"varint,2,opt,name=accept_gzip,json=acceptGzip" json:"accept_gzip,omitempty"` +} + +func (m *FileGetRequest) Reset() { *m = FileGetRequest{} } +func (m *FileGetRequest) String() string { return proto.CompactTextString(m) } +func (*FileGetRequest) ProtoMessage() {} +func (*FileGetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *FileGetRequest) GetFileId() string { + if m != nil { + return m.FileId + } + return "" +} + +func (m *FileGetRequest) GetAcceptGzip() bool { + if m != nil { + return m.AcceptGzip + } + return false +} + +type FileGetResponse struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + ContentLength uint32 `protobuf:"varint,2,opt,name=content_length,json=contentLength" json:"content_length,omitempty"` + ContentType string `protobuf:"bytes,3,opt,name=content_type,json=contentType" json:"content_type,omitempty"` + LastModified uint64 `protobuf:"varint,4,opt,name=last_modified,json=lastModified" json:"last_modified,omitempty"` + Filename string `protobuf:"bytes,5,opt,name=filename" json:"filename,omitempty"` + Etag string `protobuf:"bytes,6,opt,name=etag" json:"etag,omitempty"` + IsGzipped bool `protobuf:"varint,7,opt,name=is_gzipped,json=isGzipped" json:"is_gzipped,omitempty"` + Headers map[string]string `protobuf:"bytes,8,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ErrorCode int32 `protobuf:"varint,9,opt,name=errorCode" json:"errorCode,omitempty"` +} + +func (m *FileGetResponse) Reset() { *m = FileGetResponse{} } +func (m *FileGetResponse) String() string { return proto.CompactTextString(m) } +func (*FileGetResponse) ProtoMessage() {} +func (*FileGetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *FileGetResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *FileGetResponse) GetContentLength() uint32 { + if m != nil { + return m.ContentLength + } + return 0 +} + +func (m *FileGetResponse) GetContentType() string { + if m != nil { + return m.ContentType + } + return "" +} + +func (m *FileGetResponse) GetLastModified() uint64 { + if m != nil { + return m.LastModified + } + return 0 +} + +func (m *FileGetResponse) GetFilename() string { + if m != nil { + return m.Filename + } + return "" +} + +func (m *FileGetResponse) GetEtag() string { + if m != nil { + return m.Etag + } + return "" +} + +func (m *FileGetResponse) GetIsGzipped() bool { + if m != nil { + return m.IsGzipped + } + return false +} + +func (m *FileGetResponse) GetHeaders() map[string]string { + if m != nil { + return m.Headers + } + return nil +} + +func (m *FileGetResponse) GetErrorCode() int32 { + if m != nil { + return m.ErrorCode + } + return 0 +} + type Empty struct { } func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } type VacuumVolumeCheckRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -185,7 +303,7 @@ type VacuumVolumeCheckRequest struct { func (m *VacuumVolumeCheckRequest) Reset() { *m = VacuumVolumeCheckRequest{} } func (m *VacuumVolumeCheckRequest) String() string { return proto.CompactTextString(m) } func 
(*VacuumVolumeCheckRequest) ProtoMessage() {} -func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *VacuumVolumeCheckRequest) GetVolumeId() uint32 { if m != nil { @@ -201,7 +319,7 @@ type VacuumVolumeCheckResponse struct { func (m *VacuumVolumeCheckResponse) Reset() { *m = VacuumVolumeCheckResponse{} } func (m *VacuumVolumeCheckResponse) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCheckResponse) ProtoMessage() {} -func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { if m != nil { @@ -218,7 +336,7 @@ type VacuumVolumeCompactRequest struct { func (m *VacuumVolumeCompactRequest) Reset() { *m = VacuumVolumeCompactRequest{} } func (m *VacuumVolumeCompactRequest) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCompactRequest) ProtoMessage() {} -func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *VacuumVolumeCompactRequest) GetVolumeId() uint32 { if m != nil { @@ -240,7 +358,7 @@ type VacuumVolumeCompactResponse struct { func (m *VacuumVolumeCompactResponse) Reset() { *m = VacuumVolumeCompactResponse{} } func (m *VacuumVolumeCompactResponse) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCompactResponse) ProtoMessage() {} -func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } type VacuumVolumeCommitRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -249,7 +367,7 @@ type VacuumVolumeCommitRequest struct { func (m *VacuumVolumeCommitRequest) Reset() { *m = VacuumVolumeCommitRequest{} } func (m *VacuumVolumeCommitRequest) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCommitRequest) ProtoMessage() {} -func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } func (m *VacuumVolumeCommitRequest) GetVolumeId() uint32 { if m != nil { @@ -264,7 +382,7 @@ type VacuumVolumeCommitResponse struct { func (m *VacuumVolumeCommitResponse) Reset() { *m = VacuumVolumeCommitResponse{} } func (m *VacuumVolumeCommitResponse) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCommitResponse) ProtoMessage() {} -func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } type VacuumVolumeCleanupRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -273,7 +391,7 @@ type VacuumVolumeCleanupRequest struct { func (m *VacuumVolumeCleanupRequest) Reset() { *m = VacuumVolumeCleanupRequest{} } func (m *VacuumVolumeCleanupRequest) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCleanupRequest) 
ProtoMessage() {} -func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *VacuumVolumeCleanupRequest) GetVolumeId() uint32 { if m != nil { @@ -288,7 +406,7 @@ type VacuumVolumeCleanupResponse struct { func (m *VacuumVolumeCleanupResponse) Reset() { *m = VacuumVolumeCleanupResponse{} } func (m *VacuumVolumeCleanupResponse) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCleanupResponse) ProtoMessage() {} -func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } type DeleteCollectionRequest struct { Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"` @@ -297,7 +415,7 @@ type DeleteCollectionRequest struct { func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} } func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) } func (*DeleteCollectionRequest) ProtoMessage() {} -func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *DeleteCollectionRequest) GetCollection() string { if m != nil { @@ -312,7 +430,7 @@ type DeleteCollectionResponse struct { func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} } func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) } func (*DeleteCollectionResponse) ProtoMessage() {} -func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } type AllocateVolumeRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -326,7 +444,7 @@ type AllocateVolumeRequest struct { func (m *AllocateVolumeRequest) Reset() { *m = AllocateVolumeRequest{} } func (m *AllocateVolumeRequest) String() string { return proto.CompactTextString(m) } func (*AllocateVolumeRequest) ProtoMessage() {} -func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *AllocateVolumeRequest) GetVolumeId() uint32 { if m != nil { @@ -376,7 +494,7 @@ type AllocateVolumeResponse struct { func (m *AllocateVolumeResponse) Reset() { *m = AllocateVolumeResponse{} } func (m *AllocateVolumeResponse) String() string { return proto.CompactTextString(m) } func (*AllocateVolumeResponse) ProtoMessage() {} -func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } type VolumeSyncStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -385,7 +503,7 @@ type VolumeSyncStatusRequest struct { func (m *VolumeSyncStatusRequest) Reset() { *m = VolumeSyncStatusRequest{} } func (m *VolumeSyncStatusRequest) String() string { return proto.CompactTextString(m) } func (*VolumeSyncStatusRequest) ProtoMessage() {} -func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{16} } +func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *VolumeSyncStatusRequest) GetVolumeId() uint32 { if m != nil { @@ -407,7 +525,7 @@ type VolumeSyncStatusResponse struct { func (m *VolumeSyncStatusResponse) Reset() { *m = VolumeSyncStatusResponse{} } func (m *VolumeSyncStatusResponse) String() string { return proto.CompactTextString(m) } func (*VolumeSyncStatusResponse) ProtoMessage() {} -func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *VolumeSyncStatusResponse) GetVolumeId() uint32 { if m != nil { @@ -466,7 +584,7 @@ type VolumeIncrementalCopyRequest struct { func (m *VolumeIncrementalCopyRequest) Reset() { *m = VolumeIncrementalCopyRequest{} } func (m *VolumeIncrementalCopyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeIncrementalCopyRequest) ProtoMessage() {} -func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *VolumeIncrementalCopyRequest) GetVolumeId() uint32 { if m != nil { @@ -489,7 +607,7 @@ type VolumeIncrementalCopyResponse struct { func (m *VolumeIncrementalCopyResponse) Reset() { *m = VolumeIncrementalCopyResponse{} } func (m *VolumeIncrementalCopyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeIncrementalCopyResponse) ProtoMessage() {} -func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } func (m *VolumeIncrementalCopyResponse) GetFileContent() []byte { if m != nil { @@ -505,7 +623,7 @@ type VolumeMountRequest struct { func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} } func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeMountRequest) ProtoMessage() {} -func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } func (m *VolumeMountRequest) GetVolumeId() uint32 { if m != nil { @@ -520,7 +638,7 @@ type VolumeMountResponse struct { func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} } func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeMountResponse) ProtoMessage() {} -func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } type VolumeUnmountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -529,7 +647,7 @@ type VolumeUnmountRequest struct { func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} } func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeUnmountRequest) ProtoMessage() {} -func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func (m *VolumeUnmountRequest) GetVolumeId() uint32 { if m != nil { @@ -544,7 
+662,7 @@ type VolumeUnmountResponse struct { func (m *VolumeUnmountResponse) Reset() { *m = VolumeUnmountResponse{} } func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeUnmountResponse) ProtoMessage() {} -func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } type VolumeDeleteRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -553,7 +671,7 @@ type VolumeDeleteRequest struct { func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} } func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeDeleteRequest) ProtoMessage() {} -func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *VolumeDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -568,7 +686,7 @@ type VolumeDeleteResponse struct { func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} } func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeDeleteResponse) ProtoMessage() {} -func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } type VolumeMarkReadonlyRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -577,7 +695,7 @@ type VolumeMarkReadonlyRequest struct { func (m *VolumeMarkReadonlyRequest) Reset() { *m = VolumeMarkReadonlyRequest{} } func (m *VolumeMarkReadonlyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeMarkReadonlyRequest) ProtoMessage() {} -func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } func (m *VolumeMarkReadonlyRequest) GetVolumeId() uint32 { if m != nil { @@ -592,7 +710,47 @@ type VolumeMarkReadonlyResponse struct { func (m *VolumeMarkReadonlyResponse) Reset() { *m = VolumeMarkReadonlyResponse{} } func (m *VolumeMarkReadonlyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeMarkReadonlyResponse) ProtoMessage() {} -func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type VolumeConfigureRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` +} + +func (m *VolumeConfigureRequest) Reset() { *m = VolumeConfigureRequest{} } +func (m *VolumeConfigureRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeConfigureRequest) ProtoMessage() {} +func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +func (m *VolumeConfigureRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeConfigureRequest) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +type VolumeConfigureResponse 
struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` +} + +func (m *VolumeConfigureResponse) Reset() { *m = VolumeConfigureResponse{} } +func (m *VolumeConfigureResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeConfigureResponse) ProtoMessage() {} +func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *VolumeConfigureResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} type VolumeCopyRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -605,7 +763,7 @@ type VolumeCopyRequest struct { func (m *VolumeCopyRequest) Reset() { *m = VolumeCopyRequest{} } func (m *VolumeCopyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeCopyRequest) ProtoMessage() {} -func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *VolumeCopyRequest) GetVolumeId() uint32 { if m != nil { @@ -649,7 +807,7 @@ type VolumeCopyResponse struct { func (m *VolumeCopyResponse) Reset() { *m = VolumeCopyResponse{} } func (m *VolumeCopyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeCopyResponse) ProtoMessage() {} -func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 { if m != nil { @@ -659,18 +817,19 @@ func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 { } type CopyFileRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Ext string `protobuf:"bytes,2,opt,name=ext" json:"ext,omitempty"` - CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"` - StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset" json:"stop_offset,omitempty"` - Collection string `protobuf:"bytes,5,opt,name=collection" json:"collection,omitempty"` - IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume" json:"is_ec_volume,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Ext string `protobuf:"bytes,2,opt,name=ext" json:"ext,omitempty"` + CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"` + StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset" json:"stop_offset,omitempty"` + Collection string `protobuf:"bytes,5,opt,name=collection" json:"collection,omitempty"` + IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume" json:"is_ec_volume,omitempty"` + IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound" json:"ignore_source_file_not_found,omitempty"` } func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} } func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) } func (*CopyFileRequest) ProtoMessage() {} -func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } func (m *CopyFileRequest) 
GetVolumeId() uint32 { if m != nil { @@ -714,6 +873,13 @@ func (m *CopyFileRequest) GetIsEcVolume() bool { return false } +func (m *CopyFileRequest) GetIgnoreSourceFileNotFound() bool { + if m != nil { + return m.IgnoreSourceFileNotFound + } + return false +} + type CopyFileResponse struct { FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` } @@ -721,7 +887,7 @@ type CopyFileResponse struct { func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} } func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) } func (*CopyFileResponse) ProtoMessage() {} -func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } func (m *CopyFileResponse) GetFileContent() []byte { if m != nil { @@ -739,7 +905,7 @@ type VolumeTailSenderRequest struct { func (m *VolumeTailSenderRequest) Reset() { *m = VolumeTailSenderRequest{} } func (m *VolumeTailSenderRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTailSenderRequest) ProtoMessage() {} -func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } func (m *VolumeTailSenderRequest) GetVolumeId() uint32 { if m != nil { @@ -771,7 +937,7 @@ type VolumeTailSenderResponse struct { func (m *VolumeTailSenderResponse) Reset() { *m = VolumeTailSenderResponse{} } func (m *VolumeTailSenderResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTailSenderResponse) ProtoMessage() {} -func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } func (m *VolumeTailSenderResponse) GetNeedleHeader() []byte { if m != nil { @@ -804,7 +970,7 @@ type VolumeTailReceiverRequest struct { func (m *VolumeTailReceiverRequest) Reset() { *m = VolumeTailReceiverRequest{} } func (m *VolumeTailReceiverRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTailReceiverRequest) ProtoMessage() {} -func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } func (m *VolumeTailReceiverRequest) GetVolumeId() uint32 { if m != nil { @@ -840,7 +1006,7 @@ type VolumeTailReceiverResponse struct { func (m *VolumeTailReceiverResponse) Reset() { *m = VolumeTailReceiverResponse{} } func (m *VolumeTailReceiverResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTailReceiverResponse) ProtoMessage() {} -func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } type VolumeEcShardsGenerateRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -850,7 +1016,7 @@ type VolumeEcShardsGenerateRequest struct { func (m *VolumeEcShardsGenerateRequest) Reset() { *m = VolumeEcShardsGenerateRequest{} } func (m *VolumeEcShardsGenerateRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} -func (*VolumeEcShardsGenerateRequest) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{36} } +func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } func (m *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { if m != nil { @@ -872,7 +1038,7 @@ type VolumeEcShardsGenerateResponse struct { func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } type VolumeEcShardsRebuildRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -882,7 +1048,7 @@ type VolumeEcShardsRebuildRequest struct { func (m *VolumeEcShardsRebuildRequest) Reset() { *m = VolumeEcShardsRebuildRequest{} } func (m *VolumeEcShardsRebuildRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} -func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } +func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } func (m *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { if m != nil { @@ -905,7 +1071,7 @@ type VolumeEcShardsRebuildResponse struct { func (m *VolumeEcShardsRebuildResponse) Reset() { *m = VolumeEcShardsRebuildResponse{} } func (m *VolumeEcShardsRebuildResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} -func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } func (m *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { if m != nil { @@ -920,12 +1086,14 @@ type VolumeEcShardsCopyRequest struct { ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile" json:"copy_ecx_file,omitempty"` SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"` + CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile" json:"copy_ecj_file,omitempty"` + CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile" json:"copy_vif_file,omitempty"` } func (m *VolumeEcShardsCopyRequest) Reset() { *m = VolumeEcShardsCopyRequest{} } func (m *VolumeEcShardsCopyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsCopyRequest) ProtoMessage() {} -func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } +func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } func (m *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { if m != nil { @@ -962,13 +1130,27 @@ func (m *VolumeEcShardsCopyRequest) GetSourceDataNode() string { return "" } +func (m *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool { + if m != nil { + return m.CopyEcjFile + } + return false +} + +func (m *VolumeEcShardsCopyRequest) GetCopyVifFile() bool { + if m != nil { + return m.CopyVifFile + } + return 
false +} + type VolumeEcShardsCopyResponse struct { } func (m *VolumeEcShardsCopyResponse) Reset() { *m = VolumeEcShardsCopyResponse{} } func (m *VolumeEcShardsCopyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsCopyResponse) ProtoMessage() {} -func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } type VolumeEcShardsDeleteRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -979,7 +1161,7 @@ type VolumeEcShardsDeleteRequest struct { func (m *VolumeEcShardsDeleteRequest) Reset() { *m = VolumeEcShardsDeleteRequest{} } func (m *VolumeEcShardsDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} -func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } +func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } func (m *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -1008,7 +1190,7 @@ type VolumeEcShardsDeleteResponse struct { func (m *VolumeEcShardsDeleteResponse) Reset() { *m = VolumeEcShardsDeleteResponse{} } func (m *VolumeEcShardsDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} -func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } +func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } type VolumeEcShardsMountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1019,7 +1201,7 @@ type VolumeEcShardsMountRequest struct { func (m *VolumeEcShardsMountRequest) Reset() { *m = VolumeEcShardsMountRequest{} } func (m *VolumeEcShardsMountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsMountRequest) ProtoMessage() {} -func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } func (m *VolumeEcShardsMountRequest) GetVolumeId() uint32 { if m != nil { @@ -1048,7 +1230,7 @@ type VolumeEcShardsMountResponse struct { func (m *VolumeEcShardsMountResponse) Reset() { *m = VolumeEcShardsMountResponse{} } func (m *VolumeEcShardsMountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsMountResponse) ProtoMessage() {} -func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } +func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } type VolumeEcShardsUnmountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1058,7 +1240,7 @@ type VolumeEcShardsUnmountRequest struct { func (m *VolumeEcShardsUnmountRequest) Reset() { *m = VolumeEcShardsUnmountRequest{} } func (m *VolumeEcShardsUnmountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} -func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } +func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{50} } func (m *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { if m != nil { @@ -1080,7 +1262,7 @@ type VolumeEcShardsUnmountResponse struct { func (m *VolumeEcShardsUnmountResponse) Reset() { *m = VolumeEcShardsUnmountResponse{} } func (m *VolumeEcShardsUnmountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} -func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } type VolumeEcShardReadRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1093,7 +1275,7 @@ type VolumeEcShardReadRequest struct { func (m *VolumeEcShardReadRequest) Reset() { *m = VolumeEcShardReadRequest{} } func (m *VolumeEcShardReadRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardReadRequest) ProtoMessage() {} -func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } func (m *VolumeEcShardReadRequest) GetVolumeId() uint32 { if m != nil { @@ -1138,7 +1320,7 @@ type VolumeEcShardReadResponse struct { func (m *VolumeEcShardReadResponse) Reset() { *m = VolumeEcShardReadResponse{} } func (m *VolumeEcShardReadResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardReadResponse) ProtoMessage() {} -func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } +func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } func (m *VolumeEcShardReadResponse) GetData() []byte { if m != nil { @@ -1164,7 +1346,7 @@ type VolumeEcBlobDeleteRequest struct { func (m *VolumeEcBlobDeleteRequest) Reset() { *m = VolumeEcBlobDeleteRequest{} } func (m *VolumeEcBlobDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} -func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } +func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } func (m *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -1200,7 +1382,39 @@ type VolumeEcBlobDeleteResponse struct { func (m *VolumeEcBlobDeleteResponse) Reset() { *m = VolumeEcBlobDeleteResponse{} } func (m *VolumeEcBlobDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} -func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } + +type VolumeEcShardsToVolumeRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` +} + +func (m *VolumeEcShardsToVolumeRequest) Reset() { *m = VolumeEcShardsToVolumeRequest{} } +func (m *VolumeEcShardsToVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} +func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } + +func (m 
*VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeEcShardsToVolumeRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +type VolumeEcShardsToVolumeResponse struct { +} + +func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } +func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} +func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } type ReadVolumeFileStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1209,7 +1423,7 @@ type ReadVolumeFileStatusRequest struct { func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} } func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusRequest) ProtoMessage() {} -func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } +func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { if m != nil { @@ -1232,7 +1446,7 @@ type ReadVolumeFileStatusResponse struct { func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusResponse) ProtoMessage() {} -func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { if m != nil { @@ -1291,16 +1505,18 @@ func (m *ReadVolumeFileStatusResponse) GetCollection() string { } type DiskStatus struct { - Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` - Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"` + Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` + All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` + Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` + Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"` + PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree" json:"percent_free,omitempty"` + PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed" json:"percent_used,omitempty"` } func (m *DiskStatus) Reset() { *m = DiskStatus{} } func (m *DiskStatus) String() string { return proto.CompactTextString(m) } func (*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } +func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} } func (m *DiskStatus) GetDir() string { if m != nil { @@ -1330,6 +1546,20 @@ func (m *DiskStatus) GetFree() uint64 { return 0 } +func (m *DiskStatus) GetPercentFree() float32 { + if m != nil { + return m.PercentFree + } + return 0 +} + +func (m *DiskStatus) GetPercentUsed() float32 { + if m != nil { + return m.PercentUsed + } + return 0 +} + type MemStatus struct { 
 	Goroutines int32  `protobuf:"varint,1,opt,name=goroutines" json:"goroutines,omitempty"`
 	All        uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"`
@@ -1343,7 +1573,7 @@ type MemStatus struct {
 func (m *MemStatus) Reset()                    { *m = MemStatus{} }
 func (m *MemStatus) String() string            { return proto.CompactTextString(m) }
 func (*MemStatus) ProtoMessage()               {}
-func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} }
+func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} }
 
 func (m *MemStatus) GetGoroutines() int32 {
 	if m != nil {
@@ -1394,6 +1624,264 @@ func (m *MemStatus) GetStack() uint64 {
 	return 0
 }
 
+// tiered storage on volume servers
+type RemoteFile struct {
+	BackendType  string `protobuf:"bytes,1,opt,name=backend_type,json=backendType" json:"backend_type,omitempty"`
+	BackendId    string `protobuf:"bytes,2,opt,name=backend_id,json=backendId" json:"backend_id,omitempty"`
+	Key          string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"`
+	Offset       uint64 `protobuf:"varint,4,opt,name=offset" json:"offset,omitempty"`
+	FileSize     uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize" json:"file_size,omitempty"`
+	ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime" json:"modified_time,omitempty"`
+	Extension    string `protobuf:"bytes,7,opt,name=extension" json:"extension,omitempty"`
+}
+
+func (m *RemoteFile) Reset()                    { *m = RemoteFile{} }
+func (m *RemoteFile) String() string            { return proto.CompactTextString(m) }
+func (*RemoteFile) ProtoMessage()               {}
+func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{62} }
+
+func (m *RemoteFile) GetBackendType() string {
+	if m != nil {
+		return m.BackendType
+	}
+	return ""
+}
+
+func (m *RemoteFile) GetBackendId() string {
+	if m != nil {
+		return m.BackendId
+	}
+	return ""
+}
+
+func (m *RemoteFile) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+func (m *RemoteFile) GetOffset() uint64 {
+	if m != nil {
+		return m.Offset
+	}
+	return 0
+}
+
+func (m *RemoteFile) GetFileSize() uint64 {
+	if m != nil {
+		return m.FileSize
+	}
+	return 0
+}
+
+func (m *RemoteFile) GetModifiedTime() uint64 {
+	if m != nil {
+		return m.ModifiedTime
+	}
+	return 0
+}
+
+func (m *RemoteFile) GetExtension() string {
+	if m != nil {
+		return m.Extension
+	}
+	return ""
+}
+
+type VolumeInfo struct {
+	Files       []*RemoteFile `protobuf:"bytes,1,rep,name=files" json:"files,omitempty"`
+	Version     uint32        `protobuf:"varint,2,opt,name=version" json:"version,omitempty"`
+	Replication string        `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
+}
+
+func (m *VolumeInfo) Reset()                    { *m = VolumeInfo{} }
+func (m *VolumeInfo) String() string            { return proto.CompactTextString(m) }
+func (*VolumeInfo) ProtoMessage()               {}
+func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{63} }
+
+func (m *VolumeInfo) GetFiles() []*RemoteFile {
+	if m != nil {
+		return m.Files
+	}
+	return nil
+}
+
+func (m *VolumeInfo) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *VolumeInfo) GetReplication() string {
+	if m != nil {
+		return m.Replication
+	}
+	return ""
+}
+
+type VolumeTierMoveDatToRemoteRequest struct {
+	VolumeId               uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	Collection             string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
+	DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName" json:"destination_backend_name,omitempty"`
+	KeepLocalDatFile       bool   `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile" json:"keep_local_dat_file,omitempty"`
+}
+
+func (m *VolumeTierMoveDatToRemoteRequest) Reset()         { *m = VolumeTierMoveDatToRemoteRequest{} }
+func (m *VolumeTierMoveDatToRemoteRequest) String() string { return proto.CompactTextString(m) }
+func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage()    {}
+func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{64}
+}
+
+func (m *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 {
+	if m != nil {
+		return m.VolumeId
+	}
+	return 0
+}
+
+func (m *VolumeTierMoveDatToRemoteRequest) GetCollection() string {
+	if m != nil {
+		return m.Collection
+	}
+	return ""
+}
+
+func (m *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string {
+	if m != nil {
+		return m.DestinationBackendName
+	}
+	return ""
+}
+
+func (m *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool {
+	if m != nil {
+		return m.KeepLocalDatFile
+	}
+	return false
+}
+
+type VolumeTierMoveDatToRemoteResponse struct {
+	Processed           int64   `protobuf:"varint,1,opt,name=processed" json:"processed,omitempty"`
+	ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage" json:"processedPercentage,omitempty"`
+}
+
+func (m *VolumeTierMoveDatToRemoteResponse) Reset()         { *m = VolumeTierMoveDatToRemoteResponse{} }
+func (m *VolumeTierMoveDatToRemoteResponse) String() string { return proto.CompactTextString(m) }
+func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage()    {}
+func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{65}
+}
+
+func (m *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 {
+	if m != nil {
+		return m.Processed
+	}
+	return 0
+}
+
+func (m *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 {
+	if m != nil {
+		return m.ProcessedPercentage
+	}
+	return 0
+}
+
+type VolumeTierMoveDatFromRemoteRequest struct {
+	VolumeId          uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	Collection        string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
+	KeepRemoteDatFile bool   `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile" json:"keep_remote_dat_file,omitempty"`
+}
+
+func (m *VolumeTierMoveDatFromRemoteRequest) Reset()         { *m = VolumeTierMoveDatFromRemoteRequest{} }
+func (m *VolumeTierMoveDatFromRemoteRequest) String() string { return proto.CompactTextString(m) }
+func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage()    {}
+func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{66}
+}
+
+func (m *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 {
+	if m != nil {
+		return m.VolumeId
+	}
+	return 0
+}
+
+func (m *VolumeTierMoveDatFromRemoteRequest) GetCollection() string {
+	if m != nil {
+		return m.Collection
+	}
+	return ""
+}
+
+func (m *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool {
+	if m != nil {
+		return m.KeepRemoteDatFile
+	}
+	return false
+}
+
+type VolumeTierMoveDatFromRemoteResponse struct {
+	Processed           int64   `protobuf:"varint,1,opt,name=processed" json:"processed,omitempty"`
+	ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage" json:"processedPercentage,omitempty"`
+}
+
+func (m *VolumeTierMoveDatFromRemoteResponse)
Reset() { *m = VolumeTierMoveDatFromRemoteResponse{} } +func (m *VolumeTierMoveDatFromRemoteResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} +func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{67} +} + +func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { + if m != nil { + return m.Processed + } + return 0 +} + +func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 { + if m != nil { + return m.ProcessedPercentage + } + return 0 +} + +type VolumeServerStatusRequest struct { +} + +func (m *VolumeServerStatusRequest) Reset() { *m = VolumeServerStatusRequest{} } +func (m *VolumeServerStatusRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeServerStatusRequest) ProtoMessage() {} +func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68} } + +type VolumeServerStatusResponse struct { + DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses" json:"disk_statuses,omitempty"` + MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus" json:"memory_status,omitempty"` +} + +func (m *VolumeServerStatusResponse) Reset() { *m = VolumeServerStatusResponse{} } +func (m *VolumeServerStatusResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeServerStatusResponse) ProtoMessage() {} +func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{69} } + +func (m *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus { + if m != nil { + return m.DiskStatuses + } + return nil +} + +func (m *VolumeServerStatusResponse) GetMemoryStatus() *MemStatus { + if m != nil { + return m.MemoryStatus + } + return nil +} + +// select on volume servers type QueryRequest struct { Selections []string `protobuf:"bytes,1,rep,name=selections" json:"selections,omitempty"` FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds" json:"from_file_ids,omitempty"` @@ -1405,7 +1893,7 @@ type QueryRequest struct { func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (m *QueryRequest) String() string { return proto.CompactTextString(m) } func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } +func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70} } func (m *QueryRequest) GetSelections() []string { if m != nil { @@ -1451,7 +1939,7 @@ type QueryRequest_Filter struct { func (m *QueryRequest_Filter) Reset() { *m = QueryRequest_Filter{} } func (m *QueryRequest_Filter) String() string { return proto.CompactTextString(m) } func (*QueryRequest_Filter) ProtoMessage() {} -func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56, 0} } +func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70, 0} } func (m *QueryRequest_Filter) GetField() string { if m != nil { @@ -1486,7 +1974,7 @@ func (m *QueryRequest_InputSerialization) Reset() { *m = QueryRequest_In func (m *QueryRequest_InputSerialization) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization) ProtoMessage() {} func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 1} + return fileDescriptor0, []int{70, 1} } func (m *QueryRequest_InputSerialization) 
GetCompressionType() string { @@ -1534,7 +2022,7 @@ func (m *QueryRequest_InputSerialization_CSVInput) Reset() { func (m *QueryRequest_InputSerialization_CSVInput) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 1, 0} + return fileDescriptor0, []int{70, 1, 0} } func (m *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { @@ -1596,7 +2084,7 @@ func (m *QueryRequest_InputSerialization_JSONInput) Reset() { func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 1, 1} + return fileDescriptor0, []int{70, 1, 1} } func (m *QueryRequest_InputSerialization_JSONInput) GetType() string { @@ -1617,7 +2105,7 @@ func (m *QueryRequest_InputSerialization_ParquetInput) String() string { } func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 1, 2} + return fileDescriptor0, []int{70, 1, 2} } type QueryRequest_OutputSerialization struct { @@ -1629,7 +2117,7 @@ func (m *QueryRequest_OutputSerialization) Reset() { *m = QueryRequest_O func (m *QueryRequest_OutputSerialization) String() string { return proto.CompactTextString(m) } func (*QueryRequest_OutputSerialization) ProtoMessage() {} func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 2} + return fileDescriptor0, []int{70, 2} } func (m *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { @@ -1662,7 +2150,7 @@ func (m *QueryRequest_OutputSerialization_CSVOutput) String() string { } func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 2, 0} + return fileDescriptor0, []int{70, 2, 0} } func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { @@ -1712,7 +2200,7 @@ func (m *QueryRequest_OutputSerialization_JSONOutput) String() string { } func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 2, 1} + return fileDescriptor0, []int{70, 2, 1} } func (m *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { @@ -1729,7 +2217,7 @@ type QueriedStripe struct { func (m *QueriedStripe) Reset() { *m = QueriedStripe{} } func (m *QueriedStripe) String() string { return proto.CompactTextString(m) } func (*QueriedStripe) ProtoMessage() {} -func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{71} } func (m *QueriedStripe) GetRecords() []byte { if m != nil { @@ -1742,6 +2230,8 @@ func init() { proto.RegisterType((*BatchDeleteRequest)(nil), "volume_server_pb.BatchDeleteRequest") proto.RegisterType((*BatchDeleteResponse)(nil), "volume_server_pb.BatchDeleteResponse") proto.RegisterType((*DeleteResult)(nil), "volume_server_pb.DeleteResult") + proto.RegisterType((*FileGetRequest)(nil), 
"volume_server_pb.FileGetRequest") + proto.RegisterType((*FileGetResponse)(nil), "volume_server_pb.FileGetResponse") proto.RegisterType((*Empty)(nil), "volume_server_pb.Empty") proto.RegisterType((*VacuumVolumeCheckRequest)(nil), "volume_server_pb.VacuumVolumeCheckRequest") proto.RegisterType((*VacuumVolumeCheckResponse)(nil), "volume_server_pb.VacuumVolumeCheckResponse") @@ -1767,6 +2257,8 @@ func init() { proto.RegisterType((*VolumeDeleteResponse)(nil), "volume_server_pb.VolumeDeleteResponse") proto.RegisterType((*VolumeMarkReadonlyRequest)(nil), "volume_server_pb.VolumeMarkReadonlyRequest") proto.RegisterType((*VolumeMarkReadonlyResponse)(nil), "volume_server_pb.VolumeMarkReadonlyResponse") + proto.RegisterType((*VolumeConfigureRequest)(nil), "volume_server_pb.VolumeConfigureRequest") + proto.RegisterType((*VolumeConfigureResponse)(nil), "volume_server_pb.VolumeConfigureResponse") proto.RegisterType((*VolumeCopyRequest)(nil), "volume_server_pb.VolumeCopyRequest") proto.RegisterType((*VolumeCopyResponse)(nil), "volume_server_pb.VolumeCopyResponse") proto.RegisterType((*CopyFileRequest)(nil), "volume_server_pb.CopyFileRequest") @@ -1791,10 +2283,20 @@ func init() { proto.RegisterType((*VolumeEcShardReadResponse)(nil), "volume_server_pb.VolumeEcShardReadResponse") proto.RegisterType((*VolumeEcBlobDeleteRequest)(nil), "volume_server_pb.VolumeEcBlobDeleteRequest") proto.RegisterType((*VolumeEcBlobDeleteResponse)(nil), "volume_server_pb.VolumeEcBlobDeleteResponse") + proto.RegisterType((*VolumeEcShardsToVolumeRequest)(nil), "volume_server_pb.VolumeEcShardsToVolumeRequest") + proto.RegisterType((*VolumeEcShardsToVolumeResponse)(nil), "volume_server_pb.VolumeEcShardsToVolumeResponse") proto.RegisterType((*ReadVolumeFileStatusRequest)(nil), "volume_server_pb.ReadVolumeFileStatusRequest") proto.RegisterType((*ReadVolumeFileStatusResponse)(nil), "volume_server_pb.ReadVolumeFileStatusResponse") proto.RegisterType((*DiskStatus)(nil), "volume_server_pb.DiskStatus") proto.RegisterType((*MemStatus)(nil), "volume_server_pb.MemStatus") + proto.RegisterType((*RemoteFile)(nil), "volume_server_pb.RemoteFile") + proto.RegisterType((*VolumeInfo)(nil), "volume_server_pb.VolumeInfo") + proto.RegisterType((*VolumeTierMoveDatToRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteRequest") + proto.RegisterType((*VolumeTierMoveDatToRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteResponse") + proto.RegisterType((*VolumeTierMoveDatFromRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteRequest") + proto.RegisterType((*VolumeTierMoveDatFromRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteResponse") + proto.RegisterType((*VolumeServerStatusRequest)(nil), "volume_server_pb.VolumeServerStatusRequest") + proto.RegisterType((*VolumeServerStatusResponse)(nil), "volume_server_pb.VolumeServerStatusResponse") proto.RegisterType((*QueryRequest)(nil), "volume_server_pb.QueryRequest") proto.RegisterType((*QueryRequest_Filter)(nil), "volume_server_pb.QueryRequest.Filter") proto.RegisterType((*QueryRequest_InputSerialization)(nil), "volume_server_pb.QueryRequest.InputSerialization") @@ -1820,6 +2322,7 @@ const _ = grpc.SupportPackageIsVersion4 type VolumeServerClient interface { // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) + FileGet(ctx context.Context, in *FileGetRequest, opts ...grpc.CallOption) (VolumeServer_FileGetClient, error) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) @@ -1832,6 +2335,7 @@ type VolumeServerClient interface { VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) + VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) // copy the .idx .dat files, and mount this volume VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) @@ -1847,7 +2351,12 @@ type VolumeServerClient interface { VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) - // query + VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) + // tiered storage + VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) + VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) + VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) + // query Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) } @@ -1868,6 +2377,38 @@ func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteReq return out, nil } +func (c *volumeServerClient) FileGet(ctx context.Context, in *FileGetRequest, opts ...grpc.CallOption) (VolumeServer_FileGetClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/FileGet", opts...) 
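`FileGet` is a server-streaming RPC, so the generated client returns a `VolumeServer_FileGetClient` that the caller drains with `Recv` until `io.EOF`. A sketch of that loop follows; note that `FileGetRequest`/`FileGetResponse` are only registered in this hunk without their fields, so the `FileId` request field and the `Data` payload field are assumptions about the message layout, and the imports from the earlier sketch (plus `"io"`) are assumed:

```go
// fetchFile streams a file's bytes from a volume server via FileGet.
func fetchFile(ctx context.Context, client volume_server_pb.VolumeServerClient, fid string) ([]byte, error) {
	// FileId is an assumed field name; the request's fields are not
	// visible in this diff hunk.
	stream, err := client.FileGet(ctx, &volume_server_pb.FileGetRequest{FileId: fid})
	if err != nil {
		return nil, err
	}
	var buf []byte
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return buf, nil // server closed the stream: file complete
		}
		if err != nil {
			return nil, err
		}
		// GetData is assumed to return the chunk payload of FileGetResponse.
		buf = append(buf, resp.GetData()...)
	}
}
```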
+ if err != nil { + return nil, err + } + x := &volumeServerFileGetClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_FileGetClient interface { + Recv() (*FileGetResponse, error) + grpc.ClientStream +} + +type volumeServerFileGetClient struct { + grpc.ClientStream +} + +func (x *volumeServerFileGetClient) Recv() (*FileGetResponse, error) { + m := new(FileGetResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) { out := new(VacuumVolumeCheckResponse) err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, c.cc, opts...) @@ -1932,7 +2473,7 @@ func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyn } func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) if err != nil { return nil, err } @@ -1999,6 +2540,15 @@ func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeM return out, nil } +func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) { + out := new(VolumeConfigureResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) { out := new(VolumeCopyResponse) err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, c.cc, opts...) @@ -2018,7 +2568,7 @@ func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadV } func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) if err != nil { return nil, err } @@ -2050,7 +2600,7 @@ func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { } func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) 
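Inserting `FileGet` as the first entry in `_VolumeServer_serviceDesc.Streams` is why this patch also renumbers every later stream index: `VolumeIncrementalCopy` moves from `Streams[0]` to `Streams[1]`, `CopyFile` from `Streams[1]` to `Streams[2]`, and so on through `Query` at `Streams[7]`. The indices are hard-coded in each client method and must match the descriptor order declared near the end of this file. A small same-package guard (a sketch; `_VolumeServer_serviceDesc` is unexported, and `"fmt"` is assumed imported) can catch a mismatch early:

```go
// Verify the stream indices used by the client methods stay in sync with
// the StreamDesc order in the service descriptor after this change.
func init() {
	want := []string{
		"FileGet", "VolumeIncrementalCopy", "CopyFile", "VolumeTailSender",
		"VolumeEcShardRead", "VolumeTierMoveDatToRemote",
		"VolumeTierMoveDatFromRemote", "Query",
	}
	for i, s := range _VolumeServer_serviceDesc.Streams {
		if s.StreamName != want[i] {
			panic(fmt.Sprintf("stream %d is %s, expected %s", i, s.StreamName, want[i]))
		}
	}
}
```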
if err != nil { return nil, err } @@ -2145,7 +2695,7 @@ func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *Volu } func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) if err != nil { return nil, err } @@ -2185,8 +2735,90 @@ func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeE return out, nil } +func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) { + out := new(VolumeEcShardsToVolumeResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[5], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) + if err != nil { + return nil, err + } + x := &volumeServerVolumeTierMoveDatToRemoteClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeTierMoveDatToRemoteClient interface { + Recv() (*VolumeTierMoveDatToRemoteResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTierMoveDatToRemoteClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDatToRemoteResponse, error) { + m := new(VolumeTierMoveDatToRemoteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[6], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) 
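The two tiering RPCs added here are also server-streaming, so a caller receives incremental progress reports while the volume server uploads or downloads the `.dat` file. A usage sketch with the request fields defined in this patch (the volume id, collection, and `"s3.default"` backend name are illustrative; real backend names depend on the server's configuration, and the imports from the earlier sketches are assumed):

```go
// moveToRemote offloads a volume's .dat file to a remote tier and logs
// progress until the server closes the stream.
func moveToRemote(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
	stream, err := client.VolumeTierMoveDatToRemote(ctx,
		&volume_server_pb.VolumeTierMoveDatToRemoteRequest{
			VolumeId:               7,
			Collection:             "pictures",
			DestinationBackendName: "s3.default", // illustrative backend name
			KeepLocalDatFile:       false,        // delete the local copy once uploaded
		})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // move finished
		}
		if err != nil {
			return err
		}
		log.Printf("moved %d bytes (%.1f%%)", resp.GetProcessed(), resp.GetProcessedPercentage())
	}
}
```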
+ if err != nil { + return nil, err + } + x := &volumeServerVolumeTierMoveDatFromRemoteClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeTierMoveDatFromRemoteClient interface { + Recv() (*VolumeTierMoveDatFromRemoteResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTierMoveDatFromRemoteClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveDatFromRemoteResponse, error) { + m := new(VolumeTierMoveDatFromRemoteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) { + out := new(VolumeServerStatusResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/Query", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[7], c.cc, "/volume_server_pb.VolumeServer/Query", opts...) if err != nil { return nil, err } @@ -2222,6 +2854,7 @@ func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) { type VolumeServerServer interface { // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
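The new `VolumeServerStatus` RPC exposes the `DiskStatus` gauges added earlier in this patch (`PercentFree`/`PercentUsed`) together with `MemStatus`. A short consumer sketch using only getters visible in this diff (imports as in the earlier sketches):

```go
// printStatus queries a volume server's disk and memory status and prints
// the new percentage fields.
func printStatus(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
	resp, err := client.VolumeServerStatus(ctx, &volume_server_pb.VolumeServerStatusRequest{})
	if err != nil {
		return err
	}
	for _, d := range resp.GetDiskStatuses() {
		log.Printf("dir=%s used=%d/%d (%.1f%% free)",
			d.GetDir(), d.GetUsed(), d.GetAll(), d.GetPercentFree())
	}
	if m := resp.GetMemoryStatus(); m != nil {
		log.Printf("goroutines=%d", m.GetGoroutines())
	}
	return nil
}
```

The hunk below mirrors the client-side additions on the server interface: unary methods take a context and request, while the streaming tier-move methods take a request plus a `VolumeServer_...Server` whose `Send` pushes each progress message.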
BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) + FileGet(*FileGetRequest, VolumeServer_FileGetServer) error VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error) VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) @@ -2234,6 +2867,7 @@ type VolumeServerServer interface { VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) + VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) // copy the .idx .dat files, and mount this volume VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) @@ -2249,7 +2883,12 @@ type VolumeServerServer interface { VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) - // query + VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) + // tiered storage + VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error + VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error + VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) + // query Query(*QueryRequest, VolumeServer_QueryServer) error } @@ -2275,6 +2914,27 @@ func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _VolumeServer_FileGet_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(FileGetRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).FileGet(m, &volumeServerFileGetServer{stream}) +} + +type VolumeServer_FileGetServer interface { + Send(*FileGetResponse) error + grpc.ServerStream +} + +type volumeServerFileGetServer struct { + grpc.ServerStream +} + +func (x *volumeServerFileGetServer) Send(m *FileGetResponse) error { + return x.ServerStream.SendMsg(m) +} + func _VolumeServer_VacuumVolumeCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VacuumVolumeCheckRequest) if err := dec(in); err != nil { @@ -2494,6 +3154,24 @@ func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeConfigureRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeConfigure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/volume_server_pb.VolumeServer/VolumeConfigure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_VolumeCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeCopyRequest) if err := dec(in); err != nil { @@ -2737,6 +3415,84 @@ func _VolumeServer_VolumeEcBlobDelete_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _VolumeServer_VolumeEcShardsToVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsToVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, req.(*VolumeEcShardsToVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeTierMoveDatToRemote_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeTierMoveDatToRemoteRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).VolumeTierMoveDatToRemote(m, &volumeServerVolumeTierMoveDatToRemoteServer{stream}) +} + +type VolumeServer_VolumeTierMoveDatToRemoteServer interface { + Send(*VolumeTierMoveDatToRemoteResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTierMoveDatToRemoteServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTierMoveDatToRemoteServer) Send(m *VolumeTierMoveDatToRemoteResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeTierMoveDatFromRemote_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeTierMoveDatFromRemoteRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).VolumeTierMoveDatFromRemote(m, &volumeServerVolumeTierMoveDatFromRemoteServer{stream}) +} + +type VolumeServer_VolumeTierMoveDatFromRemoteServer interface { + Send(*VolumeTierMoveDatFromRemoteResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTierMoveDatFromRemoteServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDatFromRemoteResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeServerStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeServerStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_Query_Handler(srv 
interface{}, stream grpc.ServerStream) error { m := new(QueryRequest) if err := stream.RecvMsg(m); err != nil { @@ -2810,6 +3566,10 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeMarkReadonly", Handler: _VolumeServer_VolumeMarkReadonly_Handler, }, + { + MethodName: "VolumeConfigure", + Handler: _VolumeServer_VolumeConfigure_Handler, + }, { MethodName: "VolumeCopy", Handler: _VolumeServer_VolumeCopy_Handler, @@ -2850,8 +3610,21 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeEcBlobDelete", Handler: _VolumeServer_VolumeEcBlobDelete_Handler, }, + { + MethodName: "VolumeEcShardsToVolume", + Handler: _VolumeServer_VolumeEcShardsToVolume_Handler, + }, + { + MethodName: "VolumeServerStatus", + Handler: _VolumeServer_VolumeServerStatus_Handler, + }, }, Streams: []grpc.StreamDesc{ + { + StreamName: "FileGet", + Handler: _VolumeServer_FileGet_Handler, + ServerStreams: true, + }, { StreamName: "VolumeIncrementalCopy", Handler: _VolumeServer_VolumeIncrementalCopy_Handler, @@ -2872,6 +3645,16 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ Handler: _VolumeServer_VolumeEcShardRead_Handler, ServerStreams: true, }, + { + StreamName: "VolumeTierMoveDatToRemote", + Handler: _VolumeServer_VolumeTierMoveDatToRemote_Handler, + ServerStreams: true, + }, + { + StreamName: "VolumeTierMoveDatFromRemote", + Handler: _VolumeServer_VolumeTierMoveDatFromRemote_Handler, + ServerStreams: true, + }, { StreamName: "Query", Handler: _VolumeServer_Query_Handler, @@ -2884,162 +3667,210 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 2503 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x1a, 0x4d, 0x77, 0x1c, 0x47, - 0x51, 0xeb, 0x5d, 0x49, 0xbb, 0xb5, 0x2b, 0x4b, 0x6e, 0xc9, 0xd2, 0x7a, 0x6c, 0xc9, 0xca, 0xe4, - 0xc3, 0xb2, 0x9d, 0xc8, 0x8e, 0x02, 0x24, 0x24, 0x04, 0xb0, 0x65, 0x1b, 0x4c, 0x62, 0x99, 0x8c, - 0x1c, 0x13, 0x70, 0x1e, 0xf3, 0x5a, 0x33, 0x2d, 0x6b, 0xd0, 0xcc, 0xf4, 0x78, 0xa6, 0x47, 0xd6, - 0xfa, 0xc1, 0x29, 0x5c, 0xf9, 0x01, 0x9c, 0x39, 0x71, 0xe1, 0xca, 0x0f, 0xe0, 0xc2, 0x0f, 0x80, - 0x2b, 0x17, 0xce, 0x1c, 0xb8, 0xf1, 0x1e, 0x17, 0x5e, 0x7f, 0xcc, 0xec, 0x7c, 0x6a, 0x47, 0xb1, - 0xdf, 0xe3, 0xe5, 0xd6, 0x53, 0x5d, 0x1f, 0x5d, 0xd5, 0x55, 0xd5, 0xd5, 0xd5, 0x03, 0x8b, 0x47, - 0xd4, 0x8d, 0x3d, 0x62, 0x46, 0x24, 0x3c, 0x22, 0xe1, 0x66, 0x10, 0x52, 0x46, 0xd1, 0x42, 0x0e, - 0x68, 0x06, 0x7b, 0xfa, 0x0d, 0x40, 0xb7, 0x31, 0xb3, 0x0e, 0xee, 0x10, 0x97, 0x30, 0x62, 0x90, - 0x67, 0x31, 0x89, 0x18, 0xba, 0x00, 0xdd, 0x7d, 0xc7, 0x25, 0xa6, 0x63, 0x47, 0xc3, 0xd6, 0x7a, - 0x7b, 0xa3, 0x67, 0xcc, 0xf2, 0xef, 0xfb, 0x76, 0xa4, 0x3f, 0x84, 0xc5, 0x1c, 0x41, 0x14, 0x50, - 0x3f, 0x22, 0xe8, 0x03, 0x98, 0x0d, 0x49, 0x14, 0xbb, 0x4c, 0x12, 0xf4, 0xb7, 0xd6, 0x36, 0x8b, - 0xb2, 0x36, 0x53, 0x92, 0xd8, 0x65, 0x46, 0x82, 0xae, 0x7f, 0xd5, 0x82, 0x41, 0x76, 0x06, 0xad, - 0xc0, 0xac, 0x12, 0x3e, 0x6c, 0xad, 0xb7, 0x36, 0x7a, 0xc6, 0x8c, 0x94, 0x8d, 0x96, 0x61, 0x26, - 0x62, 0x98, 0xc5, 0xd1, 0xf0, 0xcc, 0x7a, 0x6b, 0x63, 0xda, 0x50, 0x5f, 0x68, 0x09, 0xa6, 0x49, - 0x18, 0xd2, 0x70, 0xd8, 0x16, 0xe8, 0xf2, 0x03, 0x21, 0xe8, 0x44, 0xce, 0x0b, 0x32, 0xec, 0xac, - 0xb7, 0x36, 0xe6, 0x0c, 0x31, 0x46, 0x43, 0x98, 0x3d, 0x22, 0x61, 0xe4, 0x50, 0x7f, 0x38, 0x2d, - 0xc0, 0xc9, 0xa7, 0x3e, 0x0b, 0xd3, 0x77, 0xbd, 0x80, 0x8d, 0xf4, 0xf7, 0x61, 0xf8, 0x18, 0x5b, - 0x71, 0xec, 0x3d, 0x16, 0xcb, 0xdf, 0x3e, 0x20, 
0xd6, 0x61, 0x62, 0x96, 0x8b, 0xd0, 0x53, 0x4a, - 0xa9, 0xb5, 0xcd, 0x19, 0x5d, 0x09, 0xb8, 0x6f, 0xeb, 0x3f, 0x84, 0x0b, 0x15, 0x84, 0xca, 0x3c, - 0xaf, 0xc3, 0xdc, 0x53, 0x1c, 0xee, 0xe1, 0xa7, 0xc4, 0x0c, 0x31, 0x73, 0xa8, 0xa0, 0x6e, 0x19, - 0x03, 0x05, 0x34, 0x38, 0x4c, 0x7f, 0x02, 0x5a, 0x8e, 0x03, 0xf5, 0x02, 0x6c, 0xb1, 0x26, 0xc2, - 0xd1, 0x3a, 0xf4, 0x83, 0x90, 0x60, 0xd7, 0xa5, 0x16, 0x66, 0x44, 0xd8, 0xa7, 0x6d, 0x64, 0x41, - 0xfa, 0x2a, 0x5c, 0xac, 0x64, 0x2e, 0x17, 0xa8, 0x7f, 0x50, 0x58, 0x3d, 0xf5, 0x3c, 0xa7, 0x91, - 0x68, 0xfd, 0x52, 0x69, 0xd5, 0x82, 0x52, 0xf1, 0xfd, 0x6e, 0x61, 0xd6, 0x25, 0xd8, 0x8f, 0x83, - 0x46, 0x8c, 0x8b, 0x2b, 0x4e, 0x48, 0x53, 0xce, 0x2b, 0xd2, 0x6d, 0xb6, 0xa9, 0xeb, 0x12, 0x8b, - 0x39, 0xd4, 0x4f, 0xd8, 0xae, 0x01, 0x58, 0x29, 0x50, 0x39, 0x51, 0x06, 0xa2, 0x6b, 0x30, 0x2c, - 0x93, 0x2a, 0xb6, 0xff, 0x68, 0xc1, 0xf9, 0x5b, 0xca, 0x68, 0x52, 0x70, 0xa3, 0x0d, 0xc8, 0x8b, - 0x3c, 0x53, 0x14, 0x59, 0xdc, 0xa0, 0x76, 0x69, 0x83, 0x38, 0x46, 0x48, 0x02, 0xd7, 0xb1, 0xb0, - 0x60, 0xd1, 0x11, 0x2c, 0xb2, 0x20, 0xb4, 0x00, 0x6d, 0xc6, 0x5c, 0xe1, 0xb9, 0x3d, 0x83, 0x0f, - 0xd1, 0x16, 0x2c, 0x7b, 0xc4, 0xa3, 0xe1, 0xc8, 0xf4, 0x70, 0x60, 0x7a, 0xf8, 0xd8, 0xe4, 0x6e, - 0x6e, 0x7a, 0x7b, 0xc3, 0x19, 0xb1, 0x3e, 0x24, 0x67, 0x1f, 0xe0, 0xe0, 0x01, 0x3e, 0xde, 0x75, - 0x5e, 0x90, 0x07, 0x7b, 0xfa, 0x10, 0x96, 0x8b, 0xfa, 0x29, 0xd5, 0xbf, 0x03, 0x2b, 0x12, 0xb2, - 0x3b, 0xf2, 0xad, 0x5d, 0x11, 0x5b, 0x8d, 0x36, 0xea, 0xbf, 0x2d, 0x18, 0x96, 0x09, 0x95, 0xe7, - 0xbf, 0xac, 0xd5, 0x4e, 0x6d, 0x93, 0xcb, 0xd0, 0x67, 0xd8, 0x71, 0x4d, 0xba, 0xbf, 0x1f, 0x11, - 0x26, 0x0c, 0xd1, 0x31, 0x80, 0x83, 0x1e, 0x0a, 0x08, 0xba, 0x0a, 0x0b, 0x96, 0xf4, 0x7e, 0x33, - 0x24, 0x47, 0x8e, 0xc8, 0x06, 0xb3, 0x62, 0x61, 0xf3, 0x56, 0x12, 0x15, 0x12, 0x8c, 0x74, 0x98, - 0x73, 0xec, 0x63, 0x53, 0xa4, 0x23, 0x91, 0x4c, 0xba, 0x82, 0x5b, 0xdf, 0xb1, 0x8f, 0xef, 0x39, - 0x2e, 0xe1, 0x16, 0xd5, 0x1f, 0xc3, 0x25, 0xa9, 0xfc, 0x7d, 0xdf, 0x0a, 0x89, 0x47, 0x7c, 0x86, - 0xdd, 0x6d, 0x1a, 0x8c, 0x1a, 0xb9, 0xcd, 0x05, 0xe8, 0x46, 0x8e, 0x6f, 0x11, 0xd3, 0x97, 0x49, - 0xad, 0x63, 0xcc, 0x8a, 0xef, 0x9d, 0x48, 0xbf, 0x0d, 0xab, 0x35, 0x7c, 0x95, 0x65, 0x5f, 0x83, - 0x81, 0x58, 0x98, 0x45, 0x7d, 0x46, 0x7c, 0x26, 0x78, 0x0f, 0x8c, 0x3e, 0x87, 0x6d, 0x4b, 0x90, - 0xfe, 0x2e, 0x20, 0xc9, 0xe3, 0x01, 0x8d, 0xfd, 0x66, 0xe1, 0x7c, 0x1e, 0x16, 0x73, 0x24, 0xca, - 0x37, 0xde, 0x83, 0x25, 0x09, 0xfe, 0xdc, 0xf7, 0x1a, 0xf3, 0x5a, 0x81, 0xf3, 0x05, 0x22, 0xc5, - 0x6d, 0x2b, 0x11, 0x92, 0x3f, 0x76, 0x4e, 0x64, 0xb6, 0x9c, 0xac, 0x20, 0x7f, 0xf2, 0x88, 0xcc, - 0x25, 0x17, 0x8c, 0xc3, 0x43, 0x83, 0x60, 0x9b, 0xfa, 0xee, 0xa8, 0x71, 0xe6, 0xaa, 0xa0, 0x54, - 0x7c, 0xff, 0xd4, 0x82, 0x73, 0x49, 0x4a, 0x6b, 0xb8, 0x9b, 0xa7, 0x74, 0xe7, 0x76, 0xad, 0x3b, - 0x77, 0xc6, 0xee, 0xbc, 0x01, 0x0b, 0x11, 0x8d, 0x43, 0x8b, 0x98, 0x36, 0x66, 0xd8, 0xf4, 0xa9, - 0x4d, 0x94, 0xb7, 0x9f, 0x95, 0xf0, 0x3b, 0x98, 0xe1, 0x1d, 0x6a, 0x13, 0xfd, 0x07, 0xc9, 0x66, - 0xe7, 0xbc, 0xe4, 0x2a, 0x9c, 0x73, 0x71, 0xc4, 0x4c, 0x1c, 0x04, 0xc4, 0xb7, 0x4d, 0xcc, 0xb8, - 0xab, 0xb5, 0x84, 0xab, 0x9d, 0xe5, 0x13, 0xb7, 0x04, 0xfc, 0x16, 0xdb, 0x89, 0xf4, 0xbf, 0xb5, - 0x60, 0x9e, 0xd3, 0x72, 0xd7, 0x6e, 0xa4, 0xef, 0x02, 0xb4, 0xc9, 0x31, 0x53, 0x8a, 0xf2, 0x21, - 0xba, 0x01, 0x8b, 0x2a, 0x86, 0x1c, 0xea, 0x8f, 0xc3, 0xab, 0x2d, 0xb3, 0xd1, 0x78, 0x2a, 0x8d, - 0xb0, 0xcb, 0xd0, 0x8f, 0x18, 0x0d, 0x92, 0x68, 0xed, 0xc8, 0x68, 0xe5, 0x20, 0x15, 0xad, 0x79, - 0x9b, 0x4e, 0x57, 0xd8, 0x74, 0xe0, 0x44, 0x26, 0xb1, 0x4c, 0xb9, 0x2a, 
-	// (old gzipped FileDescriptorProto byte rows elided)
+	// 3280 bytes of a gzipped FileDescriptorProto
+	// (new gzipped FileDescriptorProto byte rows elided)
 }
diff --git a/weed/pb/volume_server_pb/volume_server_helper.go b/weed/pb/volume_server_pb/volume_server_helper.go
new file mode 100644
index 000000000..356be27ff
--- /dev/null
+++ b/weed/pb/volume_server_pb/volume_server_helper.go
@@ -0,0 +1,5 @@
+package volume_server_pb
+
+func (m *RemoteFile) BackendName() string {
+	return m.BackendType + "." + m.BackendId
+}
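The new BackendName helper above derives the lookup key for a tiered-storage backend by joining its type and id with a dot. A minimal, self-contained sketch of the same convention follows; the field values ("s3", "default") are illustrative only, and a local struct stands in for the protobuf-generated RemoteFile type:

package main

import (
	"fmt"
	"strings"
)

// remoteFile mirrors the two fields of volume_server_pb.RemoteFile
// that BackendName relies on; the real type is generated code.
type remoteFile struct {
	BackendType string // e.g. "s3" (illustrative)
	BackendId   string // e.g. "default" (illustrative)
}

// backendName reproduces the helper's dotted-key convention.
func (m *remoteFile) backendName() string {
	return m.BackendType + "." + m.BackendId
}

func main() {
	rf := &remoteFile{BackendType: "s3", BackendId: "default"}
	name := rf.backendName() // "s3.default"

	// A consumer can split the key back into its two parts.
	parts := strings.SplitN(name, ".", 2)
	fmt.Println(name, parts[0], parts[1])
}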
diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go
index 7353cdc91..a91c2ddd3 100644
--- a/weed/replication/replicator.go
+++ b/weed/replication/replicator.go
@@ -18,10 +18,10 @@ type Replicator struct {
 	source *source.FilerSource
 }
 
-func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSink) *Replicator {
+func NewReplicator(sourceConfig util.Configuration, configPrefix string, dataSink sink.ReplicationSink) *Replicator {
 
 	source := &source.FilerSource{}
-	source.Initialize(sourceConfig)
+	source.Initialize(sourceConfig, configPrefix)
 
 	dataSink.SetSourceFiler(source)
 
@@ -41,28 +41,28 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
 	key = newKey
 	if message.OldEntry != nil && message.NewEntry == nil {
 		glog.V(4).Infof("deleting %v", key)
-		return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks)
+		return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks)
 	}
 
 	if message.OldEntry == nil && message.NewEntry != nil {
 		glog.V(4).Infof("creating %v", key)
-		return r.sink.CreateEntry(ctx, key, message.NewEntry)
+		return r.sink.CreateEntry(key, message.NewEntry)
 	}
 
 	if message.OldEntry == nil && message.NewEntry == nil {
 		glog.V(0).Infof("weird message %+v", message)
 		return nil
 	}
 
-	foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
+	foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
 	if foundExisting {
 		glog.V(4).Infof("updated %v", key)
 		return err
 	}
 
-	err = r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, false)
+	err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false)
 	if err != nil {
 		return fmt.Errorf("delete old entry %v: %v", key, err)
 	}
 
 	glog.V(4).Infof("creating missing %v", key)
-	return r.sink.CreateEntry(ctx, key, message.NewEntry)
+	return r.sink.CreateEntry(key, message.NewEntry)
 }
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index 6381908a1..89e04922f 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -35,12 +35,12 @@ func (g *AzureSink) GetSinkToDirectory() string {
 	return g.dir
 }
 
-func (g *AzureSink) Initialize(configuration util.Configuration) error {
+func (g *AzureSink) Initialize(configuration util.Configuration, prefix string) error {
 	return g.initialize(
-		configuration.GetString("account_name"),
-		configuration.GetString("account_key"),
-		configuration.GetString("container"),
-		configuration.GetString("directory"),
+		configuration.GetString(prefix+"account_name"),
+		configuration.GetString(prefix+"account_key"),
+		configuration.GetString(prefix+"container"),
+		configuration.GetString(prefix+"directory"),
 	)
 }
 
@@ -70,7 +70,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e
 	return nil
 }
 
-func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
 
 	key = cleanKey(key)
 
@@ -78,7 +78,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de
 		key = key + "/"
 	}
 
-	if _, err := g.containerURL.NewBlobURL(key).Delete(ctx,
+	if _, err := g.containerURL.NewBlobURL(key).Delete(context.Background(),
+		azblob.DeleteSnapshotsOptionInclude, 
azblob.BlobAccessConditions{}); err != nil { return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err) } @@ -87,7 +87,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de } -func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -102,21 +102,21 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb // Azure Storage account's container. appendBlobURL := g.containerURL.NewAppendBlobURL(key) - _, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) + _, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) if err != nil { return err } for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) if err != nil { return err } var writeErr error - _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { - _, writeErr = appendBlobURL.AppendBlock(ctx, bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) + readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { + _, writeErr = appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) }) if readErr != nil { @@ -132,7 +132,7 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb } -func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *AzureSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 35c2230fa..df0653f73 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -31,12 +31,12 @@ func (g *B2Sink) GetSinkToDirectory() string { return g.dir } -func (g *B2Sink) Initialize(configuration util.Configuration) error { +func (g *B2Sink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("b2_account_id"), - configuration.GetString("b2_master_application_key"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"b2_account_id"), + configuration.GetString(prefix+"b2_master_application_key"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } @@ -45,8 +45,7 @@ func (g *B2Sink) SetSourceFiler(s *source.FilerSource) { } func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { - ctx := context.Background() - client, err := b2.NewClient(ctx, accountId, accountKey) + client, err := b2.NewClient(context.Background(), accountId, accountKey) if err != nil { return err } @@ -58,7 +57,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { return nil } -func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) 
error { +func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { key = cleanKey(key) @@ -66,18 +65,18 @@ func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, delet key = key + "/" } - bucket, err := g.client.Bucket(ctx, g.bucket) + bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { return err } targetObject := bucket.Object(key) - return targetObject.Delete(ctx) + return targetObject.Delete(context.Background()) } -func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -88,23 +87,23 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En totalSize := filer2.TotalSize(entry.Chunks) chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - bucket, err := g.client.Bucket(ctx, g.bucket) + bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { return err } targetObject := bucket.Object(key) - writer := targetObject.NewWriter(ctx) + writer := targetObject.NewWriter(context.Background()) for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) if err != nil { return err } var writeErr error - _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { + readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { _, err := writer.Write(data) if err != nil { writeErr = err @@ -124,7 +123,7 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En } -func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *B2Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 97e9671a3..07218b9b3 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -3,18 +3,19 @@ package filersink import ( "context" "fmt" - "google.golang.org/grpc" "strings" "sync" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) { +func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) { if len(sourceChunks) == 0 { return } @@ -23,7 +24,7 @@ func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_ wg.Add(1) go func(chunk *filer_pb.FileChunk) { defer wg.Done() - replicatedChunk, e := fs.replicateOneChunk(ctx, chunk) + replicatedChunk, e := fs.replicateOneChunk(chunk, dir) if e != nil { err = e } @@ -35,9 +36,9 @@ func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_ 
return } -func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) { +func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) { - fileId, err := fs.fetchAndWrite(ctx, sourceChunk) + fileId, err := fs.fetchAndWrite(sourceChunk, dir) if err != nil { return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err) } @@ -49,12 +50,14 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p Mtime: sourceChunk.Mtime, ETag: sourceChunk.ETag, SourceFileId: sourceChunk.GetFileIdString(), + CipherKey: sourceChunk.CipherKey, + IsGzipped: sourceChunk.IsGzipped, }, nil } -func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) { +func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) { - filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString()) + filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString()) if err != nil { return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err) } @@ -63,7 +66,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi var host string var auth security.EncodedJwt - if err := fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -71,13 +74,17 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi Collection: fs.collection, TtlSec: fs.ttlSec, DataCenter: fs.dataCenter, + ParentPath: dir, } - resp, err := client.AssignVolume(ctx, request) + resp, err := client.AssignVolume(context.Background(), request) if err != nil { glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) @@ -90,8 +97,8 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header) - uploadResult, err := operation.Upload(fileUrl, filename, readCloser, - "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth) + // fetch data as is, regardless whether it is encrypted or not + uploadResult, err := operation.Upload(fileUrl, filename, false, readCloser, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err) return "", fmt.Errorf("upload data: %v", err) @@ -104,9 +111,9 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi return } -func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, fs.grpcAddress, fs.grpcDialOption) diff --git a/weed/replication/sink/filersink/filer_sink.go 
b/weed/replication/sink/filersink/filer_sink.go index f99c7fdf6..838c2c441 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -3,10 +3,11 @@ package filersink import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -38,13 +39,13 @@ func (fs *FilerSink) GetSinkToDirectory() string { return fs.dir } -func (fs *FilerSink) Initialize(configuration util.Configuration) error { +func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error { return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), - configuration.GetString("replication"), - configuration.GetString("collection"), - configuration.GetInt("ttlSec"), + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), + configuration.GetString(prefix+"replication"), + configuration.GetString(prefix+"collection"), + configuration.GetInt(prefix+"ttlSec"), ) } @@ -59,12 +60,12 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, fs.replication = replication fs.collection = collection fs.ttlSec = int32(ttlSec) - fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") return nil } -func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { - return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { +func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { + return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() @@ -75,7 +76,7 @@ func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, d } glog.V(1).Infof("delete entry: %v", request) - _, err := client.DeleteEntry(ctx, request) + _, err := client.DeleteEntry(context.Background(), request) if err != nil { glog.V(0).Infof("delete entry %s: %v", key, err) return fmt.Errorf("delete entry %s: %v", key, err) @@ -85,9 +86,9 @@ func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, d }) } -func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { - return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() @@ -97,14 +98,14 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p Name: name, } glog.V(1).Infof("lookup: %v", lookupRequest) - if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil { + if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil { if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) { glog.V(0).Infof("already replicated %s", key) return nil } } - replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks) + replicatedChunks, err := fs.replicateChunks(entry.Chunks, dir) if err != nil { glog.V(0).Infof("replicate entry chunks %s: %v", key, err) @@ -124,7 +125,7 @@ func (fs *FilerSink) CreateEntry(ctx 
context.Context, key string, entry *filer_p } glog.V(1).Infof("create: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("create entry %s: %v", key, err) return fmt.Errorf("create entry %s: %v", key, err) } @@ -133,13 +134,13 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p }) } -func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { dir, name := filer2.FullPath(key).DirAndName() // read existing entry var existingEntry *filer_pb.Entry - err = fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -147,7 +148,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } glog.V(4).Infof("lookup entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) + resp, err := filer_pb.LookupEntry(client, request) if err != nil { glog.V(0).Infof("lookup %s: %v", key, err) return err @@ -183,7 +184,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } // replicate the chunks that are new in the source - replicatedChunks, err := fs.replicateChunks(ctx, newChunks) + replicatedChunks, err := fs.replicateChunks(newChunks, newParentPath) if err != nil { return true, fmt.Errorf("replicte %s chunks error: %v", key, err) } @@ -191,14 +192,14 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } // save updated meta data - return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: newParentPath, Entry: existingEntry, } - if _, err := client.UpdateEntry(ctx, request); err != nil { + if _, err := client.UpdateEntry(context.Background(), request); err != nil { return fmt.Errorf("update existingEntry %s: %v", key, err) } diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index abd7c49b9..694399274 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -6,13 +6,14 @@ import ( "os" "cloud.google.com/go/storage" + "google.golang.org/api/option" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" "github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/api/option" ) type GcsSink struct { @@ -34,11 +35,11 @@ func (g *GcsSink) GetSinkToDirectory() string { return g.dir } -func (g *GcsSink) Initialize(configuration util.Configuration) error { +func (g *GcsSink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + 
configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } @@ -50,7 +51,6 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str g.bucket = bucketName g.dir = dir - ctx := context.Background() // Creates a client. if google_application_credentials == "" { var found bool @@ -59,7 +59,7 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml") } } - client, err := storage.NewClient(ctx, option.WithCredentialsFile(google_application_credentials)) + client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials)) if err != nil { glog.Fatalf("Failed to create client: %v", err) } @@ -69,13 +69,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str return nil } -func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { if isDirectory { key = key + "/" } - if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil { + if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil { return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err) } @@ -83,7 +83,7 @@ func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, dele } -func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { if entry.IsDirectory { return nil @@ -92,16 +92,16 @@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E totalSize := filer2.TotalSize(entry.Chunks) chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx) + wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background()) for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) if err != nil { return err } - _, err = util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { + err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { wc.Write(data) }) @@ -119,7 +119,7 @@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E } -func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *GcsSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { // TODO improve efficiency return false, nil } diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index dd54f0005..6d85f660a 100644 --- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -1,7 +1,6 @@ package sink import ( - "context" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" @@ -9,10 +8,10 
@@ import ( type ReplicationSink interface { GetName() string - Initialize(configuration util.Configuration) error - DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error - CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error - UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) + Initialize(configuration util.Configuration, prefix string) error + DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error + CreateEntry(key string, entry *filer_pb.Entry) error + UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) GetSinkToDirectory() string SetSourceFiler(s *source.FilerSource) } diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index 4cff341d0..5f548559b 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -39,16 +40,16 @@ func (s3sink *S3Sink) GetSinkToDirectory() string { return s3sink.dir } -func (s3sink *S3Sink) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("sink.s3.region: %v", configuration.GetString("region")) - glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString("bucket")) - glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString("directory")) +func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket")) + glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory")) return s3sink.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } @@ -77,7 +78,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, buc return nil } -func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { key = cleanKey(key) @@ -89,7 +90,7 @@ func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, } -func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -112,7 +113,7 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_ wg.Add(1) go func(chunk *filer2.ChunkView) { defer wg.Done() - if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil { + if part, 
uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil { err = uploadErr } else { parts = append(parts, part) @@ -126,11 +127,11 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_ return err } - return s3sink.completeMultipartUpload(ctx, key, uploadId, parts) + return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts) } -func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (s3sink *S3Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index 0a190b27d..854688b1e 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -103,10 +103,10 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId } // To upload a part -func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) { +func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) { var readSeeker io.ReadSeeker - readSeeker, err := s3sink.buildReadSeeker(ctx, chunk) + readSeeker, err := s3sink.buildReadSeeker(chunk) if err != nil { glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) @@ -156,12 +156,12 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou return err } -func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) { - fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId) +func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) { + fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId) if err != nil { return nil, err } buf := make([]byte, chunk.Size) - util.ReadUrl(fileUrl, chunk.Offset, int(chunk.Size), buf, true) + util.ReadUrl(fileUrl, nil, false,false, chunk.Offset, int(chunk.Size), buf) return bytes.NewReader(buf), nil } diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index d7b5ebc4d..90bcffdf0 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -3,13 +3,15 @@ package source import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "google.golang.org/grpc" "io" "net/http" "strings" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -25,30 +27,30 @@ type FilerSource struct { Dir string } -func (fs *FilerSource) Initialize(configuration util.Configuration) error { +func (fs *FilerSource) Initialize(configuration util.Configuration, prefix string) error { return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), + configuration.GetString(prefix+"grpcAddress"), + 
configuration.GetString(prefix+"directory"), ) } func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) { fs.grpcAddress = grpcAddress fs.Dir = dir - fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") return nil } -func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) { +func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) { vid2Locations := make(map[string]*filer_pb.Locations) vid := volumeId(part) - err = fs.withFilerClient(ctx, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read lookup volume id locations: %v", vid) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: []string{vid}, }) if err != nil { @@ -77,9 +79,9 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl s return } -func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) { +func (fs *FilerSource) ReadPart(part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) { - fileUrl, err := fs.LookupFileId(ctx, part) + fileUrl, err := fs.LookupFileId(part) if err != nil { return "", nil, nil, err } @@ -89,9 +91,9 @@ func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename stri return filename, header, readCloser, err } -func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSource) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, fs.grpcAddress, fs.grpcDialOption) diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go index bed26c79c..06869e619 100644 --- a/weed/replication/sub/notification_aws_sqs.go +++ b/weed/replication/sub/notification_aws_sqs.go @@ -27,14 +27,14 @@ func (k *AwsSqsInput) GetName() string { return "aws_sqs" } -func (k *AwsSqsInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } diff --git 
a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go index eddba9ff8..9726096e5 100644 --- a/weed/replication/sub/notification_gocdk_pub_sub.go +++ b/weed/replication/sub/notification_gocdk_pub_sub.go @@ -27,8 +27,8 @@ func (k *GoCDKPubSubInput) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSubInput) Initialize(config util.Configuration) error { - subURL := config.GetString("sub_url") +func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error { + subURL := configuration.GetString(prefix + "sub_url") glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL) sub, err := pubsub.OpenSubscription(context.Background(), subURL) if err != nil { diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go index ad6b42a2e..a950bb42b 100644 --- a/weed/replication/sub/notification_google_pub_sub.go +++ b/weed/replication/sub/notification_google_pub_sub.go @@ -27,13 +27,13 @@ func (k *GooglePubSubInput) GetName() string { return "google_pub_sub" } -func (k *GooglePubSubInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go index 1a86a8307..fa9cfad9b 100644 --- a/weed/replication/sub/notification_kafka.go +++ b/weed/replication/sub/notification_kafka.go @@ -28,14 +28,14 @@ func (k *KafkaInput) GetName() string { return "kafka" } -func (k *KafkaInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), - configuration.GetString("offsetFile"), - configuration.GetInt("offsetSaveIntervalSeconds"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), + configuration.GetString(prefix+"offsetFile"), + configuration.GetInt(prefix+"offsetSaveIntervalSeconds"), ) } diff --git a/weed/replication/sub/notifications.go b/weed/replication/sub/notifications.go index 66fbef824..8a2668f98 100644 --- a/weed/replication/sub/notifications.go +++ b/weed/replication/sub/notifications.go @@ -9,7 +9,7 @@ type 
NotificationInput interface { // GetName gets the name to locate the configuration in sync.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) } diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go new file mode 100644 index 000000000..c1e8dff1e --- /dev/null +++ b/weed/s3api/auth_credentials.go @@ -0,0 +1,188 @@ +package s3api + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + + "github.com/golang/protobuf/jsonpb" + "github.com/gorilla/mux" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" +) + +type Action string + +const ( + ACTION_READ = "Read" + ACTION_WRITE = "Write" + ACTION_ADMIN = "Admin" +) + +type Iam interface { + Check(f http.HandlerFunc, actions ...Action) http.HandlerFunc +} + +type IdentityAccessManagement struct { + identities []*Identity + domain string +} + +type Identity struct { + Name string + Credentials []*Credential + Actions []Action +} + +type Credential struct { + AccessKey string + SecretKey string +} + +func NewIdentityAccessManagement(fileName string, domain string) *IdentityAccessManagement { + iam := &IdentityAccessManagement{ + domain: domain, + } + if fileName == "" { + return iam + } + if err := iam.loadS3ApiConfiguration(fileName); err != nil { + glog.Fatalf("fail to load config file %s: %v", fileName, err) + } + return iam +} + +func (iam *IdentityAccessManagement) loadS3ApiConfiguration(fileName string) error { + + s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} + + rawData, readErr := ioutil.ReadFile(fileName) + if readErr != nil { + glog.Warningf("fail to read %s : %v", fileName, readErr) + return fmt.Errorf("fail to read %s : %v", fileName, readErr) + } + + glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) + if err := jsonpb.Unmarshal(bytes.NewReader(rawData), s3ApiConfiguration); err != nil { + glog.Warningf("unmarshal error: %v", err) + return fmt.Errorf("unmarshal %s error: %v", fileName, err) + } + + for _, ident := range s3ApiConfiguration.Identities { + t := &Identity{ + Name: ident.Name, + Credentials: nil, + Actions: nil, + } + for _, action := range ident.Actions { + t.Actions = append(t.Actions, Action(action)) + } + for _, cred := range ident.Credentials { + t.Credentials = append(t.Credentials, &Credential{ + AccessKey: cred.AccessKey, + SecretKey: cred.SecretKey, + }) + } + iam.identities = append(iam.identities, t) + } + + return nil +} + +func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) { + for _, ident := range iam.identities { + for _, cred := range ident.Credentials { + if cred.AccessKey == accessKey { + return ident, cred, true + } + } + } + return nil, nil, false +} + +func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc { + + if len(iam.identities) == 0 { + return f + } + + return func(w http.ResponseWriter, r *http.Request) { + errCode := iam.authRequest(r, action) + if errCode == ErrNone { + f(w, r) + return + } + writeErrorResponse(w, errCode, r.URL) + } +} + +// check whether the request has valid access keys +func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) ErrorCode { + var identity *Identity + var s3Err ErrorCode + switch getRequestAuthType(r) { + case 
authTypeStreamingSigned: + return ErrNone + case authTypeUnknown: + glog.V(3).Infof("unknown auth type") + return ErrAccessDenied + case authTypePresignedV2, authTypeSignedV2: + glog.V(3).Infof("v2 auth type") + identity, s3Err = iam.isReqAuthenticatedV2(r) + case authTypeSigned, authTypePresigned: + glog.V(3).Infof("v4 auth type") + identity, s3Err = iam.reqSignatureV4Verify(r) + case authTypePostPolicy: + glog.V(3).Infof("post policy auth type") + return ErrNotImplemented + case authTypeJWT: + glog.V(3).Infof("jwt auth type") + return ErrNotImplemented + case authTypeAnonymous: + return ErrAccessDenied + default: + return ErrNotImplemented + } + + glog.V(3).Infof("auth error: %v", s3Err) + if s3Err != ErrNone { + return s3Err + } + + glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + if !identity.canDo(action, bucket) { + return ErrAccessDenied + } + + return ErrNone + +} + +func (identity *Identity) canDo(action Action, bucket string) bool { + for _, a := range identity.Actions { + if a == "Admin" { + return true + } + } + for _, a := range identity.Actions { + if a == action { + return true + } + } + if bucket == "" { + return false + } + limitedByBucket := string(action) + ":" + bucket + for _, a := range identity.Actions { + if string(a) == limitedByBucket { + return true + } + } + return false +} diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go new file mode 100644 index 000000000..c6f76560c --- /dev/null +++ b/weed/s3api/auth_credentials_test.go @@ -0,0 +1,68 @@ +package s3api + +import ( + "testing" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" +) + +func TestIdentityListFileFormat(t *testing.T) { + + s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} + + identity1 := &iam_pb.Identity{ + Name: "some_name", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_ADMIN, + ACTION_READ, + ACTION_WRITE, + }, + } + identity2 := &iam_pb.Identity{ + Name: "some_read_only_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + }, + }, + Actions: []string{ + ACTION_READ, + }, + } + identity3 := &iam_pb.Identity{ + Name: "some_normal_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key2", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_READ, + ACTION_WRITE, + }, + } + + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity1) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity2) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity3) + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, _ := m.MarshalToString(s3ApiConfiguration) + + println(text) + +} diff --git a/weed/s3api/auth_signature_v2.go b/weed/s3api/auth_signature_v2.go new file mode 100644 index 000000000..151a9ec26 --- /dev/null +++ b/weed/s3api/auth_signature_v2.go @@ -0,0 +1,412 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3api + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/url" + "path" + "sort" + "strconv" + "strings" + "time" +) + +// Whitelist resource list that will be used in query string for signature-V2 calculation. +// The list should be alphabetically sorted +var resourceList = []string{ + "acl", + "delete", + "lifecycle", + "location", + "logging", + "notification", + "partNumber", + "policy", + "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// Verify if request has valid AWS Signature Version '2'. +func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, ErrorCode) { + if isRequestSignatureV2(r) { + return iam.doesSignV2Match(r) + } + return iam.doesPresignV2SignatureMatch(r) +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + +// doesSignV2Match - Verify authorization header with calculated header in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html +// returns true if matches, false otherwise. if error is not nil then it is always false + +func validateV2AuthHeader(v2Auth string) (accessKey string, errCode ErrorCode) { + if v2Auth == "" { + return "", ErrAuthHeaderEmpty + } + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v2Auth, signV2Algorithm) { + return "", ErrSignatureVersionNotSupported + } + + // below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string). + // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature + authFields := strings.Split(v2Auth, " ") + if len(authFields) != 2 { + return "", ErrMissingFields + } + + // Then will be splitting on ":", this will seprate `AWSAccessKeyId` and `Signature` string. + keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":") + if len(keySignFields) != 2 { + return "", ErrMissingFields + } + + return keySignFields[0], ErrNone +} + +func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, ErrorCode) { + v2Auth := r.Header.Get("Authorization") + + accessKey, apiError := validateV2AuthHeader(v2Auth) + if apiError != ErrNone { + return nil, apiError + } + + // Access credentials. + // Validate if access key id same. 
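For reference, the V2 scheme being validated here reduces to an HMAC-SHA1 over a newline-joined string-to-sign. A minimal sketch of the computation for a plain GET, using only packages this file already imports; the key, date, and resource values are made up:

```go
// StringToSign = HTTP-Verb + "\n" + Content-Md5 + "\n" + Content-Type + "\n" +
//                Date + "\n" + CanonicalizedProtocolHeaders + CanonicalizedResource
stringToSign := "GET\n" + // HTTP-Verb
	"\n" + // Content-Md5 (empty for a plain GET)
	"\n" + // Content-Type (empty)
	"Mon, 02 Jan 2006 15:04:05 GMT\n" + // Date header
	// no x-amz-* headers, so CanonicalizedProtocolHeaders is empty
	"/mybucket/photo.jpg" // CanonicalizedResource
mac := hmac.New(sha1.New, []byte("some_secret_key1"))
mac.Write([]byte(stringToSign))
signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))
authorization := "AWS some_access_key1:" + signature // Authorization header value
_ = authorization
```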
+ ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + unescapedQueries, err := unescapeQueries(encodedQuery) + if err != nil { + return nil, ErrInvalidQueryParams + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, ErrInvalidRequest + } + + prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) + if !strings.HasPrefix(v2Auth, prefix) { + return nil, ErrSignatureDoesNotMatch + } + v2Auth = v2Auth[len(prefix):] + expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) + if !compareSignatureV2(v2Auth, expectedAuth) { + return nil, ErrSignatureDoesNotMatch + } + return ident, ErrNone +} + +// doesPresignV2SignatureMatch - Verify query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth +// returns ErrNone if matches. S3 errors otherwise. +func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, ErrorCode) { + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + var ( + filteredQueries []string + gotSignature string + expires string + accessKey string + err error + ) + + var unescapedQueries []string + unescapedQueries, err = unescapeQueries(encodedQuery) + if err != nil { + return nil, ErrInvalidQueryParams + } + + // Extract the necessary values from presigned query, construct a list of new filtered queries. + for _, query := range unescapedQueries { + keyval := strings.SplitN(query, "=", 2) + if len(keyval) != 2 { + return nil, ErrInvalidQueryParams + } + switch keyval[0] { + case "AWSAccessKeyId": + accessKey = keyval[1] + case "Signature": + gotSignature = keyval[1] + case "Expires": + expires = keyval[1] + default: + filteredQueries = append(filteredQueries, query) + } + } + + // Invalid values returns error. + if accessKey == "" || gotSignature == "" || expires == "" { + return nil, ErrInvalidQueryParams + } + + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // Make sure the request has not expired. + expiresInt, err := strconv.ParseInt(expires, 10, 64) + if err != nil { + return nil, ErrMalformedExpires + } + + // Check if the presigned URL has expired. + if expiresInt < time.Now().UTC().Unix() { + return nil, ErrExpiredPresignRequest + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, ErrInvalidRequest + } + + expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires) + if !compareSignatureV2(gotSignature, expectedSignature) { + return nil, ErrSignatureDoesNotMatch + } + + return ident, ErrNone +} + +// Escape encodedQuery string into unescaped list of query params, returns error +// if any while unescaping the values. 
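The presigned variant verified by doesPresignV2SignatureMatch carries the same HMAC in the query string instead of the Authorization header. A sketch of computing the expected value with preSignatureV2 (defined further down in this file); the credentials and expiry are illustrative:

```go
cred := &Credential{AccessKey: "some_access_key1", SecretKey: "some_secret_key1"}
expires := "1577836800" // Unix timestamp; the URL is rejected once time.Now() passes it
sig := preSignatureV2(cred, "GET", "/mybucket/photo.jpg", "", http.Header{}, expires)
// The client would then send:
//   GET /mybucket/photo.jpg?AWSAccessKeyId=some_access_key1&Expires=1577836800&Signature=<url-escaped sig>
_ = sig
```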
+func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { + for _, query := range strings.Split(encodedQuery, "&") { + var unescapedQuery string + unescapedQuery, err = url.QueryUnescape(query) + if err != nil { + return nil, err + } + unescapedQueries = append(unescapedQueries, unescapedQuery) + } + return unescapedQueries, nil +} + +// Returns "/bucketName/objectName" for path-style or virtual-host-style requests. +func getResource(path string, host string, domain string) (string, error) { + if domain == "" { + return path, nil + } + // If virtual-host-style is enabled construct the "resource" properly. + if strings.Contains(host, ":") { + // In bucket.mydomain.com:9000, strip out :9000 + var err error + if host, _, err = net.SplitHostPort(host); err != nil { + return "", err + } + } + if !strings.HasSuffix(host, "."+domain) { + return path, nil + } + bucket := strings.TrimSuffix(host, "."+domain) + return "/" + pathJoin(bucket, path), nil +} + +// pathJoin - like path.Join() but retains trailing "/" of the last element +func pathJoin(elem ...string) string { + trailingSlash := "" + if len(elem) > 0 { + if strings.HasSuffix(elem[len(elem)-1], "/") { + trailingSlash = "/" + } + } + return path.Join(elem...) + trailingSlash +} + +// Return the signature v2 of a given request. +func signatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "") + signature := calculateSignatureV2(stringToSign, cred.SecretKey) + return signature +} + +// Return string to sign under two different conditions. +// - if expires string is set then string to sign includes date instead of the Date header. +// - if expires string is empty then string to sign includes date header instead. +func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string { + canonicalHeaders := canonicalizedAmzHeadersV2(headers) + if len(canonicalHeaders) > 0 { + canonicalHeaders += "\n" + } + + date := expires // Date is set to expires date for presign operations. + if date == "" { + // If expires date is empty then request header Date is used. + date = headers.Get("Date") + } + + // From the Amazon docs: + // + // StringToSign = HTTP-Verb + "\n" + + // Content-Md5 + "\n" + + // Content-Type + "\n" + + // Date/Expires + "\n" + + // CanonicalizedProtocolHeaders + + // CanonicalizedResource; + stringToSign := strings.Join([]string{ + method, + headers.Get("Content-MD5"), + headers.Get("Content-Type"), + date, + canonicalHeaders, + }, "\n") + + return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery) +} + +// Return canonical resource string. +func canonicalizedResourceV2(encodedResource, encodedQuery string) string { + queries := strings.Split(encodedQuery, "&") + keyval := make(map[string]string) + for _, query := range queries { + key := query + val := "" + index := strings.Index(query, "=") + if index != -1 { + key = query[:index] + val = query[index+1:] + } + keyval[key] = val + } + + var canonicalQueries []string + for _, key := range resourceList { + val, ok := keyval[key] + if !ok { + continue + } + if val == "" { + canonicalQueries = append(canonicalQueries, key) + continue + } + canonicalQueries = append(canonicalQueries, key+"="+val) + } + + // The queries will be already sorted as resourceList is sorted, if canonicalQueries + // is empty strings.Join returns empty. 
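A quick illustration of the path resolution performed by getResource above, assuming a made-up endpoint domain of s3.example.com:

```go
// Path-style request: no domain configured, the resource is returned unchanged.
res, _ := getResource("/mybucket/photo.jpg", "127.0.0.1:8333", "")
// res == "/mybucket/photo.jpg"

// Virtual-host-style request: the bucket from the Host header is folded back into the path.
res, _ = getResource("/photo.jpg", "mybucket.s3.example.com:8333", "s3.example.com")
// res == "/mybucket/photo.jpg"
```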
+ canonicalQuery := strings.Join(canonicalQueries, "&") + if canonicalQuery != "" { + return encodedResource + "?" + canonicalQuery + } + return encodedResource +} + +// Return canonical headers. +func canonicalizedAmzHeadersV2(headers http.Header) string { + var keys []string + keyval := make(map[string]string) + for key := range headers { + lkey := strings.ToLower(key) + if !strings.HasPrefix(lkey, "x-amz-") { + continue + } + keys = append(keys, lkey) + keyval[lkey] = strings.Join(headers[key], ",") + } + sort.Strings(keys) + var canonicalHeaders []string + for _, key := range keys { + canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key]) + } + return strings.Join(canonicalHeaders, "\n") +} + +func calculateSignatureV2(stringToSign string, secret string) string { + hm := hmac.New(sha1.New, []byte(secret)) + hm.Write([]byte(stringToSign)) + return base64.StdEncoding.EncodeToString(hm.Sum(nil)) +} + +// compareSignatureV2 returns true if and only if both signatures +// are equal. The signatures are expected to be base64 encoded strings +// according to the AWS S3 signature V2 spec. +func compareSignatureV2(sig1, sig2 string) bool { + // Decode signature string to binary byte-sequence representation is required + // as Base64 encoding of a value is not unique: + // For example "aGVsbG8=" and "aGVsbG8=\r" will result in the same byte slice. + signature1, err := base64.StdEncoding.DecodeString(sig1) + if err != nil { + return false + } + signature2, err := base64.StdEncoding.DecodeString(sig2) + if err != nil { + return false + } + return subtle.ConstantTimeCompare(signature1, signature2) == 1 +} + +// Return signature-v2 for the presigned request. +func preSignatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires) + return calculateSignatureV2(stringToSign, cred.SecretKey) +} diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go new file mode 100644 index 000000000..cdfd8be1d --- /dev/null +++ b/weed/s3api/auth_signature_v4.go @@ -0,0 +1,720 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3api + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "encoding/hex" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, ErrorCode) { + sha256sum := getContentSha256Cksum(r) + switch { + case isRequestSignatureV4(r): + return iam.doesSignatureMatch(sha256sum, r) + case isRequestPresignedSignatureV4(r): + return iam.doesPresignedSignatureMatch(sha256sum, r) + } + return nil, ErrAccessDenied +} + +// Streaming AWS Signature Version '4' constants. 
+const ( + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" + + // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the + // client did not calculate sha256 of the payload. + unsignedPayload = "UNSIGNED-PAYLOAD" +) + +// Returns SHA256 for calculating canonical-request. +func getContentSha256Cksum(r *http.Request) string { + var ( + defaultSha256Cksum string + v []string + ok bool + ) + + // For a presigned request we look at the query param for sha256. + if isRequestPresignedSignatureV4(r) { + // X-Amz-Content-Sha256, if not set in presigned requests, checksum + // will default to 'UNSIGNED-PAYLOAD'. + defaultSha256Cksum = unsignedPayload + v, ok = r.URL.Query()["X-Amz-Content-Sha256"] + if !ok { + v, ok = r.Header["X-Amz-Content-Sha256"] + } + } else { + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). + defaultSha256Cksum = emptySHA256 + v, ok = r.Header["X-Amz-Content-Sha256"] + } + + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] + } + + // We couldn't find 'X-Amz-Content-Sha256'. + return defaultSha256Cksum +} + +// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) { + + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get("Authorization") + + // Parse signature version '4' header. + signV4Values, err := parseSignV4(v4Auth) + if err != ErrNone { + return nil, err + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != ErrNone { + return nil, errCode + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // Extract date, if not present throw error. + var date string + if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" { + if date = r.Header.Get("Date"); date == "" { + return nil, ErrMissingDateHeader + } + } + // Parse date header. + t, e := time.Parse(iso8601Format, date) + if e != nil { + return nil, ErrMalformedDate + } + + // Query string. + queryStr := req.URL.Query().Encode() + + // Get canonical request. + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region) + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return nil, ErrSignatureDoesNotMatch + } + + // Return error none. + return identity, ErrNone +} + +// credentialHeader data type represents structured form of Credential +// string from authorization header. 
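The defaulting behavior of getContentSha256Cksum above is easy to miss, so here is a small behavioral sketch (URL and credential are made up):

```go
r, _ := http.NewRequest("PUT", "http://127.0.0.1:8333/mybucket/obj", nil)
sum := getContentSha256Cksum(r) // emptySHA256: a signed request with no header set

q := r.URL.Query()
q.Set("X-Amz-Credential", "some_access_key1/20200101/us-east-1/s3/aws4_request")
r.URL.RawQuery = q.Encode()
sum = getContentSha256Cksum(r) // "UNSIGNED-PAYLOAD": now presigned, still no override
_ = sum
```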
+type credentialHeader struct { + accessKey string + scope struct { + date time.Time + region string + service string + request string + } +} + +// signValues data type represents structured form of AWS Signature V4 header. +type signValues struct { + Credential credentialHeader + SignedHeaders []string + Signature string +} + +// Return scope string. +func (c credentialHeader) getScope() string { + return strings.Join([]string{ + c.scope.date.Format(yyyymmdd), + c.scope.region, + c.scope.service, + c.scope.request, + }, "/") +} + +// Authorization: algorithm Credential=accessKeyID/credScope, \ +// SignedHeaders=signedHeaders, Signature=signature +// +func parseSignV4(v4Auth string) (sv signValues, aec ErrorCode) { + // Replace all spaced strings, some clients can send spaced + // parameters and some won't. So we pro-actively remove any spaces + // to make parsing easier. + v4Auth = strings.Replace(v4Auth, " ", "", -1) + if v4Auth == "" { + return sv, ErrAuthHeaderEmpty + } + + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v4Auth, signV4Algorithm) { + return sv, ErrSignatureVersionNotSupported + } + + // Strip off the Algorithm prefix. + v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) + authFields := strings.Split(strings.TrimSpace(v4Auth), ",") + if len(authFields) != 3 { + return sv, ErrMissingFields + } + + // Initialize signature version '4' structured header. + signV4Values := signValues{} + + var err ErrorCode + // Save credential values. + signV4Values.Credential, err = parseCredentialHeader(authFields[0]) + if err != ErrNone { + return sv, err + } + + // Save signed headers. + signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1]) + if err != ErrNone { + return sv, err + } + + // Save signature. + signV4Values.Signature, err = parseSignature(authFields[2]) + if err != ErrNone { + return sv, err + } + + // Return the structure here. + return signV4Values, ErrNone +} + +// parseCredentialHeader parses the credential string into its structured form. +func parseCredentialHeader(credElement string) (ch credentialHeader, aec ErrorCode) { + creds := strings.Split(strings.TrimSpace(credElement), "=") + if len(creds) != 2 { + return ch, ErrMissingFields + } + if creds[0] != "Credential" { + return ch, ErrMissingCredTag + } + credElements := strings.Split(strings.TrimSpace(creds[1]), "/") + if len(credElements) != 5 { + return ch, ErrCredMalformed + } + // Save access key id. + cred := credentialHeader{ + accessKey: credElements[0], + } + var e error + cred.scope.date, e = time.Parse(yyyymmdd, credElements[1]) + if e != nil { + return ch, ErrMalformedCredentialDate + } + + cred.scope.region = credElements[2] + cred.scope.service = credElements[3] // "s3" + cred.scope.request = credElements[4] // "aws4_request" + return cred, ErrNone +} + +// Parse slice of signed headers from signed headers tag. +func parseSignedHeader(signedHdrElement string) ([]string, ErrorCode) { + signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") + if len(signedHdrFields) != 2 { + return nil, ErrMissingFields + } + if signedHdrFields[0] != "SignedHeaders" { + return nil, ErrMissingSignHeadersTag + } + if signedHdrFields[1] == "" { + return nil, ErrMissingFields + } + signedHeaders := strings.Split(signedHdrFields[1], ";") + return signedHeaders, ErrNone +} + +// Parse signature from signature tag.
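To make the parsing concrete, here is what parseSignV4 above yields for a typical header; the access key and signature are made up:

```go
v4Auth := "AWS4-HMAC-SHA256 " +
	"Credential=some_access_key1/20200101/us-east-1/s3/aws4_request, " +
	"SignedHeaders=host;x-amz-content-sha256;x-amz-date, " +
	"Signature=0123456789abcdef" // illustrative, not a real signature
sv, errCode := parseSignV4(v4Auth)
// errCode == ErrNone
// sv.Credential.accessKey == "some_access_key1"
// sv.Credential.getScope() == "20200101/us-east-1/s3/aws4_request"
// sv.SignedHeaders == []string{"host", "x-amz-content-sha256", "x-amz-date"}
// sv.Signature == "0123456789abcdef"
```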
+func parseSignature(signElement string) (string, ErrorCode) { + signFields := strings.Split(strings.TrimSpace(signElement), "=") + if len(signFields) != 2 { + return "", ErrMissingFields + } + if signFields[0] != "Signature" { + return "", ErrMissingSignTag + } + if signFields[1] == "" { + return "", ErrMissingFields + } + signature := signFields[1] + return signature, ErrNone +} + +// check query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html +func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) { + + // Copy request + req := *r + + // Parse request query string. + pSignValues, err := parsePreSignV4(req.URL.Query()) + if err != ErrNone { + return nil, err + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) + if errCode != ErrNone { + return nil, errCode + } + // Construct new query. + query := make(url.Values) + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + query.Set("X-Amz-Content-Sha256", hashedPayload) + } + + query.Set("X-Amz-Algorithm", signV4Algorithm) + + now := time.Now().UTC() + + // If the host which signed the request is slightly ahead in time (by less than globalMaxSkewTime) the + // request should still be allowed. + if pSignValues.Date.After(now.Add(15 * time.Minute)) { + return nil, ErrRequestNotReadyYet + } + + if now.Sub(pSignValues.Date) > pSignValues.Expires { + return nil, ErrExpiredPresignRequest + } + + // Save the date and expires. + t := pSignValues.Date + expireSeconds := int(pSignValues.Expires / time.Second) + + // Construct the query. + query.Set("X-Amz-Date", t.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds)) + query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders)) + query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, pSignValues.Credential.scope.region)) + + // Save other headers available in the request parameters. + for k, v := range req.URL.Query() { + + // Handle the metadata in presigned put query string + if strings.Contains(strings.ToLower(k), "x-amz-meta-") { + query.Set(k, v[0]) + } + + if strings.HasPrefix(strings.ToLower(k), "x-amz") { + continue + } + query[k] = v + } + + // Get the encoded query. + encodedQuery := query.Encode() + + // Verify if date query is same. + if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if expires query is same. + if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if signed headers query is same. + if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if credential query is same. + if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if sha256 payload query is same. + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") { + return nil, ErrContentSHA256Mismatch + } + } + + /// Verify finally if signature is same. 
+ + // Get canonical request. + presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) + + // Get hmac presigned signing key. + presignedSigningKey := getSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region) + + // Get new signature. + newSignature := getSignature(presignedSigningKey, presignedStringToSign) + + // Verify signature. + if !compareSignatureV4(gotSignature, expectedSignature) { + return nil, ErrSignatureDoesNotMatch + } + + return ident, ErrNone +} + +func contains(list []string, elem string) bool { + for _, t := range list { + if t == elem { + return true + } + } + return false +} + +// preSignValues data type represents structured form of AWS Signature V4 query string. +type preSignValues struct { + signValues + Date time.Time + Expires time.Duration +} + +// Parses signature version '4' query string of the following form. +// +// querystring = X-Amz-Algorithm=algorithm +// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope) +// querystring += &X-Amz-Date=date +// querystring += &X-Amz-Expires=timeout interval +// querystring += &X-Amz-SignedHeaders=signed_headers +// querystring += &X-Amz-Signature=signature +// +// verifies if any of the necessary query params are missing in the presigned request. +func doesV4PresignParamsExist(query url.Values) ErrorCode { + v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"} + for _, v4PresignQueryParam := range v4PresignQueryParams { + if _, ok := query[v4PresignQueryParam]; !ok { + return ErrInvalidQueryParams + } + } + return ErrNone +} + +// Parses all the presigned signature values into separate elements. +func parsePreSignV4(query url.Values) (psv preSignValues, aec ErrorCode) { + var err ErrorCode + // verify whether the required query params exist. + err = doesV4PresignParamsExist(query) + if err != ErrNone { + return psv, err + } + + // Verify if the query algorithm is supported or not. + if query.Get("X-Amz-Algorithm") != signV4Algorithm { + return psv, ErrInvalidQuerySignatureAlgo + } + + // Initialize signature version '4' structured header. + preSignV4Values := preSignValues{} + + // Save credential. + preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential")) + if err != ErrNone { + return psv, err + } + + var e error + // Save date in native time.Time. + preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date")) + if e != nil { + return psv, ErrMalformedPresignedDate + } + + // Save expires in native time.Duration. + preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s") + if e != nil { + return psv, ErrMalformedExpires + } + + if preSignV4Values.Expires < 0 { + return psv, ErrNegativeExpires + } + + // Check if Expiry time is less than 7 days (value in seconds). + if preSignV4Values.Expires.Seconds() > 604800 { + return psv, ErrMaximumExpires + } + + // Save signed headers. + preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders")) + if err != ErrNone { + return psv, err + } + + // Save signature.
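For the presigned counterpart, a sketch of a query string that satisfies parsePreSignV4 (all values illustrative; the date layout assumes the package's iso8601Format of "20060102T150405Z"):

```go
q := url.Values{}
q.Set("X-Amz-Algorithm", "AWS4-HMAC-SHA256")
q.Set("X-Amz-Credential", "some_access_key1/20200101/us-east-1/s3/aws4_request")
q.Set("X-Amz-Date", "20200101T000000Z")
q.Set("X-Amz-Expires", "3600") // parsed as seconds via time.ParseDuration
q.Set("X-Amz-SignedHeaders", "host")
q.Set("X-Amz-Signature", "0123456789abcdef") // illustrative
psv, errCode := parsePreSignV4(q)
// errCode == ErrNone; psv.Expires == time.Hour; anything above 604800s (7 days) is rejected
```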
+ preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature")) + if err != ErrNone { + return psv, err + } + + // Return structured form of signature query string. + return preSignV4Values, ErrNone +} + +// extractSignedHeaders extracts the signed headers from the Authorization header +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, ErrorCode) { + reqHeaders := r.Header + // find whether "host" is part of list of signed headers. + // if not return ErrUnsignedHeaders. "host" is mandatory. + if !contains(signedHeaders, "host") { + return nil, ErrUnsignedHeaders + } + extractedSignedHeaders := make(http.Header) + for _, header := range signedHeaders { + // `host` will not be found in the headers; it can be found in r.Host, + // but the list of signed headers must always contain host. + val, ok := reqHeaders[http.CanonicalHeaderKey(header)] + if ok { + for _, enc := range val { + extractedSignedHeaders.Add(header, enc) + } + continue + } + switch header { + case "expect": + // Golang http server strips off 'Expect' header, if the + // client sent this as part of signed headers we need to + // handle otherwise we would see a signature mismatch. + // `aws-cli` sets this as part of signed headers. + // + // According to + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 + // Expect header is always of form: + // + // Expect = "Expect" ":" 1#expectation + // expectation = "100-continue" | expectation-extension + // + // So it is safe to assume that '100-continue' is what would + // be sent, for the time being keep this workaround. + // Adding a *TODO* to remove this later when Golang server + // doesn't filter out the 'Expect' header. + extractedSignedHeaders.Set(header, "100-continue") + case "host": + // Go http server removes "host" from Request.Header + extractedSignedHeaders.Set(header, r.Host) + case "transfer-encoding": + for _, enc := range r.TransferEncoding { + extractedSignedHeaders.Add(header, enc) + } + case "content-length": + // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. + // But some clients deviate from this rule. Hence we consider Content-Length for signature + // calculation to be compatible with such clients. + extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) + default: + return nil, ErrUnsignedHeaders + } + } + return extractedSignedHeaders, ErrNone +} + +// getSignedHeaders generates an alphabetically sorted, semicolon-separated list of lowercase request header names +func getSignedHeaders(signedHeaders http.Header) string { + var headers []string + for k := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + } + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getScope generates a scope string from a specific date, an AWS region, and a service.
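A small sketch of extractSignedHeaders above; note how "host" is recovered from r.Host, since Go's http package keeps the Host value outside the header map:

```go
r, _ := http.NewRequest("GET", "http://127.0.0.1:8333/mybucket/obj", nil)
r.Header.Set("X-Amz-Date", "20200101T000000Z")
hdrs, errCode := extractSignedHeaders([]string{"host", "x-amz-date"}, r)
// errCode == ErrNone
// hdrs.Get("Host") == "127.0.0.1:8333" (taken from r.Host)
// hdrs.Get("X-Amz-Date") == "20200101T000000Z"

_, errCode = extractSignedHeaders([]string{"x-amz-date"}, r) // "host" not signed
// errCode == ErrUnsignedHeaders
```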
+func getScope(t time.Time, region string) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + region, + "s3", + "aws4_request", + }, "/") + return scope +} + +// getCanonicalRequest generates a canonical request of style +// +// canonicalRequest = +// <HTTPMethod>\n +// <CanonicalURI>\n +// <CanonicalQueryString>\n +// <CanonicalHeaders>\n +// <SignedHeaders>\n +// <HashedPayload> +// +func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { + rawQuery := strings.Replace(queryStr, "+", "%20", -1) + encodedPath := encodePath(urlPath) + canonicalRequest := strings.Join([]string{ + method, + encodedPath, + rawQuery, + getCanonicalHeaders(extractedSignedHeaders), + getSignedHeaders(extractedSignedHeaders), + payload, + }, "\n") + return canonicalRequest +} + +// getStringToSign generates the string to sign from the canonical request, timestamp, and scope. +func getStringToSign(canonicalRequest string, t time.Time, scope string) string { + stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" + stringToSign = stringToSign + scope + "\n" + canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) + stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:]) + return stringToSign +} + +// sumHMAC calculates the HMAC-SHA256 of data keyed with key. +func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +// getSigningKey derives the HMAC signing key used to calculate the final signature. +func getSigningKey(secretKey string, t time.Time, region string) []byte { + date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd))) + regionBytes := sumHMAC(date, []byte(region)) + service := sumHMAC(regionBytes, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature returns the final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getCanonicalHeaders generates a list of request headers with their values +func getCanonicalHeaders(signedHeaders http.Header) string { + var headers []string + vals := make(http.Header) + for k, vv := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + sort.Strings(headers) + + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(signV4TrimAll(v)) + } + buf.WriteByte('\n') + } + return buf.String() +} + +// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() +// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +func signV4TrimAll(input string) string { + // Compress adjacent spaces (a space is determined by + // unicode.IsSpace() internally here) to one space and return + return strings.Join(strings.Fields(input), " ") +} + +// if the object name matches the reserved string, there is no need to encode it +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// encodePath encodes the string from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8; +// non-English characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character.
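The derivation inside getSigningKey above is the standard SigV4 HMAC chain; unrolled, it looks like this (secret and scope values are made up):

```go
secret := "some_secret_key1"
t, _ := time.Parse("20060102", "20200101") // the yyyymmdd layout
kDate := sumHMAC([]byte("AWS4"+secret), []byte(t.Format("20060102")))
kRegion := sumHMAC(kDate, []byte("us-east-1"))
kService := sumHMAC(kRegion, []byte("s3"))
kSigning := sumHMAC(kService, []byte("aws4_request"))
// kSigning is what getSigningKey(secret, t, "us-east-1") returns; the final
// signature is then hex(HMAC(kSigning, stringToSign)), i.e. getSignature.
_ = kSigning
```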
+func encodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +// compareSignatureV4 returns true if and only if both signatures +// are equal. The signatures are expected to be HEX encoded strings +// according to the AWS S3 signature V4 spec. +func compareSignatureV4(sig1, sig2 string) bool { + // The CTC using []byte(str) works because the hex encoding + // is unique for a sequence of bytes. See also compareSignatureV2. + return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1 +} diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go new file mode 100644 index 000000000..036b5c052 --- /dev/null +++ b/weed/s3api/auto_signature_v4_test.go @@ -0,0 +1,418 @@ +package s3api + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "testing" + "time" + "unicode/utf8" +) + +// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature version v4 detection. +func TestIsRequestPresignedSignatureV4(t *testing.T) { + testCases := []struct { + inputQueryKey string + inputQueryValue string + expectedResult bool + }{ + // Test case - 1. + // Test case with query key "X-Amz-Credential" set. + {"", "", false}, + // Test case - 2. + {"X-Amz-Credential", "", true}, + // Test case - 3. + {"X-Amz-Content-Sha256", "", false}, + } + + for i, testCase := range testCases { + // creating an input HTTP request. + // Only the query parameters are relevant for this particular test. + inputReq, err := http.NewRequest("GET", "http://example.com", nil) + if err != nil { + t.Fatalf("Error initializing input HTTP request: %v", err) + } + q := inputReq.URL.Query() + q.Add(testCase.inputQueryKey, testCase.inputQueryValue) + inputReq.URL.RawQuery = q.Encode() + + actualResult := isRequestPresignedSignatureV4(inputReq) + if testCase.expectedResult != actualResult { + t.Errorf("Test %d: Expected the result to be `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult) + } + } +} + +// Tests the request authentication function; validates the s3 error replies. +func TestIsReqAuthenticated(t *testing.T) { + iam := NewIdentityAccessManagement("", "") + iam.identities = []*Identity{ + { + Name: "someone", + Credentials: []*Credential{ + { + AccessKey: "access_key_1", + SecretKey: "secret_key_1", + }, + }, + Actions: nil, + }, + } + + // List of test cases for validating http request authentication. + testCases := []struct { + req *http.Request + s3Error ErrorCode + }{ + // When request is unsigned, access denied is returned. + {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied}, + // When request is properly signed, error is none.
+ {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone}, + } + + // Validate all test cases. + for i, testCase := range testCases { + if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error { + ioutil.ReadAll(testCase.req.Body) + t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error) + } + } +} + +func TestCheckAdminRequestAuthType(t *testing.T) { + iam := NewIdentityAccessManagement("", "") + iam.identities = []*Identity{ + { + Name: "someone", + Credentials: []*Credential{ + { + AccessKey: "access_key_1", + SecretKey: "secret_key_1", + }, + }, + Actions: nil, + }, + } + + testCases := []struct { + Request *http.Request + ErrCode ErrorCode + }{ + {Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied}, + {Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone}, + {Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone}, + } + for i, testCase := range testCases { + if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode { + t.Errorf("Test %d: Unexpected s3 error returned: wanted %d, got %d", i, testCase.ErrCode, s3Error) + } + } +} + +// Provides a fully populated http request instance, fails otherwise. +func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req, err := newTestRequest(method, urlStr, contentLength, body) + if err != nil { + t.Fatalf("Unable to initialize new http request %s", err) + } + return req +} + +// This is similar to mustNewRequest but additionally the request +// is signed with AWS Signature V4, fails if not able to do so. +func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req := mustNewRequest(method, urlStr, contentLength, body, t) + cred := &Credential{"access_key_1", "secret_key_1"} + if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil { + t.Fatalf("Unable to initialize new signed http request %s", err) + } + return req +} + +// This is similar to mustNewRequest but additionally the request +// is presigned with AWS Signature V4, fails if not able to do so. +func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req := mustNewRequest(method, urlStr, contentLength, body, t) + cred := &Credential{"access_key_1", "secret_key_1"} + if err := preSignV4(req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil { + t.Fatalf("Unable to initialize new presigned http request %s", err) + } + return req +} + +// Returns new HTTP request object. +func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) { + if method == "" { + method = "POST" + } + + // Save for subsequent use + var hashedPayload string + var md5Base64 string + switch { + case body == nil: + hashedPayload = getSHA256Hash([]byte{}) + default: + payloadBytes, err := ioutil.ReadAll(body) + if err != nil { + return nil, err + } + hashedPayload = getSHA256Hash(payloadBytes) + md5Base64 = getMD5HashBase64(payloadBytes) + } + // Seek back to beginning.
+ if body != nil { + body.Seek(0, 0) + } else { + body = bytes.NewReader([]byte("")) + } + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + if md5Base64 != "" { + req.Header.Set("Content-Md5", md5Base64) + } + req.Header.Set("x-amz-content-sha256", hashedPayload) + + // Add Content-Length + req.ContentLength = contentLength + + return req, nil +} + +// getSHA256Hash returns SHA-256 hash in hex encoding of given data. +func getSHA256Hash(data []byte) string { + return hex.EncodeToString(getSHA256Sum(data)) +} + +// getMD5HashBase64 returns MD5 hash in base64 encoding of given data. +func getMD5HashBase64(data []byte) string { + return base64.StdEncoding.EncodeToString(getMD5Sum(data)) +} + +// getSHA256Sum returns SHA-256 sum of given data. +func getSHA256Sum(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getMD5Sum returns MD5 sum of given data. +func getMD5Sum(data []byte) []byte { + hash := md5.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getMD5Hash returns MD5 hash in hex encoding of given data. +func getMD5Hash(data []byte) string { + return hex.EncodeToString(getMD5Sum(data)) +} + +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// Sign given request using Signature V4. +func signRequestV4(req *http.Request, accessKey, secretKey string) error { + // Get hashed payload. + hashedPayload := req.Header.Get("x-amz-content-sha256") + if hashedPayload == "" { + return fmt.Errorf("Invalid hashed payload") + } + + currTime := time.Now() + + // Set x-amz-date. + req.Header.Set("x-amz-date", currTime.Format(iso8601Format)) + + // Get header map. + headerMap := make(map[string][]string) + for k, vv := range req.Header { + // If request header key is not in ignored headers, then add it. + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok { + headerMap[strings.ToLower(k)] = vv + } + } + + // Get header keys. + headers := []string{"host"} + for k := range headerMap { + headers = append(headers, k) + } + sort.Strings(headers) + + region := "us-east-1" + + // Get canonical headers. + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + switch { + case k == "host": + buf.WriteString(req.URL.Host) + fallthrough + default: + for idx, v := range headerMap[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } + } + canonicalHeaders := buf.String() + + // Get signed headers. + signedHeaders := strings.Join(headers, ";") + + // Get canonical query string. + req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) + + // Get canonical URI. + canonicalURI := EncodePath(req.URL.Path) + + // Get canonical request. + // canonicalRequest = + // <HTTPMethod>\n + // <CanonicalURI>\n + // <CanonicalQueryString>\n + // <CanonicalHeaders>\n + // <SignedHeaders>\n + // <HashedPayload> + // + canonicalRequest := strings.Join([]string{ + req.Method, + canonicalURI, + req.URL.RawQuery, + canonicalHeaders, + signedHeaders, + hashedPayload, + }, "\n") + + // Get scope.
+ scope := strings.Join([]string{ + currTime.Format(yyyymmdd), + region, + "s3", + "aws4_request", + }, "/") + + stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" + stringToSign = stringToSign + scope + "\n" + stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) + + date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) + regionHMAC := sumHMAC(date, []byte(region)) + service := sumHMAC(regionHMAC, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + + signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) + + // final Authorization header + parts := []string{ + "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + auth := strings.Join(parts, ", ") + req.Header.Set("Authorization", auth) + + return nil +} + +// preSignV4 presign the request, in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. +func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return errors.New("Presign cannot be generated without access and secret keys") + } + + region := "us-east-1" + date := time.Now().UTC() + scope := getScope(date, region) + credential := fmt.Sprintf("%s/%s", accessKeyID, scope) + + // Set URL query. + query := req.URL.Query() + query.Set("X-Amz-Algorithm", signV4Algorithm) + query.Set("X-Amz-Date", date.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) + query.Set("X-Amz-SignedHeaders", "host") + query.Set("X-Amz-Credential", credential) + query.Set("X-Amz-Content-Sha256", unsignedPayload) + + // "host" is the only header required to be signed for Presigned URLs. + extractedSignedHeaders := make(http.Header) + extractedSignedHeaders.Set("host", req.Host) + + queryStr := strings.Replace(query.Encode(), "+", "%20", -1) + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method) + stringToSign := getStringToSign(canonicalRequest, date, scope) + signingKey := getSigningKey(secretAccessKey, date, region) + signature := getSignature(signingKey, stringToSign) + + req.URL.RawQuery = query.Encode() + + // Add signature header to RawQuery. + req.URL.RawQuery += "&X-Amz-Signature=" + url.QueryEscape(signature) + + // Construct the final presigned URL. + return nil +} + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
+func EncodePath(pathName string) string {
+	if reservedObjectNames.MatchString(pathName) {
+		return pathName
+	}
+	var encodedPathname string
+	for _, s := range pathName {
+		if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+			encodedPathname = encodedPathname + string(s)
+			continue
+		}
+		switch s {
+		case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+			encodedPathname = encodedPathname + string(s)
+			continue
+		default:
+			length := utf8.RuneLen(s)
+			if length < 0 {
+				// if utf8 cannot convert, return the same string as-is
+				return pathName
+			}
+			u := make([]byte, length)
+			utf8.EncodeRune(u, s)
+			for _, r := range u {
+				hexStr := hex.EncodeToString([]byte{r})
+				encodedPathname = encodedPathname + "%" + strings.ToUpper(hexStr)
+			}
+		}
+	}
+	return encodedPathname
+}
diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go
index 061fd4a92..76c4394c2 100644
--- a/weed/s3api/chunked_reader_v4.go
+++ b/weed/s3api/chunked_reader_v4.go
@@ -21,17 +21,115 @@ package s3api
 import (
 	"bufio"
 	"bytes"
+	"crypto/sha256"
+	"encoding/hex"
 	"errors"
-	"github.com/dustin/go-humanize"
+	"hash"
 	"io"
 	"net/http"
-)
+	"time"
 
-// Streaming AWS Signature Version '4' constants.
-const (
-	streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+	"github.com/dustin/go-humanize"
 )
 
+// getChunkSignature - get chunk signature.
+func getChunkSignature(secretKey string, seedSignature string, region string, date time.Time, hashedChunk string) string {
+
+	// Calculate string to sign.
+	stringToSign := signV4ChunkedAlgorithm + "\n" +
+		date.Format(iso8601Format) + "\n" +
+		getScope(date, region) + "\n" +
+		seedSignature + "\n" +
+		emptySHA256 + "\n" +
+		hashedChunk
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(secretKey, date, region)
+
+	// Calculate signature.
+	newSignature := getSignature(signingKey, stringToSign)
+
+	return newSignature
+}
+
+// calculateSeedSignature - calculate the seed signature in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html.
+// It returns the seed signature on success, or an error code if the signature
+// does not match or any other parsing or validation error occurs.
+func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode ErrorCode) {
+
+	// Copy request.
+	req := *r
+
+	// Save authorization header.
+	v4Auth := req.Header.Get("Authorization")
+
+	// Parse signature version '4' header.
+	signV4Values, errCode := parseSignV4(v4Auth)
+	if errCode != ErrNone {
+		return nil, "", "", time.Time{}, errCode
+	}
+
+	// Payload streaming.
+	payload := streamingContentSHA256
+
+	// Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
+	if payload != req.Header.Get("X-Amz-Content-Sha256") {
+		return nil, "", "", time.Time{}, ErrContentSHA256Mismatch
+	}
+
+	// Extract all the signed headers along with their values.
+	extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
+	if errCode != ErrNone {
+		return nil, "", "", time.Time{}, errCode
+	}
+	// Verify if the access key id matches.
+	_, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
+	if !found {
+		return nil, "", "", time.Time{}, ErrInvalidAccessKeyID
+	}
+
+	// Use the region from the credential scope.
+	region = signV4Values.Credential.scope.region
+
+	// Extract the date; if not present, return an error.
+	var dateStr string
+	if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" {
+		if dateStr = r.Header.Get("Date"); dateStr == "" {
+			return nil, "", "", time.Time{}, ErrMissingDateHeader
+		}
+	}
+	// Parse date header.
+	var err error
+	date, err = time.Parse(iso8601Format, dateStr)
+	if err != nil {
+		return nil, "", "", time.Time{}, ErrMalformedDate
+	}
+
+	// Query string.
+	queryStr := req.URL.Query().Encode()
+
+	// Get canonical request.
+	canonicalRequest := getCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method)
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope())
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region)
+
+	// Calculate signature.
+	newSignature := getSignature(signingKey, stringToSign)
+
+	// Verify if the signatures match.
+	if !compareSignatureV4(newSignature, signV4Values.Signature) {
+		return nil, "", "", time.Time{}, ErrSignatureDoesNotMatch
+	}
+
+	// Return the calculated signature.
+	return cred, newSignature, region, date, ErrNone
+}
+
 const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
 
 // lineTooLong is generated as chunk header is bigger than 4KiB.
@@ -43,22 +141,36 @@ var errMalformedEncoding = errors.New("malformed chunked encoding")
 // newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
 // out of HTTP "chunked" format before returning it.
 // The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
-func newSignV4ChunkedReader(req *http.Request) io.ReadCloser {
-	return &s3ChunkedReader{
-		reader: bufio.NewReader(req.Body),
-		state:  readChunkHeader,
+func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, ErrorCode) {
+	ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req)
+	if errCode != ErrNone {
+		return nil, errCode
 	}
+	return &s3ChunkedReader{
+		cred:              ident,
+		reader:            bufio.NewReader(req.Body),
+		seedSignature:     seedSignature,
+		seedDate:          seedDate,
+		region:            region,
+		chunkSHA256Writer: sha256.New(),
+		state:             readChunkHeader,
+	}, ErrNone
 }
 
 // Represents the overall state that is required for decoding a
 // AWS Signature V4 chunked reader.
 type s3ChunkedReader struct {
-	reader         *bufio.Reader
-	state          chunkState
-	lastChunk      bool
-	chunkSignature string
-	n              uint64 // Unread bytes in chunk
-	err            error
+	cred              *Credential
+	reader            *bufio.Reader
+	seedSignature     string
+	seedDate          time.Time
+	region            string
+	state             chunkState
+	lastChunk         bool
+	chunkSignature    string
+	chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data.
+	n                 uint64 // Unread bytes in chunk
+	err               error
 }
 
 // Read chunk reads the chunk token signature portion.
@@ -157,6 +269,9 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
 			return 0, cr.err
 		}
 
+		// Calculate sha256.
+		cr.chunkSHA256Writer.Write(rbuf[:n0])
+
 		// Update the bytes read into request buffer so far.
 		n += n0
 		buf = buf[n0:]
@@ -169,6 +284,19 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
 				continue
 			}
 		case verifyChunk:
+			// Calculate the hashed chunk.
+			hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
+			// Calculate the chunk signature.
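+			// Per the SigV4 streaming spec, the string-to-sign for each chunk
+			// (built in getChunkSignature above) embeds the previous chunk's
+			// signature, so the chunks form a tamper-evident chain seeded by
+			// the Authorization header's seed signature.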
+			newSignature := getChunkSignature(cr.cred.SecretKey, cr.seedSignature, cr.region, cr.seedDate, hashedChunk)
+			if !compareSignatureV4(cr.chunkSignature, newSignature) {
+				// The chunk signature doesn't match; report a signature mismatch.
+				cr.err = errors.New("chunk signature does not match")
+				return 0, cr.err
+			}
+			// The newly calculated signature becomes the seed for the next chunk;
+			// this implements the signature chaining.
+			cr.seedSignature = newSignature
+			cr.chunkSHA256Writer.Reset()
 			if cr.lastChunk {
 				cr.state = eofChunk
 			} else {
diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index d3bde66ee..792127771 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -1,7 +1,6 @@
 package s3api
 
 import (
-	"context"
 	"encoding/xml"
 	"fmt"
 	"path/filepath"
@@ -11,10 +10,11 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/google/uuid"
+
 	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-	"github.com/google/uuid"
 )
 
 type InitiateMultipartUploadResult struct {
@@ -22,11 +22,11 @@ type InitiateMultipartUploadResult struct {
 	s3.CreateMultipartUploadOutput
 }
 
-func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
+func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
 	uploadId, _ := uuid.NewRandom()
 	uploadIdString := uploadId.String()
 
-	if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
+	if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
 		if entry.Extended == nil {
 			entry.Extended = make(map[string][]byte)
 		}
@@ -52,11 +52,11 @@ type CompleteMultipartUploadResult struct {
 	s3.CompleteMultipartUploadOutput
 }
 
-func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {
+func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {
 
 	uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
 
-	entries, err := s3a.list(ctx, uploadDirectory, "", "", false, 0)
+	entries, err := s3a.list(uploadDirectory, "", "", false, 0)
 	if err != nil {
 		glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err)
 		return nil, ErrNoSuchUpload
@@ -96,7 +96,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
 		dirName = dirName[:len(dirName)-1]
 	}
 
-	err = s3a.mkFile(ctx, dirName, entryName, finalParts)
+	err = s3a.mkFile(dirName, entryName, finalParts)
 
 	if err != nil {
 		glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
@@ -112,22 +112,22 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
 		},
 	}
 
-	if err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil {
+	if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil {
 		glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
 	}
 
 	return
 }
 
-func (s3a *S3ApiServer) abortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) (output
*s3.AbortMultipartUploadOutput, code ErrorCode) { +func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) { - exists, err := s3a.exists(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) + exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) if err != nil { glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload } if exists { - err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) + err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) } if err != nil { glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err) @@ -142,7 +142,7 @@ type ListMultipartUploadsResult struct { s3.ListMultipartUploadsOutput } -func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) { +func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) { output = &ListMultipartUploadsResult{ ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{ @@ -155,7 +155,7 @@ func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.List }, } - entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) + entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) if err != nil { glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err) return @@ -179,7 +179,7 @@ type ListPartsResult struct { s3.ListPartsOutput } -func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { +func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { output = &ListPartsResult{ ListPartsOutput: s3.ListPartsOutput{ Bucket: input.Bucket, @@ -190,8 +190,7 @@ func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListParts }, } - entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, - "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts)) + entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts)) if err != nil { glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index b93b603e2..ec1eedcb4 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -3,6 +3,7 @@ package s3api import ( "context" "fmt" + "io" "os" "strings" "time" @@ -11,8 +12,8 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) -func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { +func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { + return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ Name: dirName, @@ -36,7 +37,7 @@ func (s3a *S3ApiServer) mkdir(ctx 
context.Context, parentDirectoryPath string, d } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("mkdir %v: %v", request, err) return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) } @@ -45,8 +46,8 @@ func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, d }) } -func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { +func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { + return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ Name: fileName, @@ -67,7 +68,7 @@ func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, } glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("create file %v:%v", request, err) return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) } @@ -76,9 +77,9 @@ func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, }) } -func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { +func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: parentDirectoryPath, @@ -89,13 +90,25 @@ func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, s } glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(ctx, request) + stream, err := client.ListEntries(context.Background(), request) if err != nil { glog.V(0).Infof("read directory %v: %v", request, err) return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err) } - entries = resp.Entries + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + entries = append(entries, resp.Entry) + + } return nil }) @@ -104,9 +117,9 @@ func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, s } -func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error { +func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDirectory, isDeleteData, isRecursive bool) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: parentDirectoryPath, @@ -116,7 +129,7 @@ func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entr } glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) - if _, err := client.DeleteEntry(ctx, request); err != nil { + if _, err := client.DeleteEntry(context.Background(), request); err != nil { glog.V(0).Infof("delete entry %v: %v", 
request, err) return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) } @@ -126,9 +139,62 @@ func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entr } -func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { +func (s3a *S3ApiServer) streamRemove(quiet bool, fn func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool), respFn func(err string)) error { + + return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + stream, err := client.StreamDeleteEntries(context.Background()) + if err != nil { + glog.V(0).Infof("stream delete entry: %v", err) + return fmt.Errorf("stream delete entry: %v", err) + } + + waitc := make(chan struct{}) + go func() { + for { + resp, err := stream.Recv() + if err == io.EOF { + // read done. + close(waitc) + return + } + if err != nil { + glog.V(0).Infof("streamRemove: %v", err) + return + } + respFn(resp.Error) + } + }() + + for { + finished, parentDirectoryPath, entryName, isDeleteData, isRecursive := fn() + if finished { + break + } + err = stream.Send(&filer_pb.DeleteEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + IsDeleteData: isDeleteData, + IsRecursive: isRecursive, + IgnoreRecursiveError: quiet, + }) + if err != nil { + glog.V(0).Infof("streamRemove: %v", err) + break + } + + } + stream.CloseSend() + <-waitc + return err + + }) + +} + +func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: parentDirectoryPath, @@ -136,8 +202,12 @@ func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, } glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) - resp, err := client.LookupDirectoryEntry(ctx, request) + resp, err := filer_pb.LookupEntry(client, request) if err != nil { + if err == filer_pb.ErrNotFound { + exists = false + return nil + } glog.V(0).Infof("exists entry %v: %v", request, err) return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) } diff --git a/weed/s3api/s3api_auth.go b/weed/s3api/s3api_auth.go index b680fe1e1..bf5cf5fab 100644 --- a/weed/s3api/s3api_auth.go +++ b/weed/s3api/s3api_auth.go @@ -9,6 +9,8 @@ import ( const ( signV4Algorithm = "AWS4-HMAC-SHA256" signV2Algorithm = "AWS" + iso8601Format = "20060102T150405Z" + yyyymmdd = "20060102" ) // Verify if request has JWT. @@ -23,8 +25,8 @@ func isRequestSignatureV4(r *http.Request) bool { // Verify if request has AWS Signature Version '2'. func isRequestSignatureV2(r *http.Request) bool { - return (!strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && - strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm)) + return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && + strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) } // Verify if request has AWS PreSign Version '4'. 
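The iso8601Format and yyyymmdd constants added above feed a family of SigV4 helpers (getScope, getSigningKey, getSignature, sumHMAC) that the signing and chunked-reader code calls but that are defined outside this diff. As a minimal, self-contained sketch, assuming the standard AWS Signature V4 derivation that the inline code in signRequestV4 also performs (names and formatting here are illustrative, not taken from the patch):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
	"time"
)

const (
	yyyymmdd      = "20060102"
	iso8601Format = "20060102T150405Z"
)

// sumHMAC returns the HMAC-SHA256 of data under key.
func sumHMAC(key []byte, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

// getScope builds the credential scope: <yyyymmdd>/<region>/s3/aws4_request.
func getScope(t time.Time, region string) string {
	return strings.Join([]string{t.Format(yyyymmdd), region, "s3", "aws4_request"}, "/")
}

// getSigningKey chains four HMACs to derive the per-day signing key,
// mirroring the inline date -> region -> service -> request chain in signRequestV4.
func getSigningKey(secretKey string, t time.Time, region string) []byte {
	dateKey := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd)))
	regionKey := sumHMAC(dateKey, []byte(region))
	serviceKey := sumHMAC(regionKey, []byte("s3"))
	return sumHMAC(serviceKey, []byte("aws4_request"))
}

// getSignature hex-encodes the HMAC of stringToSign under the signing key.
func getSignature(signingKey []byte, stringToSign string) string {
	return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}

func main() {
	now := time.Now().UTC()
	stringToSign := "AWS4-HMAC-SHA256\n" + now.Format(iso8601Format) + "\n" +
		getScope(now, "us-east-1") + "\n" + "<hashed canonical request>"
	fmt.Println(getSignature(getSigningKey("secretKey", now, "us-east-1"), stringToSign))
}

Deriving the key from the date, region, and service rather than signing with the raw secret means a leaked signing key is only usable for one day, one region, and one service.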
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 492d94616..3e5089bed 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -11,9 +11,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" + "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gorilla/mux" ) var ( @@ -31,7 +32,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques var response ListAllMyBucketsResult - entries, err := s3a.list(context.Background(), s3a.option.BucketsPath, "", "", false, math.MaxInt32) + entries, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -65,7 +66,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) bucket := vars["bucket"] // create the folder for bucket, but lazily create actual collection - if err := s3a.mkdir(context.Background(), s3a.option.BucketsPath, bucket, nil); err != nil { + if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil { writeErrorResponse(w, ErrInternalError, r.URL) return } @@ -78,8 +79,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque vars := mux.Vars(r) bucket := vars["bucket"] - ctx := context.Background() - err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { // delete collection deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{ @@ -87,14 +87,14 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque } glog.V(1).Infof("delete collection: %v", deleteCollectionRequest) - if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil { + if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil { return fmt.Errorf("delete collection %s: %v", bucket, err) } return nil }) - err = s3a.rm(ctx, s3a.option.BucketsPath, bucket, true, false, true) + err = s3a.rm(s3a.option.BucketsPath, bucket, true, false, true) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -109,9 +109,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request vars := mux.Vars(r) bucket := vars["bucket"] - ctx := context.Background() - - err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: s3a.option.BucketsPath, @@ -119,7 +117,10 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request } glog.V(1).Infof("lookup bucket: %v", request) - if _, err := client.LookupDirectoryEntry(ctx, request); err != nil { + if _, err := filer_pb.LookupEntry(client, request); err != nil { + if err == filer_pb.ErrNotFound { + return filer_pb.ErrNotFound + } return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err) } diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go index 7ba55ed28..3f97c73cb 100644 --- a/weed/s3api/s3api_errors.go +++ b/weed/s3api/s3api_errors.go @@ -27,6 +27,7 @@ type ErrorCode int // Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html const ( ErrNone ErrorCode = iota + ErrAccessDenied ErrMethodNotAllowed 
ErrBucketNotEmpty ErrBucketAlreadyExists @@ -41,12 +42,43 @@ const ( ErrInvalidPartNumberMarker ErrInvalidPart ErrInternalError + ErrInvalidCopyDest + ErrInvalidCopySource + ErrAuthHeaderEmpty + ErrSignatureVersionNotSupported + ErrMissingFields + ErrMissingCredTag + ErrCredMalformed + ErrMalformedXML + ErrMalformedDate + ErrMalformedPresignedDate + ErrMalformedCredentialDate + ErrMissingSignHeadersTag + ErrMissingSignTag + ErrUnsignedHeaders + ErrInvalidQueryParams + ErrInvalidQuerySignatureAlgo + ErrExpiredPresignRequest + ErrMalformedExpires + ErrNegativeExpires + ErrMaximumExpires + ErrSignatureDoesNotMatch + ErrContentSHA256Mismatch + ErrInvalidAccessKeyID + ErrRequestNotReadyYet + ErrMissingDateHeader + ErrInvalidRequest ErrNotImplemented ) // error code to APIError structure, these fields carry respective // descriptions for all the error responses. var errorCodeResponse = map[ErrorCode]APIError{ + ErrAccessDenied: { + Code: "AccessDenied", + Description: "Access Denied.", + HTTPStatusCode: http.StatusForbidden, + }, ErrMethodNotAllowed: { Code: "MethodNotAllowed", Description: "The specified method is not allowed against this resource.", @@ -118,6 +150,139 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", HTTPStatusCode: http.StatusBadRequest, }, + + ErrInvalidCopyDest: { + Code: "InvalidRequest", + Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCopySource: { + Code: "InvalidArgument", + Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrMalformedXML: { + Code: "MalformedXML", + Description: "The XML you provided was not well-formed or did not validate against our published schema.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrAuthHeaderEmpty: { + Code: "InvalidArgument", + Description: "Authorization header is invalid -- one and only one ' ' (space) required.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSignatureVersionNotSupported: { + Code: "InvalidRequest", + Description: "The authorization mechanism you have provided is not supported. 
Please use AWS4-HMAC-SHA256.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingFields: { + Code: "MissingFields", + Description: "Missing fields in request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingCredTag: { + Code: "InvalidRequest", + Description: "Missing Credential field for this request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrCredMalformed: { + Code: "AuthorizationQueryParametersError", + Description: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedDate: { + Code: "MalformedDate", + Description: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedPresignedDate: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignHeadersTag: { + Code: "InvalidArgument", + Description: "Signature header missing SignedHeaders field.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignTag: { + Code: "AccessDenied", + Description: "Signature header missing Signature field.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrUnsignedHeaders: { + Code: "AccessDenied", + Description: "There were headers present in the request which were not signed", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQueryParams: { + Code: "AuthorizationQueryParametersError", + Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQuerySignatureAlgo: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Algorithm only supports \"AWS4-HMAC-SHA256\".", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrExpiredPresignRequest: { + Code: "AccessDenied", + Description: "Request has expired", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMalformedExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires should be a number", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNegativeExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires must be non-negative", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMaximumExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrInvalidAccessKeyID: { + Code: "InvalidAccessKeyId", + Description: "The access key ID you provided does not exist in our records.", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrRequestNotReadyYet: { + Code: "AccessDenied", + Description: "Request is not valid yet", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrSignatureDoesNotMatch: { + Code: "SignatureDoesNotMatch", + Description: "The request signature we calculated does not match the signature you provided. 
Check your key and signing method.", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrContentSHA256Mismatch: { + Code: "XAmzContentSHA256Mismatch", + Description: "The provided 'x-amz-content-sha256' header does not match what was computed.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingDateHeader: { + Code: "AccessDenied", + Description: "AWS authentication requires a valid Date or x-amz-date header", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRequest: { + Code: "InvalidRequest", + Description: "Invalid Request", + HTTPStatusCode: http.StatusBadRequest, + }, ErrNotImplemented: { Code: "NotImplemented", Description: "A header you provided implies functionality that is not implemented", diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 127be07e3..d7212d5e3 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -2,17 +2,18 @@ package s3api import ( "bytes" - "context" "encoding/base64" "encoding/xml" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" "net/http" "net/url" "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) type mimeType string @@ -37,9 +38,9 @@ func encodeResponse(response interface{}) []byte { return bytesBuffer.Bytes() } -func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go new file mode 100644 index 000000000..b8fb3f6a4 --- /dev/null +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -0,0 +1,151 @@ +package s3api + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/gorilla/mux" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { + + vars := mux.Vars(r) + dstBucket := vars["bucket"] + dstObject := getObject(vars) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. 
+ if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + if srcBucket == dstBucket && srcObject == dstObject { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + dstUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", + s3a.option.Filer, s3a.option.BucketsPath, dstBucket, dstObject, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + _, _, dataReader, err := util.DownloadFile(srcUrl) + if err != nil { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + defer dataReader.Close() + + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) + + if errCode != ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyObjectResult{ + ETag: etag, + LastModified: time.Now(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} + +func pathToBucketAndObject(path string) (bucket, object string) { + path = strings.TrimPrefix(path, "/") + parts := strings.SplitN(path, "/", 2) + if len(parts) == 2 { + return parts[0], "/" + parts[1] + } + return parts[0], "/" +} + +type CopyPartResult struct { + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` +} + +func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) { + // https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + vars := mux.Vars(r) + dstBucket := vars["bucket"] + // dstObject := getObject(vars) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. 
+ if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + uploadID := r.URL.Query().Get("uploadId") + partIDString := r.URL.Query().Get("partNumber") + + partID, err := strconv.Atoi(partIDString) + if err != nil { + writeErrorResponse(w, ErrInvalidPart, r.URL) + return + } + + // check partID with maximum part ID for multipart objects + if partID > globalMaxPartID { + writeErrorResponse(w, ErrInvalidMaxParts, r.URL) + return + } + + rangeHeader := r.Header.Get("x-amz-copy-source-range") + + dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", + s3a.option.Filer, s3a.genUploadsFolder(dstBucket), uploadID, partID-1, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader) + if err != nil { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + defer dataReader.Close() + + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) + + if errCode != ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyPartResult{ + ETag: etag, + LastModified: time.Now(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 44e93d297..9d03cdbe3 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -3,15 +3,18 @@ package s3api import ( "crypto/md5" "encoding/json" + "encoding/xml" "fmt" "io" "io/ioutil" "net/http" "strings" + "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/server" - "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -40,12 +43,17 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) rAuthType := getRequestAuthType(r) dataReader := r.Body + var s3ErrCode ErrorCode if rAuthType == authTypeStreamingSigned { - dataReader = newSignV4ChunkedReader(r) + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) } + if s3ErrCode != ErrNone { + writeErrorResponse(w, s3ErrCode, r.URL) + return + } + defer dataReader.Close() - uploadUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket) + uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object) etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) @@ -108,10 +116,97 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque } +/// ObjectIdentifier carries key name for the object to delete. +type ObjectIdentifier struct { + ObjectName string `xml:"Key"` +} + +// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted. +type DeleteObjectsRequest struct { + // Element to enable quiet mode for the request + Quiet bool + // List of objects to be deleted + Objects []ObjectIdentifier `xml:"Object"` +} + +// DeleteError structure. +type DeleteError struct { + Code string + Message string + Key string +} + +// DeleteObjectsResponse container for multiple object deletes. +type DeleteObjectsResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` + + // Collection of all deleted objects + DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"` + + // Collection of errors deleting certain objects. 
+ Errors []DeleteError `xml:"Error,omitempty"` +} + // DeleteMultipleObjectsHandler - Delete multiple objects func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) { - // TODO - writeErrorResponse(w, ErrNotImplemented, r.URL) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + deleteXMLBytes, err := ioutil.ReadAll(r.Body) + if err != nil { + writeErrorResponse(w, ErrInternalError, r.URL) + return + } + + deleteObjects := &DeleteObjectsRequest{} + if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil { + writeErrorResponse(w, ErrMalformedXML, r.URL) + return + } + + var index int + + var deletedObjects []ObjectIdentifier + var deleteErrors []DeleteError + s3a.streamRemove(deleteObjects.Quiet, func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool) { + if index >= len(deleteObjects.Objects) { + finished = true + return + } + + object := deleteObjects.Objects[index] + + lastSeparator := strings.LastIndex(object.ObjectName, "/") + parentDirectoryPath, entryName, isDeleteData, isRecursive = "/", object.ObjectName, true, false + if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) { + entryName = object.ObjectName[lastSeparator+1:] + parentDirectoryPath = "/" + object.ObjectName[:lastSeparator] + } + parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath) + return + }, func(err string) { + object := deleteObjects.Objects[index] + if err == "" { + deletedObjects = append(deletedObjects, object) + } else { + deleteErrors = append(deleteErrors, DeleteError{ + Code: "", + Message: err, + Key: object.ObjectName, + }) + } + index++ + }) + + deleteResp := DeleteObjectsResponse{} + if !deleteObjects.Quiet { + deleteResp.DeletedObjects = deletedObjects + } + deleteResp.Errors = deleteErrors + + writeSuccessResponseXML(w, encodeResponse(deleteResp)) + } func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResonse *http.Response, w http.ResponseWriter)) { @@ -128,7 +223,6 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des proxyReq.Header.Set("Host", s3a.option.Filer) proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) - proxyReq.Header.Set("Etag-MD5", "True") for header, values := range r.Header { for _, value := range values { @@ -143,9 +237,10 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des writeErrorResponse(w, ErrInternalError, r.URL) return } - defer resp.Body.Close() + defer util.CloseResponse(resp) responseFn(resp, w) + } func passThroughResponse(proxyResonse *http.Response, w http.ResponseWriter) { for k, v := range proxyResonse.Header { @@ -155,10 +250,10 @@ func passThroughResponse(proxyResonse *http.Response, w http.ResponseWriter) { io.Copy(w, proxyResonse.Body) } -func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.ReadCloser) (etag string, code ErrorCode) { +func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code ErrorCode) { hash := md5.New() - var body io.Reader = io.TeeReader(dataReader, hash) + var body = io.TeeReader(dataReader, hash) proxyReq, err := http.NewRequest("PUT", uploadUrl, body) @@ -178,8 +273,6 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader resp, postErr := client.Do(proxyReq) - dataReader.Close() - if postErr != nil { glog.Errorf("post to filer: %v", postErr) return "", 
ErrInternalError diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 72a25e4a5..3282e4176 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -1,22 +1,22 @@ package s3api import ( - "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/gorilla/mux" "net/http" "net/url" "strconv" "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/gorilla/mux" ) const ( - maxObjectList = 1000 // Limit number of objects in a listObjectsResponse. - maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse. - maxPartsList = 1000 // Limit number of parts in a listPartsResponse. - globalMaxPartID = 10000 + maxObjectListSizeLimit = 10000 // Limit number of objects in a listObjectsResponse. + maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. + maxPartsList = 10000 // Limit number of parts in a listPartsResponse. + globalMaxPartID = 100000 ) // NewMultipartUploadHandler - New multipart upload. @@ -26,7 +26,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http bucket = vars["bucket"] object = vars["object"] - response, errCode := s3a.createMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{ + response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), }) @@ -51,7 +51,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.completeMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{ + response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), @@ -77,7 +77,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht // Get upload id. 
uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.abortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{ + response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), @@ -112,7 +112,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht } } - response, errCode := s3a.listMultipartUploads(context.Background(), &s3.ListMultipartUploadsInput{ + response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{ Bucket: aws.String(bucket), Delimiter: aws.String(delimiter), EncodingType: aws.String(encodingType), @@ -149,7 +149,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re return } - response, errCode := s3a.listObjectParts(context.Background(), &s3.ListPartsInput{ + response, errCode := s3a.listObjectParts(&s3.ListPartsInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), MaxParts: aws.Int64(int64(maxParts)), @@ -175,10 +175,8 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ rAuthType := getRequestAuthType(r) - ctx := context.Background() - uploadID := r.URL.Query().Get("uploadId") - exists, err := s3a.exists(ctx, s3a.genUploadsFolder(bucket), uploadID, true) + exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true) if !exists { writeErrorResponse(w, ErrNoSuchUpload, r.URL) return @@ -195,10 +193,16 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ return } + var s3ErrCode ErrorCode dataReader := r.Body if rAuthType == authTypeStreamingSigned { - dataReader = newSignV4ChunkedReader(r) + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + } + if s3ErrCode != ErrNone { + writeErrorResponse(w, s3ErrCode, r.URL) + return } + defer dataReader.Close() uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket) diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index 1fc8b6b37..5006df6a0 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -3,6 +3,7 @@ package s3api import ( "context" "fmt" + "io" "net/http" "net/url" "path/filepath" @@ -10,14 +11,11 @@ import ( "strings" "time" + "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gorilla/mux" -) - -const ( - maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse. 
) func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { @@ -45,9 +43,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ marker = startAfter } - ctx := context.Background() - - response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) + response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -65,8 +61,6 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ vars := mux.Vars(r) bucket := vars["bucket"] - ctx := context.Background() - originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query()) if maxKeys < 0 { @@ -78,7 +72,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ return } - response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) + response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -88,7 +82,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ writeSuccessResponseXML(w, encodeResponse(response)) } -func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { +func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { // convert full path prefix into directory name and prefix for entry name dir, prefix := filepath.Split(originalPrefix) @@ -97,7 +91,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr } // check filer - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir), @@ -107,7 +101,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr InclusiveStartFrom: false, } - resp, err := client.ListEntries(ctx, request) + stream, err := client.ListEntries(context.Background(), request) if err != nil { return fmt.Errorf("list buckets: %v", err) } @@ -117,7 +111,18 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr var counter int var lastEntryName string var isTruncated bool - for _, entry := range resp.Entries { + + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + entry := resp.Entry counter++ if counter > maxKeys { isTruncated = true @@ -143,6 +148,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr StorageClass: "STANDARD", }) } + } response = ListBucketResult{ diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index 24458592d..773094a5f 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -1,20 +1,16 @@ package s3api import ( - _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" - _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" - _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb" - _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql" - _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" - _ "github.com/chrislusf/seaweedfs/weed/filer2/redis" + "net/http" + "github.com/gorilla/mux" "google.golang.org/grpc" - 
"net/http" ) type S3ApiServerOption struct { Filer string FilerGrpcAddress string + Config string DomainName string BucketsPath string GrpcDialOption grpc.DialOption @@ -22,11 +18,13 @@ type S3ApiServerOption struct { type S3ApiServer struct { option *S3ApiServerOption + iam *IdentityAccessManagement } func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) { s3ApiServer = &S3ApiServer{ option: option, + iam: NewIdentityAccessManagement(option.Config, option.DomainName), } s3ApiServer.registerRouter(router) @@ -46,48 +44,47 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { for _, bucket := range routers { // HeadObject - bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.HeadObjectHandler) + bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ)) // HeadBucket - bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler) + bucket.Methods("HEAD").HandlerFunc(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN)) + // CopyObjectPart + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // PutObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // CompleteMultipartUpload - bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}") // NewMultipartUpload - bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.NewMultipartUploadHandler).Queries("uploads", "") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE)).Queries("uploads", "") // AbortMultipartUpload - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}") // ListObjectParts - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}") // ListMultipartUploads - bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "") + bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_WRITE)).Queries("uploads", "") + // CopyObject + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE)) // PutObject - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler) + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE)) // PutBucket - bucket.Methods("PUT").HandlerFunc(s3a.PutBucketHandler) + 
bucket.Methods("PUT").HandlerFunc(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN)) // DeleteObject - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.DeleteObjectHandler) + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE)) // DeleteBucket - bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketHandler) + bucket.Methods("DELETE").HandlerFunc(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE)) // ListObjectsV2 - bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV2Handler).Queries("list-type", "2") + bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_READ)).Queries("list-type", "2") // GetObject, but directory listing is not supported - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectHandler) + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ)) // ListObjectsV1 (Legacy) - bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV1Handler) + bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_READ)) // DeleteMultipleObjects - bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "") + bucket.Methods("POST").HandlerFunc(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)).Queries("delete", "") /* - // CopyObject - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler) - - // CopyObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // not implemented // GetBucketLocation @@ -109,7 +106,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { } // ListBuckets - apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.ListBucketsHandler) + apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN)) // NotFound apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler) diff --git a/weed/s3api/s3api_test.go b/weed/s3api/s3api_test.go new file mode 100644 index 000000000..026766beb --- /dev/null +++ b/weed/s3api/s3api_test.go @@ -0,0 +1,32 @@ +package s3api + +import ( + "testing" + "time" +) + +func TestCopyObjectResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + + response := CopyObjectResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} + +func TestCopyPartResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + + response := CopyPartResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} diff --git a/weed/security/tls.go b/weed/security/tls.go index e81ba4831..1832e6e07 100644 --- a/weed/security/tls.go +++ b/weed/security/tls.go @@ -3,12 +3,14 @@ package security import ( "crypto/tls" "crypto/x509" - "github.com/spf13/viper" "io/ioutil" - "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/spf13/viper" + "google.golang.org/grpc" "google.golang.org/grpc/credentials" + + "github.com/chrislusf/seaweedfs/weed/glog" ) func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { @@ -19,12 +21,12 @@ func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { // load cert/key, ca cert cert, err := 
tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) if err != nil { - glog.Errorf("load cert/key error: %v", err) + glog.V(1).Infof("load cert/key error: %v", err) return nil } - caCert, err := ioutil.ReadFile(config.GetString("ca")) + caCert, err := ioutil.ReadFile(config.GetString(component + ".ca")) if err != nil { - glog.Errorf("read ca cert file error: %v", err) + glog.V(1).Infof("read ca cert file error: %v", err) return nil } caCertPool := x509.NewCertPool() @@ -46,12 +48,12 @@ func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { // load cert/key, cacert cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) if err != nil { - glog.Errorf("load cert/key error: %v", err) + glog.V(1).Infof("load cert/key error: %v", err) return grpc.WithInsecure() } - caCert, err := ioutil.ReadFile(config.GetString("ca")) + caCert, err := ioutil.ReadFile(config.GetString(component + ".ca")) if err != nil { - glog.Errorf("read ca cert file error: %v", err) + glog.V(1).Infof("read ca cert file error: %v", err) return grpc.WithInsecure() } caCertPool := x509.NewCertPool() diff --git a/weed/server/common.go b/weed/server/common.go index d50c283f2..e06142d7f 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -1,27 +1,29 @@ package weed_server import ( - "bytes" "encoding/json" "errors" "fmt" + "io" + "mime/multipart" "net/http" "path/filepath" "strconv" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/storage/needle" "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" - _ "github.com/chrislusf/seaweedfs/weed/statik" "github.com/gorilla/mux" statik "github.com/rakyll/statik/fs" + + _ "github.com/chrislusf/seaweedfs/weed/statik" ) var serverStats *stats.ServerStats @@ -76,7 +78,8 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter // wrapper for writeJson - just logs errors func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) { if err := writeJson(w, r, httpStatus, obj); err != nil { - glog.V(0).Infof("error writing JSON %+v status %d: %v", obj, httpStatus, err) + glog.V(0).Infof("error writing JSON status %d: %v", httpStatus, err) + glog.V(1).Infof("JSON content: %+v", obj) } } func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err error) { @@ -97,13 +100,13 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("parsing upload file...") - fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r) + pu, pe := needle.ParseUpload(r, 256*1024*1024) if pe != nil { writeJsonError(w, r, http.StatusBadRequest, pe) return } - debug("assigning file id for", fname) + debug("assigning file id for", pu.FileName) r.ParseForm() count := uint64(1) if r.FormValue("count") != "" { @@ -115,6 +118,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } ar := &operation.VolumeAssignRequest{ Count: count, + DataCenter: r.FormValue("dataCenter"), Replication: r.FormValue("replication"), Collection: r.FormValue("collection"), Ttl: r.FormValue("ttl"), @@ -126,21 +130,21 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } url := "http://" + 
assignResult.Url + "/" + assignResult.Fid
-    if lastModified != 0 {
-        url = url + "?ts=" + strconv.FormatUint(lastModified, 10)
+    if pu.ModifiedTime != 0 {
+        url = url + "?ts=" + strconv.FormatUint(pu.ModifiedTime, 10)
     }

     debug("upload file to store", url)
-    uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth)
+    uploadResult, err := operation.UploadData(url, pu.FileName, false, pu.Data, pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth)
     if err != nil {
         writeJsonError(w, r, http.StatusInternalServerError, err)
         return
     }

-    m["fileName"] = fname
+    m["fileName"] = pu.FileName
     m["fid"] = assignResult.Fid
     m["fileUrl"] = assignResult.PublicUrl + "/" + assignResult.Fid
-    m["size"] = originalDataSize
+    m["size"] = pu.OriginalDataSize
     m["eTag"] = uploadResult.ETag
     writeJsonQuiet(w, r, http.StatusCreated, m)
     return
@@ -207,3 +211,107 @@ func handleStaticResources2(r *mux.Router) {
     r.Handle("/favicon.ico", http.FileServer(statikFS))
     r.PathPrefix("/seaweedfsstatic/").Handler(http.StripPrefix("/seaweedfsstatic", http.FileServer(statikFS)))
 }
+
+func adjustHeadersAfterHEAD(w http.ResponseWriter, r *http.Request, filename string) {
+    if filename != "" {
+        contentDisposition := "inline"
+        if r.FormValue("dl") != "" {
+            if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl {
+                contentDisposition = "attachment"
+            }
+        }
+        w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`)
+    }
+}
+
+func processRangeRequst(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
+    rangeReq := r.Header.Get("Range")
+
+    if rangeReq == "" {
+        w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
+        if err := writeFn(w, 0, totalSize); err != nil {
+            http.Error(w, err.Error(), http.StatusInternalServerError)
+            return
+        }
+        return
+    }
+
+    // the rest is dealing with partial content request
+    // mostly copy from src/pkg/net/http/fs.go
+    ranges, err := parseRange(rangeReq, totalSize)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
+        return
+    }
+    if sumRangesSize(ranges) > totalSize {
+        // The total number of bytes in all the ranges
+        // is larger than the size of the file by
+        // itself, so this is probably an attack, or a
+        // dumb client. Ignore the range request.
+        return
+    }
+    if len(ranges) == 0 {
+        return
+    }
+    if len(ranges) == 1 {
+        // RFC 2616, Section 14.16:
+        // "When an HTTP message includes the content of a single
+        // range (for example, a response to a request for a
+        // single range, or to a request for a set of ranges
+        // that overlap without any holes), this content is
+        // transmitted with a Content-Range header, and a
+        // Content-Length header showing the number of bytes
+        // actually transferred.
+        // ...
+        // A response to a request for a single range MUST NOT
+        // be sent using the multipart/byteranges media type."
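+        // So a single range is answered with a plain 206 Partial Content
+        // response: Content-Range describes the requested slice, and writeFn
+        // streams only those bytes to the client.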
+ ra := ranges[0] + w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) + w.Header().Set("Content-Range", ra.contentRange(totalSize)) + w.WriteHeader(http.StatusPartialContent) + + err = writeFn(w, ra.start, ra.length) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + return + } + + // process multiple ranges + for _, ra := range ranges { + if ra.start > totalSize { + http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) + return + } + } + sendSize := rangesMIMESize(ranges, mimeType, totalSize) + pr, pw := io.Pipe() + mw := multipart.NewWriter(pw) + w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) + sendContent := pr + defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. + go func() { + for _, ra := range ranges { + part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) + if e != nil { + pw.CloseWithError(e) + return + } + if e = writeFn(part, ra.start, ra.length); e != nil { + pw.CloseWithError(e) + return + } + } + mw.Close() + pw.Close() + }() + if w.Header().Get("Content-Encoding") == "" { + w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) + } + w.WriteHeader(http.StatusPartialContent) + if _, err := io.CopyN(w, sendContent, sendSize); err != nil { + http.Error(w, "Internal Error", http.StatusInternalServerError) + return + } +} diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index feab11c79..b904c1393 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -19,7 +19,11 @@ import ( func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) + if err == filer_pb.ErrNotFound { + return &filer_pb.LookupDirectoryEntryResponse{}, nil + } if err != nil { + glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) return nil, err } @@ -29,32 +33,33 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L IsDirectory: entry.IsDirectory(), Attributes: filer2.EntryAttributeToPb(entry), Chunks: entry.Chunks, + Extended: entry.Extended, }, }, nil } -func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntriesRequest) (*filer_pb.ListEntriesResponse, error) { +func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error { limit := int(req.Limit) if limit == 0 { limit = fs.option.DirListingLimit } - paginationLimit := 1024 + paginationLimit := filer2.PaginationSize if limit < paginationLimit { paginationLimit = limit } - resp := &filer_pb.ListEntriesResponse{} lastFileName := req.StartFromFileName includeLastFile := req.InclusiveStartFrom for limit > 0 { - entries, err := fs.filer.ListDirectoryEntries(ctx, filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) + entries, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) + if err != nil { - return nil, err + return err } if len(entries) == 0 { - return resp, nil + return nil } includeLastFile = false @@ -69,15 +74,21 @@ func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntrie } } - resp.Entries = append(resp.Entries, &filer_pb.Entry{ - Name: 
entry.Name(), - IsDirectory: entry.IsDirectory(), - Chunks: entry.Chunks, - Attributes: filer2.EntryAttributeToPb(entry), - }) + if err := stream.Send(&filer_pb.ListEntriesResponse{ + Entry: &filer_pb.Entry{ + Name: entry.Name(), + IsDirectory: entry.IsDirectory(), + Chunks: entry.Chunks, + Attributes: filer2.EntryAttributeToPb(entry), + Extended: entry.Extended, + }, + }); err != nil { + return err + } + limit-- if limit == 0 { - return resp, nil + return nil } } @@ -87,7 +98,7 @@ func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntrie } - return resp, nil + return nil } func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVolumeRequest) (*filer_pb.LookupVolumeResponse, error) { @@ -123,24 +134,31 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) { + resp = &filer_pb.CreateEntryResponse{} + fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))) chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) if req.Entry.Attributes == nil { - return nil, fmt.Errorf("can not create entry with empty attributes") + glog.V(3).Infof("CreateEntry %s: nil attributes", filepath.Join(req.Directory, req.Entry.Name)) + resp.Error = fmt.Sprintf("can not create entry with empty attributes") + return } - err = fs.filer.CreateEntry(ctx, &filer2.Entry{ + createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{ FullPath: fullpath, Attr: filer2.PbToEntryAttribute(req.Entry.Attributes), Chunks: chunks, - }) + }, req.OExcl) - if err == nil { - fs.filer.DeleteChunks(fullpath, garbages) + if createErr == nil { + fs.filer.DeleteChunks(garbages) + } else { + glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr) + resp.Error = createErr.Error() } - return &filer_pb.CreateEntryResponse{}, err + return } func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { @@ -159,12 +177,14 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr newEntry := &filer2.Entry{ FullPath: filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))), Attr: entry.Attr, + Extended: req.Entry.Extended, Chunks: chunks, } - glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v", + glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v, extended: %v => %v", fullpath, entry.Attr, len(entry.Chunks), entry.Chunks, - req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks) + req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks, + entry.Extended, req.Entry.Extended) if req.Entry.Attributes != nil { if req.Entry.Attributes.Mtime != 0 { @@ -186,8 +206,10 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr } if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil { - fs.filer.DeleteChunks(entry.FullPath, unusedChunks) - fs.filer.DeleteChunks(entry.FullPath, garbages) + fs.filer.DeleteChunks(unusedChunks) + fs.filer.DeleteChunks(garbages) + } else { + glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) } fs.filer.NotifyUpdateEvent(entry, newEntry, true) @@ -197,7 +219,30 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr func (fs *FilerServer) DeleteEntry(ctx context.Context, req 
*filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { err = fs.filer.DeleteEntryMetaAndData(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) - return &filer_pb.DeleteEntryResponse{}, err + resp = &filer_pb.DeleteEntryResponse{} + if err != nil { + resp.Error = err.Error() + } + return resp, nil +} + +func (fs *FilerServer) StreamDeleteEntries(stream filer_pb.SeaweedFiler_StreamDeleteEntriesServer) error { + for { + req, err := stream.Recv() + if err != nil { + return fmt.Errorf("receive delete entry request: %v", err) + } + fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))) + err = fs.filer.DeleteEntryMetaAndData(context.Background(), fullpath, req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) + resp := &filer_pb.DeleteEntryResponse{} + if err != nil { + resp.Error = err.Error() + } + if err := stream.Send(resp); err != nil { + return err + } + } + return nil } func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) { @@ -206,6 +251,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol if req.TtlSec > 0 { ttlStr = strconv.Itoa(int(req.TtlSec)) } + collection, replication := fs.detectCollection(req.ParentPath, req.Collection, req.Replication) var altRequest *operation.VolumeAssignRequest @@ -216,41 +262,45 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol assignRequest := &operation.VolumeAssignRequest{ Count: uint64(req.Count), - Replication: req.Replication, - Collection: req.Collection, + Replication: replication, + Collection: collection, Ttl: ttlStr, DataCenter: dataCenter, } if dataCenter != "" { altRequest = &operation.VolumeAssignRequest{ Count: uint64(req.Count), - Replication: req.Replication, - Collection: req.Collection, + Replication: replication, + Collection: collection, Ttl: ttlStr, DataCenter: "", } } assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest) if err != nil { - return nil, fmt.Errorf("assign volume: %v", err) + glog.V(3).Infof("AssignVolume: %v", err) + return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil } if assignResult.Error != "" { - return nil, fmt.Errorf("assign volume result: %v", assignResult.Error) + glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) + return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil } return &filer_pb.AssignVolumeResponse{ - FileId: assignResult.Fid, - Count: int32(assignResult.Count), - Url: assignResult.Url, - PublicUrl: assignResult.PublicUrl, - Auth: string(assignResult.Auth), - }, err + FileId: assignResult.Fid, + Count: int32(assignResult.Count), + Url: assignResult.Url, + PublicUrl: assignResult.PublicUrl, + Auth: string(assignResult.Auth), + Collection: collection, + Replication: replication, + }, nil } func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - err = fs.filer.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(context.Background(), 
&master_pb.CollectionDeleteRequest{ Name: req.GetCollection(), }) return err @@ -286,5 +336,8 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb. Collection: fs.option.Collection, Replication: fs.option.DefaultReplication, MaxMb: uint32(fs.option.MaxMB), + DirBuckets: fs.filer.DirBucketsPath, + DirQueues: fs.filer.DirQueuesPath, + Cipher: fs.filer.Cipher, }, nil } diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index dfa59e7fe..0669a26f1 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -107,7 +107,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullP Attr: entry.Attr, Chunks: entry.Chunks, } - createErr := fs.filer.CreateEntry(ctx, newEntry) + createErr := fs.filer.CreateEntry(ctx, newEntry, false) if createErr != nil { return createErr } diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 2cf26b1bb..70da9094b 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -7,18 +7,18 @@ import ( "os" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/filer2" _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" _ "github.com/chrislusf/seaweedfs/weed/filer2/etcd" _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb2" - _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb" _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql" _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" _ "github.com/chrislusf/seaweedfs/weed/filer2/redis" @@ -31,21 +31,21 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" _ "github.com/chrislusf/seaweedfs/weed/notification/log" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" ) type FilerOption struct { Masters []string Collection string DefaultReplication string - RedirectOnRead bool DisableDirListing bool MaxMB int DirListingLimit int DataCenter string DefaultLevelDbDir string DisableHttp bool - Port int + Port uint32 + recursiveDelete bool + Cipher bool } type FilerServer struct { @@ -59,18 +59,19 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) fs = &FilerServer{ option: option, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), } if len(option.Masters) == 0 { glog.Fatal("master list is required!") } - fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption) + fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Port+10000) + fs.filer.Cipher = option.Cipher go fs.filer.KeepConnectedToMaster() - v := viper.GetViper() + v := util.GetViper() if !util.LoadConfiguration("filer", false) { v.Set("leveldb2.enabled", true) v.Set("leveldb2.dir", option.DefaultLevelDbDir) @@ -81,9 +82,14 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) } util.LoadConfiguration("notification", false) + fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") + v.Set("filer.option.buckets_folder", "/buckets") + v.Set("filer.option.queues_folder", "/queues") + fs.filer.DirBucketsPath = v.GetString("filer.option.buckets_folder") + fs.filer.DirQueuesPath = 
v.GetString("filer.option.queues_folder") fs.filer.LoadConfiguration(v) - notification.LoadConfiguration(v.Sub("notification")) + notification.LoadConfiguration(v, "notification.") handleStaticResources(defaultMux) if !option.DisableHttp { @@ -93,6 +99,8 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) readonlyMux.HandleFunc("/", fs.readonlyFilerHandler) } + fs.filer.LoadBuckets(fs.filer.DirBucketsPath) + maybeStartMetrics(fs, option) return fs, nil diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index ba21298ba..5967535b8 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -3,19 +3,16 @@ package weed_server import ( "context" "io" - "io/ioutil" "mime" - "mime/multipart" "net/http" - "net/url" - "path" + "path/filepath" "strconv" "strings" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) { @@ -32,7 +29,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, fs.listDirectoryHandler(w, r) return } - if err == filer2.ErrNotFound { + if err == filer_pb.ErrNotFound { glog.V(1).Infof("Not found %s: %v", path, err) stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc() w.WriteHeader(http.StatusNotFound) @@ -66,188 +63,35 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, } w.Header().Set("Accept-Ranges", "bytes") - if r.Method == "HEAD" { - w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) - w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) - setEtag(w, filer2.ETag(entry.Chunks)) - return - } - - if len(entry.Chunks) == 1 { - fs.handleSingleChunk(w, r, entry) - return - } - - fs.handleMultipleChunks(w, r, entry) - -} - -func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { - - fileId := entry.Chunks[0].GetFileIdString() - - urlString, err := fs.filer.MasterClient.LookupFileId(fileId) - if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err) - w.WriteHeader(http.StatusNotFound) - return - } - - if fs.option.RedirectOnRead { - stats.FilerRequestCounter.WithLabelValues("redirect").Inc() - http.Redirect(w, r, urlString, http.StatusFound) - return - } - - u, _ := url.Parse(urlString) - q := u.Query() - for key, values := range r.URL.Query() { - for _, value := range values { - q.Add(key, value) - } - } - u.RawQuery = q.Encode() - request := &http.Request{ - Method: r.Method, - URL: u, - Proto: r.Proto, - ProtoMajor: r.ProtoMajor, - ProtoMinor: r.ProtoMinor, - Header: r.Header, - Body: r.Body, - Host: r.Host, - ContentLength: r.ContentLength, - } - glog.V(3).Infoln("retrieving from", u) - resp, do_err := util.Do(request) - if do_err != nil { - glog.V(0).Infoln("failing to connect to volume server", do_err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, do_err) - return - } - defer func() { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() - for k, v := range resp.Header { - w.Header()[k] = v - } - if entry.Attr.Mime != "" { - w.Header().Set("Content-Type", entry.Attr.Mime) - } - w.WriteHeader(resp.StatusCode) - io.Copy(w, resp.Body) -} - 
-func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { + w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) + // mime type mimeType := entry.Attr.Mime if mimeType == "" { - if ext := path.Ext(entry.Name()); ext != "" { + if ext := filepath.Ext(entry.Name()); ext != "" { mimeType = mime.TypeByExtension(ext) } } if mimeType != "" { w.Header().Set("Content-Type", mimeType) } - setEtag(w, filer2.ETag(entry.Chunks)) - totalSize := int64(filer2.TotalSize(entry.Chunks)) - - rangeReq := r.Header.Get("Range") - - if rangeReq == "" { - w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if err := fs.writeContent(w, entry, 0, int(totalSize)); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - return - } - - //the rest is dealing with partial content request - //mostly copy from src/pkg/net/http/fs.go - ranges, err := parseRange(rangeReq, totalSize) - if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return - } - if sumRangesSize(ranges) > totalSize { - // The total number of bytes in all the ranges - // is larger than the size of the file by - // itself, so this is probably an attack, or a - // dumb client. Ignore the range request. - return - } - if len(ranges) == 0 { - return - } - if len(ranges) == 1 { - // RFC 2616, Section 14.16: - // "When an HTTP message includes the content of a single - // range (for example, a response to a request for a - // single range, or to a request for a set of ranges - // that overlap without any holes), this content is - // transmitted with a Content-Range header, and a - // Content-Length header showing the number of bytes - // actually transferred. - // ... - // A response to a request for a single range MUST NOT - // be sent using the multipart/byteranges media type." - ra := ranges[0] - w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) - w.Header().Set("Content-Range", ra.contentRange(totalSize)) - w.WriteHeader(http.StatusPartialContent) - - err = fs.writeContent(w, entry, ra.start, int(ra.length)) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - return - } + // set etag + setEtag(w, filer2.ETag(entry.Chunks)) - // process multiple ranges - for _, ra := range ranges { - if ra.start > totalSize { - http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return - } - } - sendSize := rangesMIMESize(ranges, mimeType, totalSize) - pr, pw := io.Pipe() - mw := multipart.NewWriter(pw) - w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) - sendContent := pr - defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
-    go func() {
-        for _, ra := range ranges {
-            part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
-            if e != nil {
-                pw.CloseWithError(e)
-                return
-            }
-            if e = fs.writeContent(part, entry, ra.start, int(ra.length)); e != nil {
-                pw.CloseWithError(e)
-                return
-            }
-        }
-        mw.Close()
-        pw.Close()
-    }()
-    if w.Header().Get("Content-Encoding") == "" {
-        w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
-    }
-    w.WriteHeader(http.StatusPartialContent)
-    if _, err := io.CopyN(w, sendContent, sendSize); err != nil {
-        http.Error(w, "Internal Error", http.StatusInternalServerError)
+    if r.Method == "HEAD" {
+        w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))
         return
     }
-}
+    filename := entry.Name()
+    adjustHeadersAfterHEAD(w, r, filename)

-func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error {
+    totalSize := int64(filer2.TotalSize(entry.Chunks))

-    return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, size)
+    processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+        return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, int(size))
+    })
 }
+
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index 5d95a5d7e..5cd174b17 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -2,6 +2,7 @@ package weed_server

 import (
     "context"
+    "crypto/md5"
     "encoding/json"
     "errors"
     "fmt"
@@ -22,6 +23,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/security"
     "github.com/chrislusf/seaweedfs/weed/stats"
+    "github.com/chrislusf/seaweedfs/weed/storage/needle"
     "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -32,13 +34,13 @@ var (
 type FilerPostResult struct {
     Name  string `json:"name,omitempty"`
-    Size  uint32 `json:"size,omitempty"`
+    Size  int64  `json:"size,omitempty"`
     Error string `json:"error,omitempty"`
     Fid   string `json:"fid,omitempty"`
     Url   string `json:"url,omitempty"`
 }

-func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, auth security.EncodedJwt, err error) {
+func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection, dataCenter, ttlString string) (fileId, urlLocation string, auth security.EncodedJwt, err error) {

     stats.FilerRequestCounter.WithLabelValues("assign").Inc()
     start := time.Now()
@@ -48,7 +50,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
         Count:       1,
         Replication: replication,
         Collection:  collection,
-        Ttl:         r.URL.Query().Get("ttl"),
+        Ttl:         ttlString,
         DataCenter:  dataCenter,
     }
     var altRequest *operation.VolumeAssignRequest
@@ -57,7 +59,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
             Count:       1,
             Replication: replication,
             Collection:  collection,
-            Ttl:         r.URL.Query().Get("ttl"),
+            Ttl:         ttlString,
             DataCenter:  "",
         }
     }
@@ -80,57 +82,59 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {

     ctx := context.Background()

     query := r.URL.Query()
-    replication := query.Get("replication")
-    if replication == "" {
-        replication = fs.option.DefaultReplication
-    }
-    collection := query.Get("collection")
-    if collection == "" {
-        collection = fs.option.Collection
-    }
+    collection, replication := fs.detectCollection(r.RequestURI,
query.Get("collection"), query.Get("replication")) dataCenter := query.Get("dataCenter") if dataCenter == "" { dataCenter = fs.option.DataCenter } + ttlString := r.URL.Query().Get("ttl") + + // read ttl in seconds + ttl, err := needle.ReadTTL(ttlString) + ttlSeconds := int32(0) + if err == nil { + ttlSeconds = int32(ttl.Minutes()) * 60 + } + + if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString); autoChunked { + return + } + + if fs.option.Cipher { + reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString) + if err != nil { + writeJsonError(w, r, http.StatusInternalServerError, err) + } else if reply != nil { + writeJsonQuiet(w, r, http.StatusCreated, reply) + } - if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter); autoChunked { return } - fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString) if err != nil || fileId == "" || urlLocation == "" { glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) + writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)) return } glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) u, _ := url.Parse(urlLocation) - - // This allows a client to generate a chunk manifest and submit it to the filer -- it is a little off - // because they need to provide FIDs instead of file paths... - cm, _ := strconv.ParseBool(query.Get("cm")) - if cm { - q := u.Query() - q.Set("cm", "true") - u.RawQuery = q.Encode() - } - glog.V(4).Infoln("post to", u) - ret, err := fs.uploadToVolumeServer(r, u, auth, w, fileId) if err != nil { return } - if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, fileId); err != nil { + if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, fileId, ttlSeconds); err != nil { return } // send back post result reply := FilerPostResult{ Name: ret.Name, - Size: ret.Size, + Size: int64(ret.Size), Error: ret.Error, Fid: fileId, Url: urlLocation, @@ -141,7 +145,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { // update metadata in filer store func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, - replication string, collection string, ret operation.UploadResult, fileId string) (err error) { + replication string, collection string, ret *operation.UploadResult, fileId string, ttlSeconds int32) (err error) { stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc() start := time.Now() @@ -149,6 +153,16 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w stats.FilerRequestHistogram.WithLabelValues("postStoreWrite").Observe(time.Since(start).Seconds()) }() + modeStr := r.URL.Query().Get("mode") + if modeStr == "" { + modeStr = "0660" + } + mode, err := strconv.ParseUint(modeStr, 8, 32) + if err != nil { + glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) + mode = 0660 + } + path := r.URL.Path if strings.HasSuffix(path, "/") { if ret.Name != "" { @@ -165,12 +179,13 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w Attr: filer2.Attr{ Mtime: time.Now(), Crtime: crTime, - Mode: 0660, + Mode: os.FileMode(mode), Uid: OS_UID, 
Gid: OS_GID, Replication: replication, Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), + TtlSec: ttlSeconds, + Mime: ret.Mime, }, Chunks: []*filer_pb.FileChunk{{ FileId: fileId, @@ -179,12 +194,14 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w ETag: ret.ETag, }}, } - if ext := filenamePath.Ext(path); ext != "" { - entry.Attr.Mime = mime.TypeByExtension(ext) + if entry.Attr.Mime == "" { + if ext := filenamePath.Ext(path); ext != "" { + entry.Attr.Mime = mime.TypeByExtension(ext) + } } // glog.V(4).Infof("saving %s => %+v", path, entry) - if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { - fs.filer.DeleteChunks(entry.FullPath, entry.Chunks) + if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) writeJsonError(w, r, http.StatusInternalServerError, dbErr) err = dbErr @@ -195,12 +212,16 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w } // send request to volume server -func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret operation.UploadResult, err error) { +func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, err error) { stats.FilerRequestCounter.WithLabelValues("postUpload").Inc() start := time.Now() defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }() + ret = &operation.UploadResult{} + hash := md5.New() + var body = ioutil.NopCloser(io.TeeReader(r.Body, hash)) + request := &http.Request{ Method: r.Method, URL: u, @@ -208,10 +229,11 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se ProtoMajor: r.ProtoMajor, ProtoMinor: r.ProtoMinor, Header: r.Header, - Body: r.Body, + Body: body, Host: r.Host, ContentLength: r.ContentLength, } + if auth != "" { request.Header.Set("Authorization", "BEARER "+string(auth)) } @@ -226,7 +248,7 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() }() - etag := resp.Header.Get("ETag") + respBody, raErr := ioutil.ReadAll(resp.Body) if raErr != nil { glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, raErr.Error()) @@ -234,6 +256,7 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se err = raErr return } + glog.V(4).Infoln("post result", string(respBody)) unmarshalErr := json.Unmarshal(respBody, &ret) if unmarshalErr != nil { @@ -261,26 +284,65 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se return } } - if etag != "" { - ret.ETag = etag - } + // use filer calculated md5 ETag, instead of the volume server crc ETag + ret.ETag = fmt.Sprintf("%x", hash.Sum(nil)) return } // curl -X DELETE http://localhost:8888/path/to // curl -X DELETE http://localhost:8888/path/to?recursive=true // curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true +// curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { isRecursive := r.FormValue("recursive") == "true" + if !isRecursive && fs.option.recursiveDelete { + if r.FormValue("recursive") != "false" { + 
isRecursive = true + } + } ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true" + skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true" - err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, true) + err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, !skipChunkDeletion) if err != nil { glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, err) + httpStatus := http.StatusInternalServerError + if err == filer_pb.ErrNotFound { + httpStatus = http.StatusNotFound + } + writeJsonError(w, r, httpStatus, err) return } w.WriteHeader(http.StatusNoContent) } + +func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string) { + // default + collection = fs.option.Collection + replication = fs.option.DefaultReplication + + // get default collection settings + if qCollection != "" { + collection = qCollection + } + if qReplication != "" { + replication = qReplication + } + + // required by buckets folder + if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") { + bucketAndObjectKey := requestURI[len(fs.filer.DirBucketsPath)+1:] + t := strings.Index(bucketAndObjectKey, "/") + if t < 0 { + collection = bucketAndObjectKey + } + if t > 0 { + collection = bucketAndObjectKey[:t] + } + replication = fs.filer.ReadBucketOption(collection) + } + + return +} diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 492b55943..a2672b836 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -1,10 +1,9 @@ package weed_server import ( - "bytes" "context" + "fmt" "io" - "io/ioutil" "net/http" "path" "strconv" @@ -17,11 +16,10 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, - replication string, collection string, dataCenter string) bool { + replication string, collection string, dataCenter string, ttlSec int32, ttlString string) bool { if r.Method != "POST" { glog.V(4).Infoln("AutoChunking not supported for method", r.Method) return false @@ -57,7 +55,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * return false } - reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter) + reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter, ttlSec, ttlString) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) } else if reply != nil { @@ -67,7 +65,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * } func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, - contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) { + contentLength int64, chunkSize int32, replication string, collection string, dataCenter string, ttlSec int32, ttlString string) (filerResult *FilerPostResult, replyerr error) { 
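+    // doAutoChunk reads the uploaded body in chunkSize pieces, assigns a
+    // fresh file id for every piece, uploads each piece as its own chunk,
+    // and finally records the whole chunk list in a single filer entry.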
stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc() start := time.Now() @@ -89,68 +87,55 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r if fileName != "" { fileName = path.Base(fileName) } + contentType := part1.Header.Get("Content-Type") + + fmt.Printf("autochunk part header: %+v\n", part1.Header) var fileChunks []*filer_pb.FileChunk - totalBytesRead := int64(0) - tmpBufferSize := int32(1024 * 1024) - tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize)) - chunkBuf := make([]byte, chunkSize+tmpBufferSize, chunkSize+tmpBufferSize) // chunk size plus a little overflow - chunkBufOffset := int32(0) chunkOffset := int64(0) - writtenChunks := 0 - filerResult = &FilerPostResult{ - Name: fileName, - } + for chunkOffset < contentLength { + limitedReader := io.LimitReader(part1, int64(chunkSize)) - for totalBytesRead < contentLength { - tmpBuffer.Reset() - bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize)) - readFully := readErr != nil && readErr == io.EOF - tmpBuf := tmpBuffer.Bytes() - bytesToCopy := tmpBuf[0:int(bytesRead)] - - copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy) - chunkBufOffset = chunkBufOffset + int32(bytesRead) - - if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) { - writtenChunks = writtenChunks + 1 - fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) - if assignErr != nil { - return nil, assignErr - } - - // upload the chunk to the volume server - chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) - uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId, auth) - if uploadErr != nil { - return nil, uploadErr - } - - // Save to chunk manifest structure - fileChunks = append(fileChunks, - &filer_pb.FileChunk{ - FileId: fileId, - Offset: chunkOffset, - Size: uint64(chunkBufOffset), - Mtime: time.Now().UnixNano(), - }, - ) - - // reset variables for the next chunk - chunkBufOffset = 0 - chunkOffset = totalBytesRead + int64(bytesRead) + // assign one file id for one chunk + fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString) + if assignErr != nil { + return nil, assignErr } - totalBytesRead = totalBytesRead + int64(bytesRead) + // upload the chunk to the volume server + uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth) + if uploadErr != nil { + return nil, uploadErr + } - if bytesRead == 0 || readFully { + // if last chunk exhausted the reader exactly at the border + if uploadResult.Size == 0 { break } - if readErr != nil { - return nil, readErr + // Save to chunk manifest structure + fileChunks = append(fileChunks, + &filer_pb.FileChunk{ + FileId: fileId, + Offset: chunkOffset, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, + }, + ) + + glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size), contentLength) + + // reset variables for the next chunk + chunkOffset = chunkOffset + int64(uploadResult.Size) + + // if last chunk was not at full chunk size, but already exhausted the reader + if int64(uploadResult.Size) < int64(chunkSize) { + break } } @@ -172,12 +157,19 @@ func (fs 
*FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r Gid: OS_GID, Replication: replication, Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), + TtlSec: ttlSec, + Mime: contentType, }, Chunks: fileChunks, } - if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { - fs.filer.DeleteChunks(entry.FullPath, entry.Chunks) + + filerResult = &FilerPostResult{ + Name: fileName, + Size: chunkOffset, + } + + if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) replyerr = dbErr filerResult.Error = dbErr.Error() glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) @@ -187,8 +179,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r return } -func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, - chunkBuf []byte, fileName string, contentType string, fileId string, auth security.EncodedJwt) (err error) { +func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error) { stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc() start := time.Now() @@ -196,13 +187,5 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) }() - ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) - uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, auth) - if uploadResult != nil { - glog.V(0).Infoln("Chunk upload result. 
Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size) - } - if uploadError != nil { - err = uploadError - } - return + return operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth) } diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go new file mode 100644 index 000000000..06670399c --- /dev/null +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -0,0 +1,98 @@ +package weed_server + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +// handling single chunk POST or PUT upload +func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, + replication string, collection string, dataCenter string, ttlSeconds int32, ttlString string) (filerResult *FilerPostResult, err error) { + + fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString) + + if err != nil || fileId == "" || urlLocation == "" { + return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) + } + + glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) + + // Note: encrypt(gzip(data)), encrypt data first, then gzip + + sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024 + + pu, err := needle.ParseUpload(r, sizeLimit) + uncompressedData := pu.Data + if pu.IsGzipped { + uncompressedData = pu.UncompressedData + } + if pu.MimeType == "" { + pu.MimeType = http.DetectContentType(uncompressedData) + } + + uploadResult, uploadError := operation.UploadData(urlLocation, pu.FileName, true, uncompressedData, false, pu.MimeType, pu.PairMap, auth) + if uploadError != nil { + return nil, fmt.Errorf("upload to volume server: %v", uploadError) + } + + // Save to chunk manifest structure + fileChunks := []*filer_pb.FileChunk{ + { + FileId: fileId, + Offset: 0, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.Md5, + CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, + }, + } + + fmt.Printf("uploaded: %+v\n", uploadResult) + + path := r.URL.Path + if strings.HasSuffix(path, "/") { + if pu.FileName != "" { + path += pu.FileName + } + } + + entry := &filer2.Entry{ + FullPath: filer2.FullPath(path), + Attr: filer2.Attr{ + Mtime: time.Now(), + Crtime: time.Now(), + Mode: 0660, + Uid: OS_UID, + Gid: OS_GID, + Replication: replication, + Collection: collection, + TtlSec: ttlSeconds, + Mime: pu.MimeType, + }, + Chunks: fileChunks, + } + + filerResult = &FilerPostResult{ + Name: pu.FileName, + Size: int64(pu.OriginalDataSize), + } + + if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) + err = dbErr + filerResult.Error = dbErr.Error() + return + } + + return +} diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go index 55a1909a8..2f0df7f91 100644 --- a/weed/server/filer_ui/breadcrumb.go +++ b/weed/server/filer_ui/breadcrumb.go @@ -14,10 +14,14 @@ func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) { parts := strings.Split(fullpath, "/") for i := 0; i < len(parts); i++ { - crumbs = append(crumbs, Breadcrumb{ - Name: parts[i] + "/", + crumb := Breadcrumb{ + Name: 
parts[i] + " /", Link: "/" + filepath.ToSlash(filepath.Join(parts[0:i+1]...)), - }) + } + if !strings.HasSuffix(crumb.Link, "/") { + crumb.Link += "/" + } + crumbs = append(crumbs, crumb) } return diff --git a/weed/server/filer_ui/templates.go b/weed/server/filer_ui/templates.go index 884798936..e532b27e2 100644 --- a/weed/server/filer_ui/templates.go +++ b/weed/server/filer_ui/templates.go @@ -50,7 +50,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
{{ range $entry := .Breadcrumbs }} - + {{ $entry.Name }} {{ end }} @@ -78,20 +78,19 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` {{end}} - + {{if $entry.IsDirectory}} {{else}} - {{ $entry.Mime }} + {{ $entry.Mime }}  {{end}} - + {{if $entry.IsDirectory}} {{else}} - {{ $entry.Size | humanizeBytes }} -     + {{ $entry.Size | humanizeBytes }}  {{end}} - + {{ $entry.Timestamp.Format "2006-01-02 15:04" }} diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 82a190e39..84087df8b 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -1,17 +1,20 @@ package weed_server import ( + "context" "fmt" "net" "strings" "time" "github.com/chrislusf/raft" + "google.golang.org/grpc/peer" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" - "google.golang.org/grpc/peer" ) func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error { @@ -60,14 +63,6 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ t.Sequence.SetMax(heartbeat.MaxFileKey) if dn == nil { - if heartbeat.Ip == "" { - if pr, ok := peer.FromContext(stream.Context()); ok { - if pr.Addr != net.Addr(nil) { - heartbeat.Ip = pr.Addr.String()[0:strings.LastIndex(pr.Addr.String(), ":")] - glog.V(0).Infof("remote IP address is detected as %v", heartbeat.Ip) - } - } - } dcName, rackName := t.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack) dc := t.GetOrCreateDataCenter(dcName) rack := dc.GetOrCreateRack(rackName) @@ -76,7 +71,10 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ int64(heartbeat.MaxVolumeCount)) glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort()) if err := stream.Send(&master_pb.HeartbeatResponse{ - VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024, + VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024, + MetricsAddress: ms.option.MetricsAddress, + MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + StorageBackends: backend.ToPbStorageBackends(), }); err != nil { glog.Warningf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err) return err @@ -164,9 +162,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ return err } if err := stream.Send(&master_pb.HeartbeatResponse{ - Leader: newLeader, - MetricsAddress: ms.option.MetricsAddress, - MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + Leader: newLeader, }); err != nil { glog.Warningf("SendHeartbeat.Send response to to %s:%d %v", dn.Ip, dn.Port, err) return err @@ -187,35 +183,13 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ return ms.informNewLeader(stream) } - // remember client address - ctx := stream.Context() - // fmt.Printf("FromContext %+v\n", ctx) - pr, ok := peer.FromContext(ctx) - if !ok { - glog.Error("failed to get peer from ctx") - return fmt.Errorf("failed to get peer from ctx") - } - if pr.Addr == net.Addr(nil) { - glog.Error("failed to get peer address") - return fmt.Errorf("failed to get peer address") - } + peerAddress := findClientAddress(stream.Context(), req.GrpcPort) - clientName := req.Name + pr.Addr.String() - glog.V(0).Infof("+ client %v", clientName) - - messageChan := 
make(chan *master_pb.VolumeLocation) stopChan := make(chan bool) - ms.clientChansLock.Lock() - ms.clientChans[clientName] = messageChan - ms.clientChansLock.Unlock() + clientName, messageChan := ms.addClient(req.Name, peerAddress) - defer func() { - glog.V(0).Infof("- client %v", clientName) - ms.clientChansLock.Lock() - delete(ms.clientChans, clientName) - ms.clientChansLock.Unlock() - }() + defer ms.deleteClient(clientName) for _, message := range ms.Topo.ToVolumeLocations() { if err := stream.Send(message); err != nil { @@ -267,3 +241,57 @@ func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedSe } return nil } + +func (ms *MasterServer) addClient(clientType string, clientAddress string) (clientName string, messageChan chan *master_pb.VolumeLocation) { + clientName = clientType + "@" + clientAddress + glog.V(0).Infof("+ client %v", clientName) + + messageChan = make(chan *master_pb.VolumeLocation) + + ms.clientChansLock.Lock() + ms.clientChans[clientName] = messageChan + ms.clientChansLock.Unlock() + return +} + +func (ms *MasterServer) deleteClient(clientName string) { + glog.V(0).Infof("- client %v", clientName) + ms.clientChansLock.Lock() + delete(ms.clientChans, clientName) + ms.clientChansLock.Unlock() +} + +func findClientAddress(ctx context.Context, grpcPort uint32) string { + // fmt.Printf("FromContext %+v\n", ctx) + pr, ok := peer.FromContext(ctx) + if !ok { + glog.Error("failed to get peer from ctx") + return "" + } + if pr.Addr == net.Addr(nil) { + glog.Error("failed to get peer address") + return "" + } + if grpcPort == 0 { + return pr.Addr.String() + } + if tcpAddr, ok := pr.Addr.(*net.TCPAddr); ok { + externalIP := tcpAddr.IP + return fmt.Sprintf("%s:%d", externalIP, grpcPort) + } + return pr.Addr.String() + +} + +func (ms *MasterServer) ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) { + resp := &master_pb.ListMasterClientsResponse{} + ms.clientChansLock.RLock() + defer ms.clientChansLock.RUnlock() + + for k := range ms.clientChans { + if strings.HasPrefix(k, req.ClientType+"@") { + resp.GrpcAddresses = append(resp.GrpcAddresses, k[len(req.ClientType)+1:]) + } + } + return resp, nil +} diff --git a/weed/server/master_grpc_server_collection.go b/weed/server/master_grpc_server_collection.go index f8e0785f6..b92d6bcbe 100644 --- a/weed/server/master_grpc_server_collection.go +++ b/weed/server/master_grpc_server_collection.go @@ -4,6 +4,7 @@ import ( "context" "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go index 8fc56e9b8..856c07890 100644 --- a/weed/server/master_grpc_server_volume.go +++ b/weed/server/master_grpc_server_volume.go @@ -5,10 +5,11 @@ import ( "fmt" "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/topology" ) @@ -52,7 +53,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest if req.Replication == "" { req.Replication = ms.option.DefaultReplicaPlacement } - replicaPlacement, err := 
storage.NewReplicaPlacementFromString(req.Replication) + replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication) if err != nil { return nil, err } @@ -108,7 +109,7 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic if req.Replication == "" { req.Replication = ms.option.DefaultReplicaPlacement } - replicaPlacement, err := storage.NewReplicaPlacementFromString(req.Replication) + replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication) if err != nil { return nil, err } diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 33a5129da..a9ae6b888 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -1,7 +1,6 @@ package weed_server import ( - "context" "fmt" "net/http" "net/http/httputil" @@ -14,6 +13,9 @@ import ( "time" "github.com/chrislusf/raft" + "github.com/gorilla/mux" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" @@ -22,9 +24,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/gorilla/mux" - "github.com/spf13/viper" - "google.golang.org/grpc" ) const ( @@ -69,7 +68,7 @@ type MasterServer struct { func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer { - v := viper.GetViper() + v := util.GetViper() signingKey := v.GetString("jwt.signing.key") v.SetDefault("jwt.signing.expires_after_seconds", 10) expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") @@ -83,13 +82,13 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste preallocateSize = int64(option.VolumeSizeLimitMB) * (1 << 20) } - grpcDialOption := security.LoadClientTLS(v.Sub("grpc"), "master") + grpcDialOption := security.LoadClientTLS(v, "grpc.master") ms := &MasterServer{ option: option, preallocateSize: preallocateSize, clientChans: make(map[string]chan *master_pb.VolumeLocation), grpcDialOption: grpcDialOption, - MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "master", peers), + MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", 0, peers), } ms.bounedLeaderChan = make(chan int, 16) @@ -115,9 +114,11 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler))) r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler))) r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler)) - r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) - r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) - r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) + /* + r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) + r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) + r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) + */ r.HandleFunc("/{fileId}", ms.redirectHandler) } @@ -183,7 +184,7 @@ func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Requ func (ms *MasterServer) startAdminScripts() { var err error - v := viper.GetViper() + v := util.GetViper() adminScripts := v.GetString("master.maintenance.scripts") glog.V(0).Infof("adminScripts:\n%v", adminScripts) if adminScripts 
== "" { @@ -201,7 +202,7 @@ func (ms *MasterServer) startAdminScripts() { masterAddress := "localhost:" + strconv.Itoa(ms.option.Port) var shellOptions shell.ShellOptions - shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "master") + shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master") shellOptions.Masters = &masterAddress shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, err = util.ParseFilerUrl(filerURL) @@ -220,7 +221,7 @@ func (ms *MasterServer) startAdminScripts() { commandEnv.MasterClient.WaitUntilConnected() c := time.Tick(time.Duration(sleepMinutes) * time.Minute) - for _ = range c { + for range c { if ms.Topo.IsLeader() { for _, line := range scriptLines { @@ -250,7 +251,7 @@ func (ms *MasterServer) startAdminScripts() { func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer { var seq sequence.Sequencer - v := viper.GetViper() + v := util.GetViper() seqType := strings.ToLower(v.GetString(SequencerType)) glog.V(1).Infof("[%s] : [%s]", SequencerType, seqType) switch strings.ToLower(seqType) { diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index a5d976008..5d0986f97 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -10,9 +10,9 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -61,7 +61,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque return } } - glog.Infoln("garbageThreshold =", gcThreshold) + // glog.Infoln("garbageThreshold =", gcThreshold) ms.Topo.Vacuum(ms.grpcDialOption, gcThreshold, ms.preallocateSize) ms.dirStatusHandler(w, r) } @@ -145,7 +145,7 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr if replicationString == "" { replicationString = ms.option.DefaultReplicaPlacement } - replicaPlacement, err := storage.NewReplicaPlacementFromString(replicationString) + replicaPlacement, err := super_block.NewReplicaPlacementFromString(replicationString) if err != nil { return nil, err } diff --git a/weed/server/msg_broker_grpc_server.go b/weed/server/msg_broker_grpc_server.go new file mode 100644 index 000000000..8b13aac76 --- /dev/null +++ b/weed/server/msg_broker_grpc_server.go @@ -0,0 +1,23 @@ +package weed_server + +import ( + "context" + + "github.com/chrislusf/seaweedfs/weed/pb/queue_pb" +) + +func (broker *MessageBroker) ConfigureTopic(context.Context, *queue_pb.ConfigureTopicRequest) (*queue_pb.ConfigureTopicResponse, error) { + panic("implement me") +} + +func (broker *MessageBroker) DeleteTopic(context.Context, *queue_pb.DeleteTopicRequest) (*queue_pb.DeleteTopicResponse, error) { + panic("implement me") +} + +func (broker *MessageBroker) StreamWrite(queue_pb.SeaweedQueue_StreamWriteServer) error { + panic("implement me") +} + +func (broker *MessageBroker) StreamRead(*queue_pb.ReadMessageRequest, queue_pb.SeaweedQueue_StreamReadServer) error { + panic("implement me") +} diff --git a/weed/server/msg_broker_server.go b/weed/server/msg_broker_server.go new file mode 100644 index 
000000000..a9d908581 --- /dev/null +++ b/weed/server/msg_broker_server.go @@ -0,0 +1,121 @@ +package weed_server + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type MessageBrokerOption struct { + Filers []string + DefaultReplication string + MaxMB int + Port int +} + +type MessageBroker struct { + option *MessageBrokerOption + grpcDialOption grpc.DialOption +} + +func NewMessageBroker(option *MessageBrokerOption) (messageBroker *MessageBroker, err error) { + + messageBroker = &MessageBroker{ + option: option, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.msg_broker"), + } + + go messageBroker.loopForEver() + + return messageBroker, nil +} + +func (broker *MessageBroker) loopForEver() { + + for { + broker.checkPeers() + time.Sleep(3 * time.Second) + } + +} + +func (broker *MessageBroker) checkPeers() { + + // contact a filer about masters + var masters []string + for _, filer := range broker.option.Filers { + err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return err + } + masters = append(masters, resp.Masters...) + return nil + }) + if err != nil { + fmt.Printf("failed to read masters from %+v: %v\n", broker.option.Filers, err) + return + } + } + + // contact each masters for filers + var filers []string + for _, master := range masters { + err := broker.withMasterClient(master, func(client master_pb.SeaweedClient) error { + resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{ + ClientType: "filer", + }) + if err != nil { + return err + } + + fmt.Printf("filers: %+v\n", resp.GrpcAddresses) + filers = append(filers, resp.GrpcAddresses...) + + return nil + }) + if err != nil { + fmt.Printf("failed to list filers: %v\n", err) + return + } + } + + // contact each filer about brokers + for _, filer := range filers { + err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return err + } + masters = append(masters, resp.Masters...) 
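For context, the peer discovery above rides on the new ListMasterClients RPC added to master_grpc_server.go earlier in this patch: the master tracks connected clients under "<type>@<address>" keys and hands back the address part. A minimal sketch of a standalone caller, reusing the pb helper the broker itself wraps (illustrative only, not part of the diff; the package and the listFilers name are hypothetical):

package example // hypothetical illustration, not in the SeaweedFS tree

import (
	"context"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// listFilers asks one master for the gRPC addresses of all connected filers.
func listFilers(master string, grpcDialOption grpc.DialOption) (filers []string, err error) {
	err = pb.WithMasterClient(master, grpcDialOption, func(client master_pb.SeaweedClient) error {
		// ClientType "filer" matches the "filer@<address>" keys kept by addClient
		resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{
			ClientType: "filer",
		})
		if err != nil {
			return err
		}
		filers = resp.GrpcAddresses
		return nil
	})
	return
}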
+ return nil + }) + if err != nil { + fmt.Printf("failed to read masters from %+v: %v\n", broker.option.Filers, err) + return + } + } + +} + +func (broker *MessageBroker) withFilerClient(filer string, fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithFilerClient(filer, broker.grpcDialOption, fn) + +} + +func (broker *MessageBroker) withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error { + + return pb.WithMasterClient(master, broker.grpcDialOption, func(client master_pb.SeaweedClient) error { + return fn(client) + }) + +} diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 53289f1c1..0381c7feb 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -2,8 +2,6 @@ package weed_server import ( "encoding/json" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" "io/ioutil" "os" "path" @@ -11,7 +9,12 @@ import ( "sort" "time" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/topology" ) @@ -61,7 +64,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d s.raftServer.Start() for _, peer := range s.peers { - s.raftServer.AddPeer(peer, util.ServerToGrpcAddress(peer)) + s.raftServer.AddPeer(peer, pb.ServerToGrpcAddress(peer)) } s.GrpcServer = raft.NewGrpcServer(s.raftServer) @@ -72,7 +75,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d _, err := s.raftServer.Do(&raft.DefaultJoinCommand{ Name: s.raftServer.Name(), - ConnectionString: util.ServerToGrpcAddress(s.serverAddr), + ConnectionString: pb.ServerToGrpcAddress(s.serverAddr), }) if err != nil { diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go index c631d2535..27b21ac09 100644 --- a/weed/server/volume_grpc_admin.go +++ b/weed/server/volume_grpc_admin.go @@ -2,10 +2,14 @@ package weed_server import ( "context" + "fmt" + "path/filepath" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) { @@ -96,6 +100,41 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb. 
} +func (vs *VolumeServer) VolumeConfigure(ctx context.Context, req *volume_server_pb.VolumeConfigureRequest) (*volume_server_pb.VolumeConfigureResponse, error) { + + resp := &volume_server_pb.VolumeConfigureResponse{} + + // check replication format + if _, err := super_block.NewReplicaPlacementFromString(req.Replication); err != nil { + resp.Error = fmt.Sprintf("volume configure replication %v: %v", req, err) + return resp, nil + } + + // unmount + if err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)); err != nil { + glog.Errorf("volume configure unmount %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure unmount %v: %v", req, err) + return resp, nil + } + + // modify the volume info file + if err := vs.store.ConfigureVolume(needle.VolumeId(req.VolumeId), req.Replication); err != nil { + glog.Errorf("volume configure %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure %v: %v", req, err) + return resp, nil + } + + // mount + if err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)); err != nil { + glog.Errorf("volume configure mount %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure mount %v: %v", req, err) + return resp, nil + } + + return resp, nil + +} + func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_server_pb.VolumeMarkReadonlyRequest) (*volume_server_pb.VolumeMarkReadonlyResponse, error) { resp := &volume_server_pb.VolumeMarkReadonlyResponse{} @@ -111,3 +150,19 @@ func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_serv return resp, err } + +func (vs *VolumeServer) VolumeServerStatus(ctx context.Context, req *volume_server_pb.VolumeServerStatusRequest) (*volume_server_pb.VolumeServerStatusResponse, error) { + + resp := &volume_server_pb.VolumeServerStatusResponse{} + + for _, loc := range vs.store.Locations { + if dir, e := filepath.Abs(loc.Directory); e == nil { + resp.DiskStatuses = append(resp.DiskStatuses, stats.NewDiskStatus(dir)) + } + } + + resp.MemoryStatus = stats.MemStat() + + return resp, nil + +} diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 731675b48..1f4d9df10 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -5,15 +5,18 @@ import ( "net" "time" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" - "github.com/spf13/viper" - "google.golang.org/grpc" + + "golang.org/x/net/context" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/util" - "golang.org/x/net/context" ) func (vs *VolumeServer) GetMaster() string { @@ -25,7 +28,7 @@ func (vs *VolumeServer) heartbeat() { vs.store.SetDataCenter(vs.dataCenter) vs.store.SetRack(vs.rack) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "volume") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.volume") var err error var newLeader string @@ -34,13 +37,13 @@ func (vs *VolumeServer) heartbeat() { if newLeader != "" { master = newLeader } - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) + masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(master) if parseErr != nil { glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr) continue } vs.store.MasterAddress = master - 
newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) + newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) if err != nil { glog.V(0).Infof("heartbeat error: %v", err) time.Sleep(time.Duration(vs.pulseSeconds) * time.Second) @@ -51,16 +54,16 @@ func (vs *VolumeServer) heartbeat() { } } -func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { +func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { - grpcConection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption) + grpcConection, err := pb.GrpcDial(context.Background(), masterGrpcAddress, grpcDialOption) if err != nil { return "", fmt.Errorf("fail to dial %s : %v", masterNode, err) } defer grpcConection.Close() client := master_pb.NewSeaweedClient(grpcConection) - stream, err := client.SendHeartbeat(ctx) + stream, err := client.SendHeartbeat(context.Background()) if err != nil { glog.V(0).Infof("SendHeartbeat to %s: %v", masterNode, err) return "", err @@ -90,6 +93,9 @@ func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcA vs.MetricsAddress = in.GetMetricsAddress() vs.MetricsIntervalSec = int(in.GetMetricsIntervalSeconds()) } + if len(in.StorageBackends) > 0 { + backend.LoadFromPbStorageBackends(in.StorageBackends) + } } }() diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go index 711a3ebad..5cc75e74c 100644 --- a/weed/server/volume_grpc_copy.go +++ b/weed/server/volume_grpc_copy.go @@ -20,7 +20,7 @@ import ( const BufferSizeLimit = 1024 * 1024 * 2 -// VolumeCopy copy the .idx .dat files, and mount the volume +// VolumeCopy copy the .idx .dat .vif files, and mount the volume func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.VolumeCopyRequest) (*volume_server_pb.VolumeCopyResponse, error) { v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) @@ -43,7 +43,7 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo var volumeFileName, idxFileName, datFileName string err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { var err error - volFileInfoResp, err = client.ReadVolumeFileStatus(ctx, + volFileInfoResp, err = client.ReadVolumeFileStatus(context.Background(), &volume_server_pb.ReadVolumeFileStatusRequest{ VolumeId: req.VolumeId, }) @@ -55,11 +55,15 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo // println("source:", volFileInfoResp.String()) // copy ecx file - if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false); err != nil { + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil { return err } - if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false); err != nil { + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, 
volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil { + return err + } + + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil { return err } @@ -70,12 +74,9 @@ datFileName = volumeFileName + ".dat" if err != nil && volumeFileName != "" { - if idxFileName != "" { - os.Remove(idxFileName) - } - if datFileName != "" { - os.Remove(datFileName) - } + os.Remove(idxFileName) + os.Remove(datFileName) + os.Remove(volumeFileName + ".vif") return nil, err } @@ -94,16 +95,16 @@ }, err } -func (vs *VolumeServer) doCopyFile(ctx context.Context, client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid uint32, - compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend bool) error { +func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid, compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend, ignoreSourceFileNotFound bool) error { - copyFileClient, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{ - VolumeId: vid, - Ext: ext, - CompactionRevision: compactRevision, - StopOffset: stopOffset, - Collection: collection, - IsEcVolume: isEcVolume, + copyFileClient, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{ + VolumeId: vid, + Ext: ext, + CompactionRevision: compactRevision, + StopOffset: stopOffset, + Collection: collection, + IsEcVolume: isEcVolume, + IgnoreSourceFileNotFound: ignoreSourceFileNotFound, }) if err != nil { return fmt.Errorf("failed to start copying volume %d %s file: %v", vid, ext, err) @@ -213,6 +214,9 @@ } } if fileName == "" { + if req.IgnoreSourceFileNotFound { + return nil + } return fmt.Errorf("CopyFile not found ec volume id %d", req.VolumeId) } } @@ -221,6 +225,9 @@ file, err := os.Open(fileName) if err != nil { + if req.IgnoreSourceFileNotFound && os.IsNotExist(err) { + return nil + } return err } defer file.Close() diff --git a/weed/server/volume_grpc_copy_incremental.go b/weed/server/volume_grpc_copy_incremental.go index 6c5bb8a62..6d6c3daa3 100644 --- a/weed/server/volume_grpc_copy_incremental.go +++ b/weed/server/volume_grpc_copy_incremental.go @@ -47,7 +47,7 @@ } -func sendFileContent(datBackend backend.DataStorageBackend, buf []byte, startOffset, stopOffset int64, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error { +func sendFileContent(datBackend backend.BackendStorageFile, buf []byte, startOffset, stopOffset int64, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error { var blockSizeLimit = int64(len(buf)) for i := int64(0); i < stopOffset-startOffset; i += blockSizeLimit { n, readErr := datBackend.ReadAt(buf, startOffset+i) diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go index 242480197..66dd5bf8d 100644 --- a/weed/server/volume_grpc_erasure_coding.go +++ b/weed/server/volume_grpc_erasure_coding.go @@ -8,10 +8,12 @@ 
import ( "math" "os" "path" + "path/filepath" "strings" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" @@ -24,7 +26,7 @@ import ( Steps to apply erasure coding to .dat .idx files 0. ensure the volume is readonly -1. client call VolumeEcShardsGenerate to generate the .ecx and .ec01~.ec14 files +1. client call VolumeEcShardsGenerate to generate the .ecx and .ec00 ~ .ec13 files 2. client ask master for possible servers to hold the ec files, at least 4 servers 3. client call VolumeEcShardsCopy on above target servers to copy ec files from the source server 4. target servers report the new ec files to the master @@ -33,7 +35,7 @@ Steps to apply erasure coding to .dat .idx files */ -// VolumeEcShardsGenerate generates the .ecx and .ec01 ~ .ec14 files +// VolumeEcShardsGenerate generates the .ecx and .ec00 ~ .ec13 files func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) { v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) @@ -47,19 +49,24 @@ } // write .ecx file - if err := erasure_coding.WriteSortedEcxFile(baseFileName); err != nil { - return nil, fmt.Errorf("WriteSortedEcxFile %s: %v", baseFileName, err) + if err := erasure_coding.WriteSortedFileFromIdx(baseFileName, ".ecx"); err != nil { + return nil, fmt.Errorf("WriteSortedFileFromIdx %s: %v", baseFileName, err) } - // write .ec01 ~ .ec14 files + // write .ec00 ~ .ec13 files if err := erasure_coding.WriteEcFiles(baseFileName); err != nil { return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) } + // write the .vif file + if err := pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(v.Version())}); err != nil { + return nil, fmt.Errorf("SaveVolumeInfo %s: %v", baseFileName, err) + } + return &volume_server_pb.VolumeEcShardsGenerateResponse{}, nil } -// VolumeEcShardsRebuild generates the any of the missing .ec01 ~ .ec14 files +// VolumeEcShardsRebuild generates any of the missing .ec00 ~ .ec13 files func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_server_pb.VolumeEcShardsRebuildRequest) (*volume_server_pb.VolumeEcShardsRebuildResponse, error) { baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) @@ -68,7 +75,7 @@ for _, location := range vs.store.Locations { if util.FileExists(path.Join(location.Directory, baseFileName+".ecx")) { - // write .ec01 ~ .ec14 files + // write .ec00 ~ .ec13 files baseFileName = path.Join(location.Directory, baseFileName) if generatedShardIds, err := erasure_coding.RebuildEcFiles(baseFileName); err != nil { return nil, fmt.Errorf("RebuildEcFiles %s: %v", baseFileName, err) @@ -103,23 +110,32 @@ // copy ec data slices for _, shardId := range req.ShardIds { - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false); err != nil { + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, 
math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil { return err } } - if !req.CopyEcxFile { + if req.CopyEcxFile { + + // copy ecx file + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil { + return err + } return nil } - // copy ecx file - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false); err != nil { - return err + if req.CopyEcjFile { + // copy ecj file + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil { + return err + } } - // copy ecj file - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true); err != nil { - return err + if req.CopyVifFile { + // copy vif file + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil { + return err + } } return nil @@ -137,6 +153,8 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + glog.V(0).Infof("ec volume %d shard delete %v", req.VolumeId, req.ShardIds) + found := false for _, location := range vs.store.Locations { if util.FileExists(path.Join(location.Directory, baseFilename+".ecx")) { @@ -153,21 +171,27 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se return nil, nil } - // check whether to delete the ecx file also + // check whether to delete the .ecx and .ecj file also hasEcxFile := false + hasIdxFile := false existingShardCount := 0 + bName := filepath.Base(baseFilename) for _, location := range vs.store.Locations { fileInfos, err := ioutil.ReadDir(location.Directory) if err != nil { continue } for _, fileInfo := range fileInfos { - if fileInfo.Name() == baseFilename+".ecx" { + if fileInfo.Name() == bName+".ecx" || fileInfo.Name() == bName+".ecj" { hasEcxFile = true continue } - if strings.HasPrefix(fileInfo.Name(), baseFilename+".ec") { + if fileInfo.Name() == bName+".idx" { + hasIdxFile = true + continue + } + if strings.HasPrefix(fileInfo.Name(), bName+".ec") { existingShardCount++ } } @@ -181,6 +205,10 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se return nil, err } } + if !hasIdxFile { + // .vif is used for ec volumes and normal volumes + os.Remove(baseFilename + ".vif") + } return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil } @@ -317,3 +345,35 @@ func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_serv return resp, nil } + +// VolumeEcShardsToVolume generates the .idx, .dat files from .ecx, .ecj and .ec01 ~ .ec14 files +func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_server_pb.VolumeEcShardsToVolumeRequest) (*volume_server_pb.VolumeEcShardsToVolumeResponse, error) { + + v, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId)) + if !found { + return nil, fmt.Errorf("ec volume %d not found", req.VolumeId) + } + baseFileName := v.FileName() + + if v.Collection != req.Collection { + return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + + // calculate .dat file size + datFileSize, err := erasure_coding.FindDatFileSize(baseFileName) + 
if err != nil { + return nil, fmt.Errorf("FindDatFileSize %s: %v", baseFileName, err) + } + + // write .dat file from .ec00 ~ .ec09 files + if err := erasure_coding.WriteDatFile(baseFileName, datFileSize); err != nil { + return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) + } + + // write .idx file from .ecx and .ecj files + if err := erasure_coding.WriteIdxFileFromEcIndex(baseFileName); err != nil { + return nil, fmt.Errorf("WriteIdxFileFromEcIndex %s: %v", baseFileName, err) + } + + return &volume_server_pb.VolumeEcShardsToVolumeResponse{}, nil +} diff --git a/weed/server/volume_grpc_file.go b/weed/server/volume_grpc_file.go new file mode 100644 index 000000000..4d71ddeb1 --- /dev/null +++ b/weed/server/volume_grpc_file.go @@ -0,0 +1,129 @@ +package weed_server + +import ( + "encoding/json" + "net/http" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (vs *VolumeServer) FileGet(req *volume_server_pb.FileGetRequest, stream volume_server_pb.VolumeServer_FileGetServer) error { + + headResponse := &volume_server_pb.FileGetResponse{} + n := new(needle.Needle) + + commaIndex := strings.LastIndex(req.FileId, ",") + vid := req.FileId[:commaIndex] + fid := req.FileId[commaIndex+1:] + + volumeId, err := needle.NewVolumeId(vid) + if err != nil { + headResponse.ErrorCode = http.StatusBadRequest + return stream.Send(headResponse) + } + err = n.ParsePath(fid) + if err != nil { + headResponse.ErrorCode = http.StatusBadRequest + return stream.Send(headResponse) + } + + hasVolume := vs.store.HasVolume(volumeId) + _, hasEcVolume := vs.store.FindEcVolume(volumeId) + + if !hasVolume && !hasEcVolume { + headResponse.ErrorCode = http.StatusMovedPermanently + return stream.Send(headResponse) + } + + cookie := n.Cookie + var count int + if hasVolume { + count, err = vs.store.ReadVolumeNeedle(volumeId, n) + } else if hasEcVolume { + count, err = vs.store.ReadEcShardNeedle(volumeId, n) + } + + if err != nil || count < 0 { + headResponse.ErrorCode = http.StatusNotFound + return stream.Send(headResponse) + } + if n.Cookie != cookie { + headResponse.ErrorCode = http.StatusNotFound + return stream.Send(headResponse) + } + + if n.LastModified != 0 { + headResponse.LastModified = n.LastModified + } + + headResponse.Etag = n.Etag() + + if n.HasPairs() { + pairMap := make(map[string]string) + err = json.Unmarshal(n.Pairs, &pairMap) + if err != nil { + glog.V(0).Infoln("Unmarshal pairs error:", err) + } + headResponse.Headers = pairMap + } + + /* + // skip this, no redirection + if vs.tryHandleChunkedFile(n, filename, w, r) { + return + } + */ + + if n.NameSize > 0 { + headResponse.Filename = string(n.Name) + } + mtype := "" + if n.MimeSize > 0 { + mt := string(n.Mime) + if !strings.HasPrefix(mt, "application/octet-stream") { + mtype = mt + } + } + headResponse.ContentType = mtype + + headResponse.IsGzipped = n.IsGzipped() + + if n.IsGzipped() && req.AcceptGzip { + if n.Data, err = util.UnGzipData(n.Data); err != nil { + glog.V(0).Infof("ungzip %s error: %v", req.FileId, err) + } + } + + headResponse.ContentLength = uint32(len(n.Data)) + bytesToRead := len(n.Data) + bytesRead := 0 + + t := headResponse + + for bytesRead < bytesToRead { + + stopIndex := bytesRead + BufferSizeLimit + if stopIndex > bytesToRead { + stopIndex = bytesToRead + } + + if t == nil { + t = &volume_server_pb.FileGetResponse{} + } + t.Data = 
n.Data[bytesRead:stopIndex] + + err = stream.Send(t) + t = nil + if err != nil { + return err + } + + bytesRead = stopIndex + } + + return nil +} diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go index 1bf61e1c7..c26d6ed8f 100644 --- a/weed/server/volume_grpc_tail.go +++ b/weed/server/volume_grpc_tail.go @@ -10,6 +10,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderRequest, stream volume_server_pb.VolumeServer_VolumeTailSenderServer) error { @@ -101,7 +102,7 @@ type VolumeFileScanner4Tailing struct { lastProcessedTimestampNs uint64 } -func (scanner *VolumeFileScanner4Tailing) VisitSuperBlock(superBlock storage.SuperBlock) error { +func (scanner *VolumeFileScanner4Tailing) VisitSuperBlock(superBlock super_block.SuperBlock) error { return nil } diff --git a/weed/server/volume_grpc_tier_download.go b/weed/server/volume_grpc_tier_download.go new file mode 100644 index 000000000..7b3982e40 --- /dev/null +++ b/weed/server/volume_grpc_tier_download.go @@ -0,0 +1,85 @@ +package weed_server + +import ( + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +// VolumeTierMoveDatFromRemote copies the .dat file from a remote tier to the local volume server +func (vs *VolumeServer) VolumeTierMoveDatFromRemote(req *volume_server_pb.VolumeTierMoveDatFromRemoteRequest, stream volume_server_pb.VolumeServer_VolumeTierMoveDatFromRemoteServer) error { + + // find existing volume + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("volume %d not found", req.VolumeId) + } + + // verify the collection + if v.Collection != req.Collection { + return fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + + // locate the remote file + storageName, storageKey := v.RemoteStorageNameKey() + if storageName == "" || storageKey == "" { + return fmt.Errorf("volume %d is already on local disk", req.VolumeId) + } + + // check whether the local .dat already exists + _, ok := v.DataBackend.(*backend.DiskFile) + if ok { + return fmt.Errorf("volume %d is already on local disk", req.VolumeId) + } + + // check valid storage backend type + backendStorage, found := backend.BackendStorages[storageName] + if !found { + var keys []string + for key := range backend.BackendStorages { + keys = append(keys, key) + } + return fmt.Errorf("remote storage %s not found from supported: %v", storageName, keys) + } + + startTime := time.Now() + fn := func(progressed int64, percentage float32) error { + now := time.Now() + if now.Sub(startTime) < time.Second { + return nil + } + startTime = now + return stream.Send(&volume_server_pb.VolumeTierMoveDatFromRemoteResponse{ + Processed: progressed, + ProcessedPercentage: percentage, + }) + } + // copy the data file + _, err := backendStorage.DownloadFile(v.FileName()+".dat", storageKey, fn) + if err != nil { + return fmt.Errorf("backend %s copy file %s: %v", storageName, v.FileName()+".dat", err) + } + + if req.KeepRemoteDatFile { + return nil + } + + // remove remote file + if err := backendStorage.DeleteFile(storageKey); err != nil { + return fmt.Errorf("volume %d fail to delete remote file %s: %v", v.Id, 
storageKey, err) + } + + // forget remote file + v.GetVolumeInfo().Files = v.GetVolumeInfo().Files[1:] + if err := v.SaveVolumeInfo(); err != nil { + return fmt.Errorf("volume %d fail to save remote file info: %v", v.Id, err) + } + + v.DataBackend.Close() + v.DataBackend = nil + + return nil +} diff --git a/weed/server/volume_grpc_tier_upload.go b/weed/server/volume_grpc_tier_upload.go new file mode 100644 index 000000000..c9694df59 --- /dev/null +++ b/weed/server/volume_grpc_tier_upload.go @@ -0,0 +1,100 @@ +package weed_server + +import ( + "fmt" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +// VolumeTierMoveDatToRemote copies the .dat file to a remote tier +func (vs *VolumeServer) VolumeTierMoveDatToRemote(req *volume_server_pb.VolumeTierMoveDatToRemoteRequest, stream volume_server_pb.VolumeServer_VolumeTierMoveDatToRemoteServer) error { + + // find existing volume + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("volume %d not found", req.VolumeId) + } + + // verify the collection + if v.Collection != req.Collection { + return fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + + // locate the disk file + diskFile, ok := v.DataBackend.(*backend.DiskFile) + if !ok { + return fmt.Errorf("volume %d is not on local disk", req.VolumeId) + } + + // check valid storage backend type + backendStorage, found := backend.BackendStorages[req.DestinationBackendName] + if !found { + var keys []string + for key := range backend.BackendStorages { + keys = append(keys, key) + } + return fmt.Errorf("destination %s not found, supported: %v", req.DestinationBackendName, keys) + } + + // check whether the existing backend storage is the same as requested + // if same, skip + backendType, backendId := backend.BackendNameToTypeId(req.DestinationBackendName) + for _, remoteFile := range v.GetVolumeInfo().GetFiles() { + if remoteFile.BackendType == backendType && remoteFile.BackendId == backendId { + return fmt.Errorf("destination %s already exists", req.DestinationBackendName) + } + } + + startTime := time.Now() + fn := func(progressed int64, percentage float32) error { + now := time.Now() + if now.Sub(startTime) < time.Second { + return nil + } + startTime = now + return stream.Send(&volume_server_pb.VolumeTierMoveDatToRemoteResponse{ + Processed: progressed, + ProcessedPercentage: percentage, + }) + } + + // remember the file's original source + attributes := make(map[string]string) + attributes["volumeId"] = v.Id.String() + attributes["collection"] = v.Collection + attributes["ext"] = ".dat" + // copy the data file + key, size, err := backendStorage.CopyFile(diskFile.File, attributes, fn) + if err != nil { + return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err) + } + + // save the remote file to volume tier info + v.GetVolumeInfo().Files = append(v.GetVolumeInfo().GetFiles(), &volume_server_pb.RemoteFile{ + BackendType: backendType, + BackendId: backendId, + Key: key, + Offset: 0, + FileSize: uint64(size), + ModifiedTime: uint64(time.Now().Unix()), + Extension: ".dat", + }) + + if err := v.SaveVolumeInfo(); err != nil { + return fmt.Errorf("volume %d fail to save remote file info: %v", v.Id, err) + } + + if err := v.LoadRemoteFile(); err != nil { + return fmt.Errorf("volume %d fail to load remote file: %v", v.Id, err) + } + + if 
!req.KeepLocalDatFile { + os.Remove(v.FileName() + ".dat") + } + + return nil +} diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index 6cf654738..2d716edc1 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -4,13 +4,14 @@ import ( "fmt" "net/http" - "github.com/chrislusf/seaweedfs/weed/stats" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage" - "github.com/spf13/viper" ) type VolumeServer struct { @@ -29,6 +30,7 @@ type VolumeServer struct { compactionBytePerSecond int64 MetricsAddress string MetricsIntervalSec int + fileSizeLimitBytes int64 } func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, @@ -41,9 +43,10 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, fixJpgOrientation bool, readRedirect bool, compactionMBPerSecond int, + fileSizeLimitMB int, ) *VolumeServer { - v := viper.GetViper() + v := util.GetViper() signingKey := v.GetString("jwt.signing.key") v.SetDefault("jwt.signing.expires_after_seconds", 10) expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") @@ -60,8 +63,9 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, needleMapKind: needleMapKind, FixJpgOrientation: fixJpgOrientation, ReadRedirect: readRedirect, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "volume"), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.volume"), compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024, + fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024, } vs.SeedMasterNodes = masterNodes vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) @@ -73,9 +77,11 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, // only expose the volume server details for safe environments adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler) adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler)) - adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) - adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) - adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + /* + adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) + adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) + adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + */ } adminMux.HandleFunc("/", vs.privateStoreHandler) if publicMux != adminMux { diff --git a/weed/server/volume_server_handlers_admin.go b/weed/server/volume_server_handlers_admin.go index 25b6582f7..89bc051c5 100644 --- a/weed/server/volume_server_handlers_admin.go +++ b/weed/server/volume_server_handlers_admin.go @@ -12,7 +12,14 @@ import ( func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) m["Version"] = util.VERSION - m["Volumes"] = vs.store.Status() + var ds []*volume_server_pb.DiskStatus + for _, loc := range vs.store.Locations { + if dir, e := filepath.Abs(loc.Directory); e == nil { + ds = append(ds, stats.NewDiskStatus(dir)) + } + } + m["DiskStatuses"] = ds + m["Volumes"] = vs.store.VolumeInfos() writeJsonQuiet(w, r, http.StatusOK, m) } diff --git a/weed/server/volume_server_handlers_read.go 
b/weed/server/volume_server_handlers_read.go index cd11356b9..6e603d158 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -2,21 +2,18 @@ package weed_server import ( "bytes" - "context" + "encoding/json" "errors" "fmt" "io" "mime" - "mime/multipart" "net/http" "net/url" - "path" + "path/filepath" "strconv" "strings" "time" - "encoding/json" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/images" "github.com/chrislusf/seaweedfs/weed/operation" @@ -54,7 +51,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) return } - glog.V(4).Infoln("volume", volumeId, "reading", n) + // glog.V(4).Infoln("volume", volumeId, "reading", n) hasVolume := vs.store.HasVolume(volumeId) _, hasEcVolume := vs.store.FindEcVolume(volumeId) if !hasVolume && !hasEcVolume { @@ -86,9 +83,9 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) if hasVolume { count, err = vs.store.ReadVolumeNeedle(volumeId, n) } else if hasEcVolume { - count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n) + count, err = vs.store.ReadEcShardNeedle(volumeId, n) } - glog.V(4).Infoln("read bytes", count, "error", err) + // glog.V(4).Infoln("read bytes", count, "error", err) if err != nil || count < 0 { glog.V(0).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err) w.WriteHeader(http.StatusNotFound) @@ -114,11 +111,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) w.WriteHeader(http.StatusNotModified) return } - if r.Header.Get("ETag-MD5") == "True" { - setEtag(w, n.MD5()) - } else { - setEtag(w, n.Etag()) - } + setEtag(w, n.Etag()) if n.HasPairs() { pairMap := make(map[string]string) @@ -138,7 +131,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) if n.NameSize > 0 && filename == "" { filename = string(n.Name) if ext == "" { - ext = path.Ext(filename) + ext = filepath.Ext(filename) } } mtype := "" @@ -182,7 +175,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, fileName = chunkManifest.Name } - ext := path.Ext(fileName) + ext := filepath.Ext(fileName) mType := "" if chunkManifest.Mime != "" { @@ -229,113 +222,28 @@ func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext strin func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error { totalSize, e := rs.Seek(0, 2) if mimeType == "" { - if ext := path.Ext(filename); ext != "" { + if ext := filepath.Ext(filename); ext != "" { mimeType = mime.TypeByExtension(ext) } } if mimeType != "" { w.Header().Set("Content-Type", mimeType) } - if filename != "" { - contentDisposition := "inline" - if r.FormValue("dl") != "" { - if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl { - contentDisposition = "attachment" - } - } - w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`) - } w.Header().Set("Accept-Ranges", "bytes") + if r.Method == "HEAD" { w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) return nil } - rangeReq := r.Header.Get("Range") - if rangeReq == "" { - w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if _, e = rs.Seek(0, 0); e != nil { - return e - } - _, e = io.Copy(w, rs) - return e - } - //the rest is dealing with partial content request - //mostly copy from src/pkg/net/http/fs.go - ranges, err := parseRange(rangeReq, 
totalSize) - if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return nil - } - if sumRangesSize(ranges) > totalSize { - // The total number of bytes in all the ranges - // is larger than the size of the file by - // itself, so this is probably an attack, or a - // dumb client. Ignore the range request. - return nil - } - if len(ranges) == 0 { - return nil - } - if len(ranges) == 1 { - // RFC 2616, Section 14.16: - // "When an HTTP message includes the content of a single - // range (for example, a response to a request for a - // single range, or to a request for a set of ranges - // that overlap without any holes), this content is - // transmitted with a Content-Range header, and a - // Content-Length header showing the number of bytes - // actually transferred. - // ... - // A response to a request for a single range MUST NOT - // be sent using the multipart/byteranges media type." - ra := ranges[0] - w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) - w.Header().Set("Content-Range", ra.contentRange(totalSize)) - w.WriteHeader(http.StatusPartialContent) - if _, e = rs.Seek(ra.start, 0); e != nil { + adjustHeadersAfterHEAD(w, r, filename) + + processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + if _, e = rs.Seek(offset, 0); e != nil { return e } - - _, e = io.CopyN(w, rs, ra.length) + _, e = io.CopyN(writer, rs, size) return e - } - // process multiple ranges - for _, ra := range ranges { - if ra.start > totalSize { - http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return nil - } - } - sendSize := rangesMIMESize(ranges, mimeType, totalSize) - pr, pw := io.Pipe() - mw := multipart.NewWriter(pw) - w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) - sendContent := pr - defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
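The change above strips the hand-rolled single-range and multipart/byteranges writing out of writeResponseContent; both paths now go through the shared processRangeRequst helper (defined outside this diff), which presumably parses the Range header and writes the 200/206/416 responses, while the handler only supplies a seek-and-copy callback. A self-contained sketch of that callback contract over a toy reader (illustrative; the package and copyRange name are hypothetical):

package example // hypothetical illustration of the seek-and-copy callback

import (
	"fmt"
	"io"
	"strings"
)

// copyRange mirrors the callback writeResponseContent now passes to the range
// helper: position the ReadSeeker at the absolute offset of one parsed range,
// then copy exactly size bytes to the response writer.
func copyRange(rs io.ReadSeeker, w io.Writer, offset, size int64) error {
	if _, err := rs.Seek(offset, io.SeekStart); err != nil {
		return err
	}
	_, err := io.CopyN(w, rs, size)
	return err
}

func main() {
	rs := strings.NewReader("hello, seaweedfs")
	var sb strings.Builder
	// "Range: bytes=7-15" parses to offset 7, size 15-7+1 = 9
	if err := copyRange(rs, &sb, 7, 9); err != nil {
		panic(err)
	}
	fmt.Println(sb.String()) // prints: seaweedfs
}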
- go func() { - for _, ra := range ranges { - part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) - if e != nil { - pw.CloseWithError(e) - return - } - if _, e = rs.Seek(ra.start, 0); e != nil { - pw.CloseWithError(e) - return - } - if _, e = io.CopyN(part, rs, ra.length); e != nil { - pw.CloseWithError(e) - return - } - } - mw.Close() - pw.Close() - }() - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) - } - w.WriteHeader(http.StatusPartialContent) - _, e = io.CopyN(w, sendContent, sendSize) - return e + }) + return nil } diff --git a/weed/server/volume_server_handlers_ui.go b/weed/server/volume_server_handlers_ui.go index 852f0b751..8d35c9c8b 100644 --- a/weed/server/volume_server_handlers_ui.go +++ b/weed/server/volume_server_handlers_ui.go @@ -8,6 +8,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ui "github.com/chrislusf/seaweedfs/weed/server/volume_server_ui" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -20,19 +21,30 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) ds = append(ds, stats.NewDiskStatus(dir)) } } + volumeInfos := vs.store.VolumeInfos() + var normalVolumeInfos, remoteVolumeInfos []*storage.VolumeInfo + for _, vinfo := range volumeInfos { + if vinfo.IsRemote() { + remoteVolumeInfos = append(remoteVolumeInfos, vinfo) + } else { + normalVolumeInfos = append(normalVolumeInfos, vinfo) + } + } args := struct { - Version string - Masters []string - Volumes interface{} - EcVolumes interface{} - DiskStatuses interface{} - Stats interface{} - Counters *stats.ServerStats + Version string + Masters []string + Volumes interface{} + EcVolumes interface{} + RemoteVolumes interface{} + DiskStatuses interface{} + Stats interface{} + Counters *stats.ServerStats }{ util.VERSION, vs.SeedMasterNodes, - vs.store.Status(), + normalVolumeInfos, vs.store.EcVolumes(), + remoteVolumeInfos, ds, infos, serverStats, diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index db8fcb555..101be4c43 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -1,7 +1,6 @@ package weed_server import ( - "context" "errors" "fmt" "net/http" @@ -43,7 +42,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { return } - needle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation) + needle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation, vs.fileSizeLimitBytes) if ne != nil { writeJsonError(w, r, http.StatusBadRequest, ne) return @@ -51,10 +50,15 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { ret := operation.UploadResult{} _, isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, needle, r) - httpStatus := http.StatusCreated - if isUnchanged { - httpStatus = http.StatusNotModified + + // http 204 status code does not allow body + if writeError == nil && isUnchanged { + setEtag(w, needle.Etag()) + w.WriteHeader(http.StatusNoContent) + return } + + httpStatus := http.StatusCreated if writeError != nil { httpStatus = http.StatusInternalServerError ret.Error = writeError.Error() @@ -64,6 +68,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { } ret.Size = uint32(originalSize) ret.ETag = needle.Etag() + ret.Mime = 
string(needle.Mime) setEtag(w, ret.ETag) writeJsonQuiet(w, r, httpStatus, ret) } @@ -93,7 +98,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { ecVolume, hasEcVolume := vs.store.FindEcVolume(volumeId) if hasEcVolume { - count, err := vs.store.DeleteEcShardNeedle(context.Background(), ecVolume, n, cookie) + count, err := vs.store.DeleteEcShardNeedle(ecVolume, n, cookie) writeDeleteResult(err, count, w, r) return } diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go index eafc0aaeb..1c1394369 100644 --- a/weed/server/volume_server_ui/templates.go +++ b/weed/server/volume_server_ui/templates.go @@ -1,11 +1,29 @@ package master_ui import ( + "fmt" "html/template" "strconv" "strings" ) +func bytesToHumanReadble(b uint64) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := uint64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.2f %ciB", float64(b)/float64(div), "KMGTPE"[exp]) +} + +func percentFrom(total uint64, part_of uint64) string { + return fmt.Sprintf("%.2f", (float64(part_of)/float64(total))*100) +} + func join(data []int64) string { var ret []string for _, d := range data { @@ -15,7 +33,9 @@ func join(data []int64) string { } var funcMap = template.FuncMap{ - "join": join, + "join": join, + "bytesToHumanReadble": bytesToHumanReadble, + "percentFrom": percentFrom, } var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` @@ -57,13 +77,25 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`

Disk Stats
-  <table class="table table-striped">
+  <table class="table table-striped">
+    <thead>
+      <tr>
+        <th>Path</th>
+        <th>Total</th>
+        <th>Free</th>
+        <th>Usage</th>
+      </tr>
+    </thead>
+    <tbody>
   {{ range .DiskStatuses }}
     <tr>
-      <td>{{ .Dir }}</td>
-      <td>{{ .Free }} Bytes Free</td>
+      <td>{{ .Dir }}</td>
+      <td>{{ bytesToHumanReadble .All }}</td>
+      <td>{{ bytesToHumanReadble .Free }}</td>
+      <td>{{ percentFrom .All .Used}}%</td>
     </tr>
   {{ end }}
+    </tbody>
   </table>
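For reference, what the two new helpers render, computed from their definitions in funcMap above (the snippet assumes it sits in the same master_ui package, since the helpers are unexported):

import "fmt"

func ExampleTemplateHelpers() {
	fmt.Println(bytesToHumanReadble(512))     // 512 B
	fmt.Println(bytesToHumanReadble(1536))    // 1.50 KiB
	fmt.Println(bytesToHumanReadble(5 << 30)) // 5.00 GiB
	fmt.Println(percentFrom(2000, 500))       // 25.00
}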
@@ -107,10 +139,11 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
       <th>Id</th>
       <th>Collection</th>
-      <th>Size</th>
+      <th>Data Size</th>
       <th>Files</th>
       <th>Trash</th>
       <th>TTL</th>
+      <th>ReadOnly</th>
@@ -122,6 +155,37 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
       <td>{{ .FileCount }}</td>
       <td>{{ .DeleteCount }} / {{.DeletedByteCount}} Bytes</td>
       <td>{{ .Ttl }}</td>
+      <td>{{ .ReadOnly }}</td>
     </tr>
   {{ end }}
   </tbody>
 </table>
+</div>
+
+<div class="row">
+  <h2>Remote Volumes</h2>
+  <table class="table table-striped">
+    <thead>
+      <tr>
+        <th>Id</th>
+        <th>Collection</th>
+        <th>Size</th>
+        <th>Files</th>
+        <th>Trash</th>
+        <th>Remote</th>
+        <th>Key</th>
+      </tr>
+    </thead>
+    <tbody>
+  {{ range .RemoteVolumes }}
+    <tr>
+      <td>{{ .Id }}</td>
+      <td>{{ .Collection }}</td>
+      <td>{{ bytesToHumanReadble .Size }}</td>
+      <td>{{ .FileCount }}</td>
+      <td>{{ .DeleteCount }} / {{.DeletedByteCount}} Bytes</td>
+      <td>{{ .RemoteStorageName }}</td>
+      <td>{{ .RemoteStorageKey }}</td>
+    </tr>
+  {{ end }}
+    </tbody>
+  </table>
+</div>
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 151b48a78..1fb0912c5 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -1,7 +1,6 @@ package weed_server import ( - "bytes" "context" "fmt" "io" @@ -10,16 +9,17 @@ import ( "strings" "time" + "golang.org/x/net/webdav" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "golang.org/x/net/webdav" - "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" ) type WebDavOption struct { @@ -31,6 +31,7 @@ type WebDavOption struct { Collection string Uid uint32 Gid uint32 + Cipher bool } type WebDavServer struct { @@ -47,7 +48,7 @@ func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) { ws = &WebDavServer{ option: option, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), Handler: &webdav.Handler{ FileSystem: fs, LockSystem: webdav.NewMemLS(), @@ -96,14 +97,17 @@ func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) { }, nil } -func (fs *WebDavFileSystem) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *WebDavFileSystem) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption) } +func (fs *WebDavFileSystem) AdjustedUrl(hostAndPort string) string { + return hostAndPort +} func clearName(name string) (string, error) { slashed := strings.HasSuffix(name, "/") @@ -135,7 +139,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm return os.ErrExist } - return fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(fullDirPath).DirAndName() request := &filer_pb.CreateEntryRequest{ Directory: dir, @@ -153,7 +157,7 @@ } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("mkdir %s/%s: %v", dir, name, err) } @@ -163,7 +167,7 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) { - glog.V(2).Infof("WebDavFileSystem.OpenFile %v", fullFilePath) + glog.V(2).Infof("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag) var err error if fullFilePath, err = clearName(fullFilePath); err != nil { @@ -175,12 +179,6 @@ if strings.HasSuffix(fullFilePath, "/") { return nil, os.ErrInvalid } - // based directory should be exists. 
- dir, _ := path.Split(fullFilePath) - _, err := fs.stat(ctx, dir) - if err != nil { - return nil, os.ErrInvalid - } _, err = fs.stat(ctx, fullFilePath) if err == nil { if flag&os.O_EXCL != 0 { @@ -190,8 +188,8 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f } dir, name := filer2.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{ + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ Directory: dir, Entry: &filer_pb.Entry{ Name: name, @@ -255,7 +253,7 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) //_, err = fs.db.Exec(`delete from filesystem where fullFilePath = ?`, fullFilePath) } dir, name := filer2.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir, @@ -314,7 +312,7 @@ func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) oldDir, oldBaseName := filer2.FullPath(oldName).DirAndName() newDir, newBaseName := filer2.FullPath(newName).DirAndName() - return fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: oldDir, @@ -339,8 +337,10 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F return nil, err } + fullpath := filer2.FullPath(fullFilePath) + var fi FileInfo - entry, err := filer2.GetEntry(ctx, fs, fullFilePath) + entry, err := filer2.GetEntry(fs, fullpath) if entry == nil { return nil, os.ErrNotExist } @@ -348,14 +348,12 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F return nil, err } fi.size = int64(filer2.TotalSize(entry.GetChunks())) - fi.name = fullFilePath + fi.name = string(fullpath) fi.mode = os.FileMode(entry.Attributes.FileMode) fi.modifiledTime = time.Unix(entry.Attributes.Mtime, 0) fi.isDirectory = entry.IsDirectory - _, fi.name = path.Split(path.Clean(fi.name)) - if fi.name == "" { - fi.name = "/" + if fi.name == "/" { fi.modifiledTime = time.Now() fi.isDirectory = true } @@ -373,10 +371,12 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { glog.V(2).Infof("WebDavFileSystem.Write %v", f.name) + dir, _ := filer2.FullPath(f.name).DirAndName() + var err error ctx := context.Background() if f.entry == nil { - f.entry, err = filer2.GetEntry(ctx, f.fs, f.name) + f.entry, err = filer2.GetEntry(f.fs, filer2.FullPath(f.name)) } if f.entry == nil { @@ -388,13 +388,15 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { var fileId, host string var auth security.EncodedJwt + var collection, replication string - if err = f.fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, - Replication: "000", + Replication: "", Collection: f.fs.option.Collection, + ParentPath: dir, } resp, err := client.AssignVolume(ctx, request) @@ -402,8 +404,12 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } + if resp.Error != "" { + 
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) + collection, replication = resp.Collection, resp.Replication return nil }); err != nil { @@ -411,8 +417,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { } fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, f.name, bufReader, false, "application/octet-stream", nil, auth) + uploadResult, err := operation.UploadData(fileUrl, f.name, f.fs.option.Cipher, buf, false, "", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err) return 0, fmt.Errorf("upload data: %v", err) @@ -423,18 +428,21 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { } chunk := &filer_pb.FileChunk{ - FileId: fileId, - Offset: f.off, - Size: uint64(len(buf)), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, + FileId: fileId, + Offset: f.off, + Size: uint64(len(buf)), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, } f.entry.Chunks = append(f.entry.Chunks, chunk) - dir, _ := filer2.FullPath(f.name).DirAndName() - err = f.fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { f.entry.Attributes.Mtime = time.Now().Unix() + f.entry.Attributes.Collection = collection + f.entry.Attributes.Replication = replication request := &filer_pb.UpdateEntryRequest{ Directory: dir, @@ -448,9 +456,11 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { return nil }) - if err != nil { + if err == nil { + glog.V(3).Infof("WebDavFileSystem.Write %v: written [%d,%d)", f.name, f.off, f.off+int64(len(buf))) f.off += int64(len(buf)) } + return len(buf), err } @@ -469,10 +479,9 @@ func (f *WebDavFile) Close() error { func (f *WebDavFile) Read(p []byte) (readSize int, err error) { glog.V(2).Infof("WebDavFileSystem.Read %v", f.name) - ctx := context.Background() if f.entry == nil { - f.entry, err = filer2.GetEntry(ctx, f.fs, f.name) + f.entry, err = filer2.GetEntry(f.fs, filer2.FullPath(f.name)) } if f.entry == nil { return 0, err @@ -488,30 +497,29 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { } chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, f.off, len(p)) - totalRead, err := filer2.ReadIntoBuffer(ctx, f.fs, f.name, p, chunkViews, f.off) + totalRead, err := filer2.ReadIntoBuffer(f.fs, filer2.FullPath(f.name), p, chunkViews, f.off) if err != nil { return 0, err } readSize = int(totalRead) + glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+totalRead) + f.off += totalRead if readSize == 0 { return 0, io.EOF } + return } func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count) - ctx := context.Background() - dir := f.name - if dir != "/" && strings.HasSuffix(dir, "/") { - dir = dir[:len(dir)-1] - } + dir, _ := filer2.FullPath(f.name).DirAndName() - err = filer2.ReadDirAllEntries(ctx, f.fs, dir, func(entry *filer_pb.Entry) { + err = filer2.ReadDirAllEntries(f.fs, filer2.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) { fi := FileInfo{ size: int64(filer2.TotalSize(entry.GetChunks())), name: entry.Name, diff --git a/weed/shell/command_bucket_create.go b/weed/shell/command_bucket_create.go new file mode 100644 index 
000000000..3546528aa --- /dev/null +++ b/weed/shell/command_bucket_create.go @@ -0,0 +1,88 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandBucketCreate{}) +} + +type commandBucketCreate struct { +} + +func (c *commandBucketCreate) Name() string { + return "bucket.create" +} + +func (c *commandBucketCreate) Help() string { + return `create a bucket with a given name + + Example: + bucket.create -name -replication 001 +` +} + +func (c *commandBucketCreate) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + bucketName := bucketCommand.String("name", "", "bucket name") + replication := bucketCommand.String("replication", "", "replication setting for the bucket") + if err = bucketCommand.Parse(args); err != nil { + return nil + } + + if *bucketName == "" { + return fmt.Errorf("empty bucket name") + } + + filerServer, filerPort, _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) + if parseErr != nil { + return parseErr + } + + err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) + } + filerBucketsPath := resp.DirBuckets + + println("create bucket under", filerBucketsPath) + + entry := &filer_pb.Entry{ + Name: *bucketName, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0777 | os.ModeDir), + Collection: *bucketName, + Replication: *replication, + }, + } + + if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ + Directory: filerBucketsPath, + Entry: entry, + }); err != nil { + return err + } + + println("created bucket", *bucketName) + + return nil + + }) + + return err + +} diff --git a/weed/shell/command_bucket_delete.go b/weed/shell/command_bucket_delete.go new file mode 100644 index 000000000..c57ce7221 --- /dev/null +++ b/weed/shell/command_bucket_delete.go @@ -0,0 +1,71 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandBucketDelete{}) +} + +type commandBucketDelete struct { +} + +func (c *commandBucketDelete) Name() string { + return "bucket.delete" +} + +func (c *commandBucketDelete) Help() string { + return `delete a bucket by a given name + + bucket.delete -name +` +} + +func (c *commandBucketDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + bucketName := bucketCommand.String("name", "", "bucket name") + if err = bucketCommand.Parse(args); err != nil { + return nil + } + + if *bucketName == "" { + return fmt.Errorf("empty bucket name") + } + + filerServer, filerPort, _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) + if parseErr != nil { + return parseErr + } + + err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return 
fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) + } + filerBucketsPath := resp.DirBuckets + + if _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ + Directory: filerBucketsPath, + Name: *bucketName, + IsDeleteData: false, + IsRecursive: true, + IgnoreRecursiveError: true, + }); err != nil { + return err + } + + return nil + + }) + + return err + +} diff --git a/weed/shell/command_bucket_list.go b/weed/shell/command_bucket_list.go new file mode 100644 index 000000000..5eb5972ce --- /dev/null +++ b/weed/shell/command_bucket_list.go @@ -0,0 +1,81 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + "math" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandBucketList{}) +} + +type commandBucketList struct { +} + +func (c *commandBucketList) Name() string { + return "bucket.list" +} + +func (c *commandBucketList) Help() string { + return `list all buckets + +` +} + +func (c *commandBucketList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if err = bucketCommand.Parse(args); err != nil { + return nil + } + + filerServer, filerPort, _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) + if parseErr != nil { + return parseErr + } + + err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) + } + filerBucketsPath := resp.DirBuckets + + stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ + Directory: filerBucketsPath, + Limit: math.MaxUint32, + }) + if err != nil { + return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err) + } + + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + if resp.Entry.Attributes.Replication == "" || resp.Entry.Attributes.Replication == "000" { + fmt.Fprintf(writer, " %s\n", resp.Entry.Name) + } else { + fmt.Fprintf(writer, " %s\t\t\treplication: %s\n", resp.Entry.Name, resp.Entry.Attributes.Replication) + } + } + + return nil + + }) + + return err + +} diff --git a/weed/shell/command_collection_delete.go b/weed/shell/command_collection_delete.go index fbaddcd51..4b3d7f0be 100644 --- a/weed/shell/command_collection_delete.go +++ b/weed/shell/command_collection_delete.go @@ -34,9 +34,8 @@ func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writ collectionName := args[0] - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - _, err = client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err = client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ Name: collectionName, }) return err diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go index c4325c66f..2a114e61b 100644 --- a/weed/shell/command_collection_list.go +++ b/weed/shell/command_collection_list.go @@ -41,9 +41,8 @@ func (c *commandCollectionList) Do(args []string, commandEnv *CommandEnv, writer func 
ListCollectionNames(commandEnv *CommandEnv, includeNormalVolumes, includeEcVolumes bool) (collections []string, err error) { var resp *master_pb.CollectionListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.CollectionList(ctx, &master_pb.CollectionListRequest{ + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.CollectionList(context.Background(), &master_pb.CollectionListRequest{ IncludeNormalVolumes: includeNormalVolumes, IncludeEcVolumes: includeEcVolumes, }) diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go index 96599372e..299d44fed 100644 --- a/weed/shell/command_ec_balance.go +++ b/weed/shell/command_ec_balance.go @@ -1,7 +1,6 @@ package shell import ( - "context" "flag" "fmt" "io" @@ -107,10 +106,8 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W return nil } - ctx := context.Background() - // collect all ec nodes - allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, *dc) + allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, *dc) if err != nil { return err } @@ -138,7 +135,7 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W } } - if err := balanceEcRacks(ctx, commandEnv, racks, *applyBalancing); err != nil { + if err := balanceEcRacks(commandEnv, racks, *applyBalancing); err != nil { return fmt.Errorf("balance ec racks: %v", err) } @@ -162,38 +159,36 @@ func collectRacks(allEcNodes []*EcNode) map[RackId]*EcRack { func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { - ctx := context.Background() - fmt.Printf("balanceEcVolumes %s\n", collection) - if err := deleteDuplicatedEcShards(ctx, commandEnv, allEcNodes, collection, applyBalancing); err != nil { + if err := deleteDuplicatedEcShards(commandEnv, allEcNodes, collection, applyBalancing); err != nil { return fmt.Errorf("delete duplicated collection %s ec shards: %v", collection, err) } - if err := balanceEcShardsAcrossRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { + if err := balanceEcShardsAcrossRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err) } - if err := balanceEcShardsWithinRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { + if err := balanceEcShardsWithinRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err) } return nil } -func deleteDuplicatedEcShards(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error { +func deleteDuplicatedEcShards(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error { // vid => []ecNode vidLocations := collectVolumeIdToEcNodes(allEcNodes) // deduplicate ec shards for vid, locations := range vidLocations { - if err := doDeduplicateEcShards(ctx, commandEnv, collection, vid, locations, applyBalancing); err != nil { + if err := doDeduplicateEcShards(commandEnv, collection, vid, locations, applyBalancing); err != nil { return err } } return nil } -func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collection string, vid 
needle.VolumeId, locations []*EcNode, applyBalancing bool) error { +func doDeduplicateEcShards(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error { // check whether this volume has ecNodes that are over average shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount) @@ -215,10 +210,10 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti duplicatedShardIds := []uint32{uint32(shardId)} for _, ecNode := range ecNodes[1:] { - if err := unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil { + if err := unmountEcShards(commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil { return err } - if err := sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil { + if err := sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil { return err } ecNode.deleteEcVolumeShards(vid, duplicatedShardIds) @@ -227,19 +222,19 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti return nil } -func balanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { +func balanceEcShardsAcrossRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { // collect vid => []ecNode, since previous steps can change the locations vidLocations := collectVolumeIdToEcNodes(allEcNodes) // spread the ec shards evenly for vid, locations := range vidLocations { - if err := doBalanceEcShardsAcrossRacks(ctx, commandEnv, collection, vid, locations, racks, applyBalancing); err != nil { + if err := doBalanceEcShardsAcrossRacks(commandEnv, collection, vid, locations, racks, applyBalancing); err != nil { return err } } return nil } -func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { +func doBalanceEcShardsAcrossRacks(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { // calculate average number of shards an ec rack should have for one volume averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks)) @@ -274,7 +269,7 @@ func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, c for _, n := range racks[rackId].ecNodes { possibleDestinationEcNodes = append(possibleDestinationEcNodes, n) } - err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) + err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) if err != nil { return err } @@ -306,7 +301,7 @@ func pickOneRack(rackToEcNodes map[RackId]*EcRack, rackToShardCount map[string]i return "" } -func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { +func balanceEcShardsWithinRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing 
bool) error { // collect vid => []ecNode, since previous steps can change the locations vidLocations := collectVolumeIdToEcNodes(allEcNodes) @@ -330,7 +325,7 @@ func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, all } sourceEcNodes := rackEcNodesWithVid[rackId] averageShardsPerEcNode := ceilDivide(rackToShardCount[rackId], len(possibleDestinationEcNodes)) - if err := doBalanceEcShardsWithinOneRack(ctx, commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil { + if err := doBalanceEcShardsWithinOneRack(commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil { return err } } @@ -338,7 +333,7 @@ func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, all return nil } -func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { +func doBalanceEcShardsWithinOneRack(commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { for _, ecNode := range existingLocations { @@ -353,7 +348,7 @@ func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, fmt.Printf("%s has %d overlimit, moving ec shard %d.%d\n", ecNode.info.Id, overLimitCount, vid, shardId) - err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) + err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) if err != nil { return err } @@ -365,18 +360,18 @@ func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, return nil } -func balanceEcRacks(ctx context.Context, commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error { +func balanceEcRacks(commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error { // balance one rack for all ec shards for _, ecRack := range racks { - if err := doBalanceEcRack(ctx, commandEnv, ecRack, applyBalancing); err != nil { + if err := doBalanceEcRack(commandEnv, ecRack, applyBalancing); err != nil { return err } } return nil } -func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error { +func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error { if len(ecRack.ecNodes) <= 1 { return nil @@ -421,7 +416,7 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id) - err := moveMountedShardToEcNode(ctx, commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing) + err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing) if err != nil { return err } @@ -440,7 +435,7 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack return nil } -func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId 
erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { +func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes) @@ -458,7 +453,7 @@ func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, a fmt.Printf("%s moves ec shard %d.%d to %s\n", existingLocation.info.Id, vid, shardId, destEcNode.info.Id) - err := moveMountedShardToEcNode(ctx, commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing) + err := moveMountedShardToEcNode(commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing) if err != nil { return err } diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go index cfe14fed5..0db119d3c 100644 --- a/weed/shell/command_ec_common.go +++ b/weed/shell/command_ec_common.go @@ -15,26 +15,26 @@ import ( "google.golang.org/grpc" ) -func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) { +func moveMountedShardToEcNode(commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) { copiedShardIds := []uint32{uint32(shardId)} if applyBalancing { // ask destination node to copy shard and the ecx file from source node, and mount it - copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, uint32(shardId), 1, vid, collection, existingLocation.info.Id) + copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id) if err != nil { return err } // unmount the to be deleted shards - err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds) + err = unmountEcShards(commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds) if err != nil { return err } // ask source node to delete the shard, and maybe the ecx file - err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds) + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds) if err != nil { return err } @@ -50,14 +50,10 @@ func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, exist } -func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, - targetServer *EcNode, startFromShardId uint32, shardCount int, +func oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption, + targetServer *EcNode, shardIdsToCopy []uint32, volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) { - var shardIdsToCopy []uint32 - for shardId := startFromShardId; shardId < startFromShardId+uint32(shardCount); shardId++ { - shardIdsToCopy = append(shardIdsToCopy, shardId) - } fmt.Printf("allocate %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) err = 
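
Both the cross-rack and within-rack balancing passes size their targets with `ceilDivide`, so no rack or node ends up holding more than the integer ceiling of shards divided by locations. An illustrative reimplementation; the 14 total shards assume the 10 data + 4 parity layout of the 10.4 erasure coding scheme:

```go
package main

import "fmt"

// ceilDivide-style integer ceiling, as used when computing the average
// number of shards per rack; illustrative reimplementation, not the repo's.
func ceilDivide(total, n int) int {
	return (total + n - 1) / n
}

func main() {
	totalShards := 14 // assumed: 10 data + 4 parity shards
	for _, racks := range []int{2, 3, 5} {
		fmt.Printf("%d racks -> at most %d shards per rack\n",
			racks, ceilDivide(totalShards, racks))
	}
}
```
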
operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { @@ -65,11 +61,13 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption if targetServer.info.Id != existingLocation { fmt.Printf("copy %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) - _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: shardIdsToCopy, CopyEcxFile: true, + CopyEcjFile: true, + CopyVifFile: true, SourceDataNode: existingLocation, }) if copyErr != nil { @@ -78,7 +76,7 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption } fmt.Printf("mount %d.%v on %s\n", volumeId, shardIdsToCopy, targetServer.info.Id) - _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{ + _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: shardIdsToCopy, @@ -180,12 +178,12 @@ type EcRack struct { freeEcSlot int } -func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) { +func collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) { // list all possible locations var resp *master_pb.VolumeListResponse - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { @@ -213,13 +211,12 @@ func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCen return } -func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, - collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error { +func sourceServerDeleteEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error { fmt.Printf("delete %d.%v from %s\n", volumeId, toBeDeletedShardIds, sourceLocation) return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{ + _, deleteErr := volumeServerClient.VolumeEcShardsDelete(context.Background(), &volume_server_pb.VolumeEcShardsDeleteRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: toBeDeletedShardIds, @@ -229,13 +226,12 @@ func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOpt } -func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, - volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error { +func unmountEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error { fmt.Printf("unmount %d.%v from %s\n", volumeId, 
toBeUnmountedhardIds, sourceLocation) return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{ + _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(context.Background(), &volume_server_pb.VolumeEcShardsUnmountRequest{ VolumeId: uint32(volumeId), ShardIds: toBeUnmountedhardIds, }) @@ -243,13 +239,12 @@ func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, }) } -func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, - collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error { +func mountEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error { fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedhardIds, sourceLocation) return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{ + _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: toBeMountedhardIds, diff --git a/weed/shell/command_ec_decode.go b/weed/shell/command_ec_decode.go new file mode 100644 index 000000000..b69e403cb --- /dev/null +++ b/weed/shell/command_ec_decode.go @@ -0,0 +1,264 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandEcDecode{}) +} + +type commandEcDecode struct { +} + +func (c *commandEcDecode) Name() string { + return "ec.decode" +} + +func (c *commandEcDecode) Help() string { + return `decode a erasure coded volume into a normal volume + + ec.decode [-collection=""] [-volumeId=] + +` +} + +func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeId := encodeCommand.Int("volumeId", 0, "the volume id") + collection := encodeCommand.String("collection", "", "the collection name") + if err = encodeCommand.Parse(args); err != nil { + return nil + } + + vid := needle.VolumeId(*volumeId) + + // collect topology information + topologyInfo, err := collectTopologyInfo(commandEnv) + if err != nil { + return err + } + + // volumeId is provided + if vid != 0 { + return doEcDecode(commandEnv, topologyInfo, *collection, vid) + } + + // apply to all volumes in the collection + volumeIds := collectEcShardIds(topologyInfo, *collection) + fmt.Printf("ec encode volumes: %v\n", volumeIds) + for _, vid := range volumeIds { + if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil { + return err + } + } + + return nil +} + +func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) { + // find volume location + nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid) + + 
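
The new `ec.decode` reverses `ec.encode`: it picks the server already holding the most data shards, copies the missing data shards there, rebuilds a normal volume on that server, then unmounts and deletes the old EC shards everywhere. A usage sketch; the collection name and volume id are examples, not values from this change:

```
> ec.decode -collection my_files -volumeId 5
```
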
fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits) + + // collect ec shards to the server with most space + targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid) + if err != nil { + return fmt.Errorf("collectEcShards for volume %d: %v", vid, err) + } + + // generate a normal volume + err = generateNormalVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation) + if err != nil { + return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err) + } + + // delete the previous ec shards + err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid) + if err != nil { + return fmt.Errorf("delete ec shards for volume %d: %v", vid, err) + } + + return nil +} + +func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error { + + // mount volume + if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{ + VolumeId: uint32(vid), + }) + return mountErr + }); err != nil { + return fmt.Errorf("mountVolumeAndDeleteEcShards mount volume %d on %s: %v", vid, targetNodeLocation, err) + } + + // unmount ec shards + for location, ecIndexBits := range nodeToEcIndexBits { + fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds()) + err := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice()) + if err != nil { + return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err) + } + } + // delete ec shards + for location, ecIndexBits := range nodeToEcIndexBits { + fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds()) + err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice()) + if err != nil { + return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err) + } + } + + return nil +} + +func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error { + + fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer) + + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{ + VolumeId: uint32(vid), + Collection: collection, + }) + return genErr + }) + + return err + +} + +func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) { + + maxShardCount := 0 + var exisitngEcIndexBits erasure_coding.ShardBits + for loc, ecIndexBits := range nodeToEcIndexBits { + toBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount() + if toBeCopiedShardCount > maxShardCount { + maxShardCount = toBeCopiedShardCount + targetNodeLocation = loc + exisitngEcIndexBits = ecIndexBits + } + } + + fmt.Printf("collectEcShards: ec volume %d collect shards to %s from: %+v\n", vid, 
targetNodeLocation, nodeToEcIndexBits) + + var copiedEcIndexBits erasure_coding.ShardBits + for loc, ecIndexBits := range nodeToEcIndexBits { + if loc == targetNodeLocation { + continue + } + + needToCopyEcIndexBits := ecIndexBits.Minus(exisitngEcIndexBits).MinusParityShards() + if needToCopyEcIndexBits.ShardIdCount() == 0 { + continue + } + + err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + + fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation) + + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ + VolumeId: uint32(vid), + Collection: collection, + ShardIds: needToCopyEcIndexBits.ToUint32Slice(), + CopyEcxFile: false, + CopyEcjFile: true, + CopyVifFile: true, + SourceDataNode: loc, + }) + if copyErr != nil { + return fmt.Errorf("copy %d.%v %s => %s : %v\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr) + } + + return nil + }) + + if err != nil { + break + } + + copiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits) + + } + + nodeToEcIndexBits[targetNodeLocation] = exisitngEcIndexBits.Plus(copiedEcIndexBits) + + return targetNodeLocation, err + +} + +func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) { + + var resp *master_pb.VolumeListResponse + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return + } + + return resp.TopologyInfo, nil + +} + +func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) { + + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Collection == selectedCollection && v.Id == uint32(vid) { + ecShardInfos = append(ecShardInfos, v) + } + } + }) + + return +} + +func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) { + + vidMap := make(map[uint32]bool) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Collection == selectedCollection { + vidMap[v.Id] = true + } + } + }) + + for vid := range vidMap { + vids = append(vids, needle.VolumeId(vid)) + } + + return +} + +func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[string]erasure_coding.ShardBits { + + nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Id == uint32(vid) { + nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits) + } + } + }) + + return nodeToEcIndexBits +} diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go index f07cb93f9..6efb05488 100644 --- a/weed/shell/command_ec_encode.go +++ b/weed/shell/command_ec_encode.go @@ -8,13 +8,14 @@ import ( "sync" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" 
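
The shard bookkeeping above leans on `erasure_coding.ShardBits` set operations (`Plus`, `Minus`, `MinusParityShards`). A self-contained bitmask analogue, with names assumed, showing how `collectEcShards` decides which shards still need copying to the target node:

```go
package main

import "fmt"

// shardBits is an illustrative analogue of erasure_coding.ShardBits:
// bit i set means shard i is present on a node.
type shardBits uint32

func (b shardBits) plus(o shardBits) shardBits  { return b | o }
func (b shardBits) minus(o shardBits) shardBits { return b &^ o }

func (b shardBits) shardIds() (ids []uint32) {
	for i := uint32(0); i < 32; i++ {
		if b&(1<<i) != 0 {
			ids = append(ids, i)
		}
	}
	return
}

func main() {
	target := shardBits(0x01F) // shards 0-4 already on the target node
	remote := shardBits(0x3E0) // shards 5-9 on another node
	need := remote.minus(target) // copy only what the target is missing
	fmt.Println("copy shards:", need.shardIds())
}
```
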
"github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/wdclient" - "google.golang.org/grpc" ) func init() { @@ -62,22 +63,21 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr return nil } - ctx := context.Background() vid := needle.VolumeId(*volumeId) // volumeId is provided if vid != 0 { - return doEcEncode(ctx, commandEnv, *collection, vid) + return doEcEncode(commandEnv, *collection, vid) } // apply to all volumes in the collection - volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod) + volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod) if err != nil { return err } fmt.Printf("ec encode volumes: %v\n", volumeIds) for _, vid := range volumeIds { - if err = doEcEncode(ctx, commandEnv, *collection, vid); err != nil { + if err = doEcEncode(commandEnv, *collection, vid); err != nil { return err } } @@ -85,27 +85,29 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr return nil } -func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) { +func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) { // find volume location locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) if !found { return fmt.Errorf("volume %d not found", vid) } + // fmt.Printf("found ec %d shards on %v\n", vid, locations) + // mark the volume as readonly - err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) + err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) if err != nil { - return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err) + return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err) } // generate ec shards - err = generateEcShards(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url) + err = generateEcShards(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url) if err != nil { return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err) } // balance the ec shards to current cluster - err = spreadEcShards(ctx, commandEnv, vid, collection, locations) + err = spreadEcShards(commandEnv, vid, collection, locations) if err != nil { return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err) } @@ -113,12 +115,12 @@ func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, return nil } -func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error { +func markVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error { for _, location := range locations { err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, markErr := volumeServerClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{ + _, markErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{ VolumeId: uint32(volumeId), }) return markErr @@ -133,10 +135,10 @@ func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, vol return nil } -func 
generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error { +func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error { err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, genErr := volumeServerClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{ + _, genErr := volumeServerClient.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{ VolumeId: uint32(volumeId), Collection: collection, }) @@ -147,9 +149,9 @@ func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volum } -func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) { +func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) { - allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, "") + allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "") if err != nil { return err } @@ -163,29 +165,29 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle } // calculate how many shards to allocate for these servers - allocated := balancedEcDistribution(allocatedDataNodes) + allocatedEcIds := balancedEcDistribution(allocatedDataNodes) // ask the data nodes to copy from the source volume server - copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocated, volumeId, collection, existingLocations[0]) + copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0]) if err != nil { return err } // unmount the to be deleted shards - err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds) + err = unmountEcShards(commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds) if err != nil { return err } // ask the source volume server to clean up copied ec shards - err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds) + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds) if err != nil { return fmt.Errorf("source delete copied ecShards %s %d.%v: %v", existingLocations[0].Url, volumeId, copiedShardIds, err) } // ask the source volume server to delete the original volume for _, location := range existingLocations { - err = deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, location.Url) + err = deleteVolume(commandEnv.option.GrpcDialOption, volumeId, location.Url) if err != nil { return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, volumeId, err) } @@ -195,32 +197,28 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle } -func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, - targetServers []*EcNode, allocated []int, - volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { +func 
parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { // parallelize shardIdChan := make(chan []uint32, len(targetServers)) var wg sync.WaitGroup - startFromShardId := uint32(0) for i, server := range targetServers { - if allocated[i] <= 0 { + if len(allocatedEcIds[i]) <= 0 { continue } wg.Add(1) - go func(server *EcNode, startFromShardId uint32, shardCount int) { + go func(server *EcNode, allocatedEcShardIds []uint32) { defer wg.Done() - copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(ctx, grpcDialOption, server, - startFromShardId, shardCount, volumeId, collection, existingLocation.Url) + copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server, + allocatedEcShardIds, volumeId, collection, existingLocation.Url) if copyErr != nil { err = copyErr } else { shardIdChan <- copiedShardIds server.addEcVolumeShards(volumeId, collection, copiedShardIds) } - }(server, startFromShardId, allocated[i]) - startFromShardId += uint32(allocated[i]) + }(server, allocatedEcIds[i]) } wg.Wait() close(shardIdChan) @@ -236,29 +234,29 @@ func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Dia return } -func balancedEcDistribution(servers []*EcNode) (allocated []int) { - allocated = make([]int, len(servers)) - allocatedCount := 0 - for allocatedCount < erasure_coding.TotalShardsCount { - for i, server := range servers { - if server.freeEcSlot-allocated[i] > 0 { - allocated[i] += 1 - allocatedCount += 1 - } - if allocatedCount >= erasure_coding.TotalShardsCount { - break - } +func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) { + allocated = make([][]uint32, len(servers)) + allocatedShardIdIndex := uint32(0) + serverIndex := 0 + for allocatedShardIdIndex < erasure_coding.TotalShardsCount { + if servers[serverIndex].freeEcSlot > 0 { + allocated[serverIndex] = append(allocated[serverIndex], allocatedShardIdIndex) + allocatedShardIdIndex++ + } + serverIndex++ + if serverIndex >= len(servers) { + serverIndex = 0 } } return allocated } -func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) { +func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) { var resp *master_pb.VolumeListResponse - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { @@ -281,7 +279,7 @@ func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, se } }) - for vid, _ := range vidMap { + for vid := range vidMap { vids = append(vids, needle.VolumeId(vid)) } diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go index b43aed599..d9d943e6d 100644 --- a/weed/shell/command_ec_rebuild.go +++ b/weed/shell/command_ec_rebuild.go @@ -64,7 +64,7 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W } // collect all ec nodes - allEcNodes, _, err := 
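
The rewritten `balancedEcDistribution` returns concrete shard ids per server rather than per-server counts, handing ids out round-robin to any server with a free slot. A runnable extraction of that logic, with `EcNode` stubbed to just its free-slot count and `TotalShardsCount` assumed to be 14:

```go
package main

import "fmt"

const totalShardsCount = 14 // assumed: 10 data + 4 parity shards

type ecNode struct{ freeEcSlot int }

// balancedEcDistribution assigns shard ids 0..totalShardsCount-1
// round-robin across servers that still report a free slot.
func balancedEcDistribution(servers []*ecNode) (allocated [][]uint32) {
	allocated = make([][]uint32, len(servers))
	shardId := uint32(0)
	serverIndex := 0
	for shardId < totalShardsCount {
		if servers[serverIndex].freeEcSlot > 0 {
			allocated[serverIndex] = append(allocated[serverIndex], shardId)
			shardId++
		}
		serverIndex = (serverIndex + 1) % len(servers)
	}
	return
}

func main() {
	servers := []*ecNode{{freeEcSlot: 100}, {freeEcSlot: 100}}
	// mirrors TestCommandEcDistribution: even ids to one server, odd to the other
	fmt.Printf("allocated: %+v\n", balancedEcDistribution(servers))
}
```
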
collectEcNodes(context.Background(), commandEnv, "") + allEcNodes, _, err := collectEcNodes(commandEnv, "") if err != nil { return err } @@ -92,8 +92,6 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error { - ctx := context.Background() - fmt.Printf("rebuildEcVolumes %s\n", collection) // collect vid => each shard locations, similar to ecShardMap in topology.go @@ -117,7 +115,7 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s return fmt.Errorf("disk space is not enough") } - if err := rebuildOneEcVolume(ctx, commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil { + if err := rebuildOneEcVolume(commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil { return err } } @@ -125,13 +123,13 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s return nil } -func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error { +func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error { fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId) // collect shard files to rebuilder local disk var generatedShardIds []uint32 - copiedShardIds, _, err := prepareDataToRecover(ctx, commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges) + copiedShardIds, _, err := prepareDataToRecover(commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges) if err != nil { return err } @@ -139,7 +137,7 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * // clean up working files // ask the rebuilder to delete the copied shards - err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds) + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds) if err != nil { fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds) } @@ -151,13 +149,13 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * } // generate ec shards, and maybe ecx file - generatedShardIds, err = generateMissingShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id) + generatedShardIds, err = generateMissingShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id) if err != nil { return err } // mount the generated shards - err = mountEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds) + err = mountEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds) if err != nil { return err } @@ -167,11 +165,10 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * return nil } -func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption, - collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) { +func generateMissingShards(grpcDialOption grpc.DialOption, collection 
string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) { err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{ + resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(context.Background(), &volume_server_pb.VolumeEcShardsRebuildRequest{ VolumeId: uint32(volumeId), Collection: collection, }) @@ -183,7 +180,7 @@ func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption, return } -func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) { +func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) { needEcxFile := true var localShardBits erasure_coding.ShardBits @@ -210,11 +207,13 @@ func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder var copyErr error if applyBalancing { copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: []uint32{uint32(shardId)}, CopyEcxFile: needEcxFile, + CopyEcjFile: needEcxFile, + CopyVifFile: needEcxFile, SourceDataNode: ecNodes[0].info.Id, }) return copyErr diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go index 9e578ed28..4fddcbea5 100644 --- a/weed/shell/command_ec_test.go +++ b/weed/shell/command_ec_test.go @@ -1,13 +1,25 @@ package shell import ( - "context" + "fmt" "testing" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" ) +func TestCommandEcDistribution(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100), + newEcNode("dc1", "rack2", "dn2", 100), + } + + allocated := balancedEcDistribution(allEcNodes) + + fmt.Printf("allocated: %+v", allocated) +} + func TestCommandEcBalanceSmall(t *testing.T) { allEcNodes := []*EcNode{ @@ -108,7 +120,7 @@ func TestCommandEcBalanceVolumeEvenButRackUneven(t *testing.T) { racks := collectRacks(allEcNodes) balanceEcVolumes(nil, "c1", allEcNodes, racks, false) - balanceEcRacks(context.Background(), nil, racks, false) + balanceEcRacks(nil, racks, false) } func newEcNode(dc string, rack string, dataNodeId string, freeEcSlot int) *EcNode { diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 66ced46c5..3db487979 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" "math" @@ -24,12 +23,8 @@ func (c *commandFsCat) Name() string { func (c *commandFsCat) Help() string { return `stream the file content on to the screen - fs.cat /dir/ fs.cat /dir/file_name - fs.cat /dir/file_prefix - fs.cat http://:/dir/ fs.cat http://:/dir/file_name - fs.cat 
http://:/dir/file_prefix ` } @@ -42,21 +37,19 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write return err } - ctx := context.Background() - - if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + if commandEnv.isDirectory(filerServer, filerPort, path) { return fmt.Errorf("%s is a directory", path) } dir, name := filer2.FullPath(path).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, Directory: dir, } - respLookupEntry, err := client.LookupDirectoryEntry(ctx, request) + respLookupEntry, err := filer_pb.LookupEntry(client, request) if err != nil { return err } diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go index 408ec86c8..df42cd516 100644 --- a/weed/shell/command_fs_cd.go +++ b/weed/shell/command_fs_cd.go @@ -1,7 +1,6 @@ package shell import ( - "context" "io" ) @@ -45,9 +44,7 @@ func (c *commandFsCd) Do(args []string, commandEnv *CommandEnv, writer io.Writer return nil } - ctx := context.Background() - - err = commandEnv.checkDirectory(ctx, filerServer, filerPort, path) + err = commandEnv.checkDirectory(filerServer, filerPort, path) if err == nil { commandEnv.option.FilerHost = filerServer diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 5e634c82a..ca2f22b57 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -1,13 +1,12 @@ package shell import ( - "context" "fmt" + "io" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "io" ) func init() { @@ -37,81 +36,70 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer return err } - ctx := context.Background() - - if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + if commandEnv.isDirectory(filerServer, filerPort, path) { path = path + "/" } + var blockCount, byteCount uint64 dir, name := filer2.FullPath(path).DirAndName() + blockCount, byteCount, err = duTraverseDirectory(writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name) - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - - _, _, err = paginateDirectory(ctx, writer, client, dir, name, 1000) - - return err + if name == "" && err == nil { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) + } - }) + return } -func paginateDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int) (blockCount uint64, byteCount uint64, err error) { - - paginatedCount := -1 - startFromFileName := "" - - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr - return - } +func duTraverseDirectory(writer io.Writer, filerClient filer2.FilerClient, dir, name string) (blockCount, byteCount uint64, err error) { - paginatedCount = len(resp.Entries) - - for _, entry := range resp.Entries { - if entry.IsDirectory { - subDir := 
fmt.Sprintf("%s/%s", dir, entry.Name) - if dir == "/" { - subDir = "/" + entry.Name - } - numBlock, numByte, err := paginateDirectory(ctx, writer, client, subDir, "", paginateSize) - if err == nil { - blockCount += numBlock - byteCount += numByte - } - } else { - blockCount += uint64(len(entry.Chunks)) - byteCount += filer2.TotalSize(entry.Chunks) + err = filer2.ReadDirAllEntries(filerClient, filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", dir, entry.Name) + if dir == "/" { + subDir = "/" + entry.Name } - startFromFileName = entry.Name - - if name != "" && !entry.IsDirectory { - fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name) + numBlock, numByte, err := duTraverseDirectory(writer, filerClient, subDir, "") + if err == nil { + blockCount += numBlock + byteCount += numByte } + } else { + blockCount += uint64(len(entry.Chunks)) + byteCount += filer2.TotalSize(entry.Chunks) } - } - - if name == "" { - fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) - } + if name != "" && !entry.IsDirectory { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name) + } + }) return - } -func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { +func (env *CommandEnv) withFilerClient(filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000) - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) - }, filerGrpcAddress, env.option.GrpcDialOption) + return pb.WithGrpcFilerClient(filerGrpcAddress, env.option.GrpcDialOption, fn) } + +type commandFilerClient struct { + env *CommandEnv + filerServer string + filerPort int64 +} + +func (env *CommandEnv) getFilerClient(filerServer string, filerPort int64) *commandFilerClient { + return &commandFilerClient{ + env: env, + filerServer: filerServer, + filerPort: filerPort, + } +} +func (c *commandFilerClient) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + return c.env.withFilerClient(c.filerServer, c.filerPort, fn) +} +func (c *commandFilerClient) AdjustedUrl(hostAndPort string) string { + return hostAndPort +} diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 6979635e1..69ebe1b30 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -1,15 +1,15 @@ package shell import ( - "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "io" "os" "os/user" "strconv" "strings" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func init() { @@ -59,90 +59,56 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer return err } - ctx := context.Background() - - if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + if commandEnv.isDirectory(filerServer, filerPort, path) { path = path + "/" } dir, name := filer2.FullPath(path).DirAndName() + entryCount := 0 - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - - return paginateOneDirectory(ctx, writer, client, dir, name, 1000, isLongFormat, showHidden) - - }) - -} - -func 
paginateOneDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int, isLongFormat, showHidden bool) (err error) { + err = filer2.ReadDirAllEntries(commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { - entryCount := 0 - paginatedCount := -1 - startFromFileName := "" - - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr + if !showHidden && strings.HasPrefix(entry.Name, ".") { return } - paginatedCount = len(resp.Entries) - - for _, entry := range resp.Entries { + entryCount++ - if !showHidden && strings.HasPrefix(entry.Name, ".") { - continue - } - - entryCount++ - - if isLongFormat { - fileMode := os.FileMode(entry.Attributes.FileMode) - userName, groupNames := entry.Attributes.UserName, entry.Attributes.GroupName - if userName == "" { - if user, userErr := user.LookupId(strconv.Itoa(int(entry.Attributes.Uid))); userErr == nil { - userName = user.Username - } - } - groupName := "" - if len(groupNames) > 0 { - groupName = groupNames[0] + if isLongFormat { + fileMode := os.FileMode(entry.Attributes.FileMode) + userName, groupNames := entry.Attributes.UserName, entry.Attributes.GroupName + if userName == "" { + if user, userErr := user.LookupId(strconv.Itoa(int(entry.Attributes.Uid))); userErr == nil { + userName = user.Username } - if groupName == "" { - if group, groupErr := user.LookupGroupId(strconv.Itoa(int(entry.Attributes.Gid))); groupErr == nil { - groupName = group.Name - } - } - - if dir == "/" { - // just for printing - dir = "" + } + groupName := "" + if len(groupNames) > 0 { + groupName = groupNames[0] + } + if groupName == "" { + if group, groupErr := user.LookupGroupId(strconv.Itoa(int(entry.Attributes.Gid))); groupErr == nil { + groupName = group.Name } - fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n", - fileMode, len(entry.Chunks), - userName, groupName, - filer2.TotalSize(entry.Chunks), dir, entry.Name) - } else { - fmt.Fprintf(writer, "%s\n", entry.Name) } - startFromFileName = entry.Name - + if dir == "/" { + // just for printing + dir = "" + } + fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n", + fileMode, len(entry.Chunks), + userName, groupName, + filer2.TotalSize(entry.Chunks), dir, entry.Name) + } else { + fmt.Fprintf(writer, "%s\n", entry.Name) } - } - if isLongFormat { + }) + + if isLongFormat && err == nil { fmt.Fprintf(writer, "total %d\n", entryCount) } return - } diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go new file mode 100644 index 000000000..cd1ffb6fd --- /dev/null +++ b/weed/shell/command_fs_meta_cat.go @@ -0,0 +1,72 @@ +package shell + +import ( + "fmt" + "io" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandFsMetaCat{}) +} + +type commandFsMetaCat struct { +} + +func (c *commandFsMetaCat) Name() string { + return "fs.meta.cat" +} + +func (c *commandFsMetaCat) Help() string { + return `print out the meta data content for a file or directory + + fs.meta.cat /dir/ + fs.meta.cat /dir/file_name + fs.meta.cat http://:/dir/ + fs.meta.cat http://:/dir/file_name +` +} + +func (c *commandFsMetaCat) 
Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + input := findInputDirectory(args) + + filerServer, filerPort, path, err := commandEnv.parseUrl(input) + if err != nil { + return err + } + + dir, name := filer2.FullPath(path).DirAndName() + + return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Name: name, + Directory: dir, + } + respLookupEntry, err := filer_pb.LookupEntry(client, request) + if err != nil { + return err + } + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, marshalErr := m.MarshalToString(respLookupEntry.Entry) + if marshalErr != nil { + return fmt.Errorf("marshal meta: %v", marshalErr) + } + + fmt.Fprintf(writer, "%s\n", text) + + return nil + + }) + +} diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go index 5ea8de9f5..ed92d8011 100644 --- a/weed/shell/command_fs_meta_load.go +++ b/weed/shell/command_fs_meta_load.go @@ -1,15 +1,15 @@ package shell import ( - "context" "fmt" "io" "os" + "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" ) func init() { @@ -53,9 +53,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. var dirCount, fileCount uint64 - ctx := context.Background() - - err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { sizeBuf := make([]byte, 4) @@ -80,7 +78,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. 
return err } - if _, err = client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{ + if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ Directory: fullEntry.Dir, Entry: fullEntry.Entry, }); err != nil { diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go index 13b272fbf..099e04506 100644 --- a/weed/shell/command_fs_meta_notify.go +++ b/weed/shell/command_fs_meta_notify.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" @@ -9,7 +8,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/notification" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) func init() { @@ -41,38 +39,36 @@ func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer i } util.LoadConfiguration("notification", true) - v := viper.GetViper() - notification.LoadConfiguration(v.Sub("notification")) + v := util.GetViper() + notification.LoadConfiguration(v, "notification.") - ctx := context.Background() + var dirCount, fileCount uint64 - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { - var dirCount, fileCount uint64 - - err = doTraverse(ctx, writer, client, filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) error { - - if entry.IsDirectory { - dirCount++ - } else { - fileCount++ - } - - return notification.Queue.SendMessage( - string(parentPath.Child(entry.Name)), - &filer_pb.EventNotification{ - NewEntry: entry, - }, - ) + if entry.IsDirectory { + dirCount++ + } else { + fileCount++ + } - }) + notifyErr := notification.Queue.SendMessage( + string(parentPath.Child(entry.Name)), + &filer_pb.EventNotification{ + NewEntry: entry, + }, + ) - if err == nil { - fmt.Fprintf(writer, "\ntotal notified %d directories, %d files\n", dirCount, fileCount) + if notifyErr != nil { + fmt.Fprintf(writer, "fail to notify new entry event for %s: %v\n", parentPath.Child(entry.Name), notifyErr) } - return err - }) + if err == nil { + fmt.Fprintf(writer, "\ntotal notified %d directories, %d files\n", dirCount, fileCount) + } + + return err + } diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index e710fe297..b51fdd0f6 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -1,17 +1,19 @@ package shell import ( - "context" "flag" "fmt" "io" "os" + "sync" + "sync/atomic" "time" + "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" ) func init() { @@ -51,114 +53,127 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. 
return nil } - filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args())) - if err != nil { - return err + filerServer, filerPort, path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args())) + if parseErr != nil { + return parseErr } - ctx := context.Background() + t := time.Now() + fileName := *outputFileName + if fileName == "" { + fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta", + filerServer, filerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) + } - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + dst, openErr := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if openErr != nil { + return fmt.Errorf("failed to create file %s: %v", fileName, openErr) + } + defer dst.Close() - t := time.Now() - fileName := *outputFileName - if fileName == "" { - fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta", - filerServer, filerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) + var wg sync.WaitGroup + wg.Add(1) + outputChan := make(chan []byte, 1024) + go func() { + sizeBuf := make([]byte, 4) + for b := range outputChan { + util.Uint32toBytes(sizeBuf, uint32(len(b))) + dst.Write(sizeBuf) + dst.Write(b) } + wg.Done() + }() - dst, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil - } - defer dst.Close() + var dirCount, fileCount uint64 - var dirCount, fileCount uint64 + err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { - sizeBuf := make([]byte, 4) + protoMessage := &filer_pb.FullEntry{ + Dir: string(parentPath), + Entry: entry, + } - err = doTraverse(ctx, writer, client, filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) error { + bytes, err := proto.Marshal(protoMessage) + if err != nil { + fmt.Fprintf(writer, "marshall error: %v\n", err) + return + } - protoMessage := &filer_pb.FullEntry{ - Dir: string(parentPath), - Entry: entry, - } + outputChan <- bytes - bytes, err := proto.Marshal(protoMessage) - if err != nil { - return fmt.Errorf("marshall error: %v", err) - } + if entry.IsDirectory { + atomic.AddUint64(&dirCount, 1) + } else { + atomic.AddUint64(&fileCount, 1) + } - util.Uint32toBytes(sizeBuf, uint32(len(bytes))) + if *verbose { + println(parentPath.Child(entry.Name)) + } - dst.Write(sizeBuf) - dst.Write(bytes) + }) - if entry.IsDirectory { - dirCount++ - } else { - fileCount++ - } + close(outputChan) - if *verbose { - println(parentPath.Child(entry.Name)) - } + wg.Wait() - return nil + if err == nil { + fmt.Fprintf(writer, "total %d directories, %d files\n", dirCount, fileCount) + fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName) + } - }) + return err - if err == nil { - fmt.Fprintf(writer, "\ntotal %d directories, %d files", dirCount, fileCount) - fmt.Fprintf(writer, "\nmeta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName) - } +} +func doTraverseBFS(writer io.Writer, filerClient filer2.FilerClient, parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { - return err + K := 5 - }) + var jobQueueWg sync.WaitGroup + queue := util.NewQueue() + jobQueueWg.Add(1) + queue.Enqueue(parentPath) + var isTerminating bool + for i := 0; i < K; i++ { + go func() { + 
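// Worker pool: each of the K goroutines repeatedly dequeues one directory, lists it via processOneDirectory, and enqueues any subdirectories it finds. + // jobQueueWg counts outstanding directories rather than workers, so jobQueueWg.Wait() below returns only after the whole tree has been traversed. + 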
for { + if isTerminating { + break + } + t := queue.Dequeue() + if t == nil { + time.Sleep(329 * time.Millisecond) + continue + } + dir := t.(filer2.FullPath) + processErr := processOneDirectory(writer, filerClient, dir, queue, &jobQueueWg, fn) + if processErr != nil { + err = processErr + } + jobQueueWg.Done() + } + }() + } + jobQueueWg.Wait() + isTerminating = true + return } -func doTraverse(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry) error) (err error) { - - paginatedCount := -1 - startFromFileName := "" - paginateSize := 1000 - - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: string(parentPath), - Prefix: "", - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr - return - } - paginatedCount = len(resp.Entries) +func processOneDirectory(writer io.Writer, filerClient filer2.FilerClient, parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { - for _, entry := range resp.Entries { + return filer2.ReadDirAllEntries(filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) { - if err = fn(parentPath, entry); err != nil { - return err - } + fn(parentPath, entry) - if entry.IsDirectory { - subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name) - if parentPath == "/" { - subDir = "/" + entry.Name - } - if err = doTraverse(ctx, writer, client, filer2.FullPath(subDir), fn); err != nil { - return err - } + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name) + if parentPath == "/" { + subDir = "/" + entry.Name } - startFromFileName = entry.Name - + jobQueueWg.Add(1) + queue.Enqueue(filer2.FullPath(subDir)) } - } - - return + }) } diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index 67606ab53..85275058e 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -47,20 +47,18 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer return err } - ctx := context.Background() - sourceDir, sourceName := filer2.FullPath(sourcePath).DirAndName() destinationDir, destinationName := filer2.FullPath(destinationPath).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { // collect destination entry info destinationRequest := &filer_pb.LookupDirectoryEntryRequest{ Name: destinationDir, Directory: destinationName, } - respDestinationLookupEntry, err := client.LookupDirectoryEntry(ctx, destinationRequest) + respDestinationLookupEntry, err := filer_pb.LookupEntry(client, destinationRequest) var targetDir, targetName string @@ -82,7 +80,7 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer NewName: targetName, } - _, err = client.AtomicRenameEntry(ctx, request) + _, err = client.AtomicRenameEntry(context.Background(), request) fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, filer2.NewFullPath(targetDir, targetName)) diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 8474e43ea..04530571c 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ 
-1,12 +1,12 @@ package shell import ( - "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "io" "strings" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func init() { @@ -36,77 +36,42 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ dir, name := filer2.FullPath(path).DirAndName() - ctx := context.Background() + dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, newPrefix(), -1) - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - - dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, client, dir, name, newPrefix(), -1) - - if terr == nil { - fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) - } + if terr == nil { + fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) + } - return terr - - }) + return terr } -func treeTraverseDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { - - paginatedCount := -1 - startFromFileName := "" - paginateSize := 1000 - - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr - return - } - paginatedCount = len(resp.Entries) - if paginatedCount > 0 { - prefix.addMarker(level) - } +func treeTraverseDirectory(writer io.Writer, filerClient filer2.FilerClient, dir filer2.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { - for i, entry := range resp.Entries { + prefix.addMarker(level) - if level < 0 && name != "" { - if entry.Name != name { - break - } + err = filer2.ReadDirAllEntries(filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) { + if level < 0 && name != "" { + if entry.Name != name { + return } + } - // 0.1% wrong prefix here, but fixing it would need to paginate to the next batch first - isLast := paginatedCount < paginateSize && i == paginatedCount-1 - fmt.Fprintf(writer, "%s%s\n", prefix.getPrefix(level, isLast), entry.Name) - - if entry.IsDirectory { - directoryCount++ - subDir := fmt.Sprintf("%s/%s", dir, entry.Name) - if dir == "/" { - subDir = "/" + entry.Name - } - dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, client, subDir, "", prefix, level+1) - directoryCount += dirCount - fileCount += fCount - err = terr - } else { - fileCount++ - } - startFromFileName = entry.Name + fmt.Fprintf(writer, "%s%s\n", prefix.getPrefix(level, isLast), entry.Name) + if entry.IsDirectory { + directoryCount++ + subDir := dir.Child(entry.Name) + dirCount, fCount, terr := treeTraverseDirectory(writer, filerClient, subDir, "", prefix, level+1) + directoryCount += dirCount + fileCount += fCount + err = terr + } else { + fileCount++ } - } + }) return - } type Prefix struct { diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go index d7ef0d005..349f52f1c 100644 --- a/weed/shell/command_volume_balance.go +++ b/weed/shell/command_volume_balance.go @@ -27,7 +27,7 @@ func (c *commandVolumeBalance) Name() string { func (c *commandVolumeBalance) Help() string 
{ return `balance all volumes among volume servers - volume.balance [-c ALL|EACH_COLLECTION|<collection_name>] [-force] [-dataCenter=<data_center_name>] + volume.balance [-collection ALL|EACH_COLLECTION|<collection_name>] [-force] [-dataCenter=<data_center_name>] Algorithm: @@ -69,9 +69,8 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer } var resp *master_pb.VolumeListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { @@ -79,8 +78,10 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer } typeToNodes := collectVolumeServersByType(resp.TopologyInfo, *dc) - for _, volumeServers := range typeToNodes { + + for maxVolumeCount, volumeServers := range typeToNodes { if len(volumeServers) < 2 { + fmt.Printf("only 1 node is configured with max %d volumes, skipping balancing\n", maxVolumeCount) continue } if *collection == "EACH_COLLECTION" { @@ -93,8 +94,8 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer return err } } - } else if *collection == "ALL" { - if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL", *applyBalancing); err != nil { + } else if *collection == "ALL_COLLECTIONS" { + if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil { return err } } else { @@ -107,18 +108,12 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer return nil } -func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.DataNodeInfo, volumeSizeLimit uint64, collection string, applyBalancing bool) error { - var nodes []*Node - for _, dn := range dataNodeInfos { - nodes = append(nodes, &Node{ - info: dn, - }) - } +func balanceVolumeServers(commandEnv *CommandEnv, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error { // balance writable volumes for _, n := range nodes { n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool { - if collection != "ALL" { + if collection != "ALL_COLLECTIONS" { if v.Collection != collection { return false } @@ -133,7 +128,7 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat // balance readable volumes for _, n := range nodes { n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool { - if collection != "ALL" { + if collection != "ALL_COLLECTIONS" { if v.Collection != collection { return false } @@ -148,15 +143,19 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat return nil } -func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter string) (typeToNodes map[uint64][]*master_pb.DataNodeInfo) { - typeToNodes = make(map[uint64][]*master_pb.DataNodeInfo) +func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter string) (typeToNodes map[uint64][]*Node) { + typeToNodes = make(map[uint64][]*Node) for _, dc := range t.DataCenterInfos { if selectedDataCenter != "" && dc.Id != selectedDataCenter { continue } for _, r := range dc.RackInfos { for _, dn := range r.DataNodeInfos { - typeToNodes[dn.MaxVolumeCount] = append(typeToNodes[dn.MaxVolumeCount], dn) + typeToNodes[dn.MaxVolumeCount] = 
append(typeToNodes[dn.MaxVolumeCount], &Node{ + info: dn, + dc: dc.Id, + rack: r.Id, + }) } } } @@ -166,6 +165,8 @@ func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter st type Node struct { info *master_pb.DataNodeInfo selectedVolumes map[uint32]*master_pb.VolumeInformationMessage + dc string + rack string } func sortWritableVolumes(volumes []*master_pb.VolumeInformationMessage) { @@ -207,6 +208,13 @@ func balanceSelectedVolume(commandEnv *CommandEnv, nodes []*Node, sortCandidates sortCandidatesFn(candidateVolumes) for _, v := range candidateVolumes { + if v.ReplicaPlacement > 0 { + if fullNode.dc != emptyNode.dc && fullNode.rack != emptyNode.rack { + // TODO this logic is too simple, but should work most of the time + // Need a correct algorithm to handle all different cases + continue + } + } if _, found := emptyNode.selectedVolumes[v.Id]; !found { if err := moveVolume(commandEnv, v, fullNode, emptyNode, applyBalancing); err == nil { delete(fullNode.selectedVolumes, v.Id) @@ -230,8 +238,7 @@ func moveVolume(commandEnv *CommandEnv, v *master_pb.VolumeInformationMessage, f } fmt.Fprintf(os.Stdout, "moving volume %s%d %s => %s\n", collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id) if applyBalancing { - ctx := context.Background() - return LiveMoveVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second) + return LiveMoveVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second) } return nil } diff --git a/weed/shell/command_volume_configure_replication.go b/weed/shell/command_volume_configure_replication.go new file mode 100644 index 000000000..133ec62c6 --- /dev/null +++ b/weed/shell/command_volume_configure_replication.go @@ -0,0 +1,104 @@ +package shell + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" +) + +func init() { + Commands = append(Commands, &commandVolumeConfigureReplication{}) +} + +type commandVolumeConfigureReplication struct { +} + +func (c *commandVolumeConfigureReplication) Name() string { + return "volume.configure.replication" +} + +func (c *commandVolumeConfigureReplication) Help() string { + return `change volume replication value + + This command changes a volume replication value. It should be followed by volume.fix.replication. 
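+ + e.g.: + + volume.configure.replication -volumeId=7 -replication=002 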
+ +` +} + +func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + configureReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeIdInt := configureReplicationCommand.Int("volumeId", 0, "the volume id") + replicationString := configureReplicationCommand.String("replication", "", "the intended replication value") + if err = configureReplicationCommand.Parse(args); err != nil { + return nil + } + + if *replicationString == "" { + return fmt.Errorf("empty replication value") + } + + replicaPlacement, err := super_block.NewReplicaPlacementFromString(*replicationString) + if err != nil { + return fmt.Errorf("replication format: %v", err) + } + replicaPlacementInt32 := uint32(replicaPlacement.Byte()) + + var resp *master_pb.VolumeListResponse + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return err + } + + vid := needle.VolumeId(*volumeIdInt) + + // find all data nodes with volumes that need replication change + var allLocations []location + eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + loc := newLocation(dc, string(rack), dn) + for _, v := range dn.VolumeInfos { + if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 { + allLocations = append(allLocations, loc) + continue + } + } + }) + + if len(allLocations) == 0 { + return fmt.Errorf("no volume needs change") + } + + for _, dst := range allLocations { + err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, configureErr := volumeServerClient.VolumeConfigure(context.Background(), &volume_server_pb.VolumeConfigureRequest{ + VolumeId: uint32(vid), + Replication: replicaPlacement.String(), + }) + if configureErr != nil { + return configureErr + } + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil + }) + + if err != nil { + return err + } + + } + + return nil +} diff --git a/weed/shell/command_volume_copy.go b/weed/shell/command_volume_copy.go index 1c83ba655..aecc071ad 100644 --- a/weed/shell/command_volume_copy.go +++ b/weed/shell/command_volume_copy.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" @@ -47,7 +46,6 @@ func (c *commandVolumeCopy) Do(args []string, commandEnv *CommandEnv, writer io.
return fmt.Errorf("source and target volume servers are the same!") } - ctx := context.Background() - _, err = copyVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer) + _, err = copyVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer) return } diff --git a/weed/shell/command_volume_delete.go b/weed/shell/command_volume_delete.go index 17d27ea3a..5869b1621 100644 --- a/weed/shell/command_volume_delete.go +++ b/weed/shell/command_volume_delete.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" @@ -42,7 +41,6 @@ func (c *commandVolumeDelete) Do(args []string, commandEnv *CommandEnv, writer i return fmt.Errorf("wrong volume id format %s: %v", volumeId, err) } - ctx := context.Background() - return deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) + return deleteVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) } diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go index 4c7a794c0..210f4819d 100644 --- a/weed/shell/command_volume_fix_replication.go +++ b/weed/shell/command_volume_fix_replication.go @@ -3,13 +3,14 @@ package shell import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" "io" "math/rand" "sort" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) func init() { @@ -49,9 +50,8 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, } var resp *master_pb.VolumeListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { @@ -78,7 +78,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, underReplicatedVolumeLocations := make(map[uint32][]location) for vid, locations := range replicatedVolumeLocations { volumeInfo := replicatedVolumeInfo[vid] - replicaPlacement, _ := storage.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) if replicaPlacement.GetCopyCount() > len(locations) { underReplicatedVolumeLocations[vid] = locations } @@ -97,7 +97,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, for vid, locations := range underReplicatedVolumeLocations { volumeInfo := replicatedVolumeInfo[vid] - replicaPlacement, _ := storage.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) foundNewLocation := false for _, dst := range allLocations { // check whether data nodes satisfy the constraints @@ -113,7 +113,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, } err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, 
func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{ + _, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{ VolumeId: volumeInfo.Id, SourceDataNode: sourceNode.dataNode.Id, }) @@ -145,7 +145,7 @@ func keepDataNodesSorted(dataNodes []location) { }) } -func satisfyReplicaPlacement(replicaPlacement *storage.ReplicaPlacement, existingLocations []location, possibleLocation location) bool { +func satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, existingLocations []location, possibleLocation location) bool { existingDataCenters := make(map[string]bool) existingRacks := make(map[string]bool) diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index 91b5a0d32..c5a9388fa 100644 --- a/weed/shell/command_volume_list.go +++ b/weed/shell/command_volume_list.go @@ -32,9 +32,8 @@ func (c *commandVolumeList) Help() string { func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { var resp *master_pb.VolumeListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { @@ -46,7 +45,7 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io. } func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64) statistics { - fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, volumeSizeLimitMb) + fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d remote:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount, volumeSizeLimitMb) sort.Slice(t.DataCenterInfos, func(i, j int) bool { return t.DataCenterInfos[i].Id < t.DataCenterInfos[j].Id }) @@ -58,7 +57,7 @@ func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLi return s } func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics { - fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) var s statistics sort.Slice(t.RackInfos, func(i, j int) bool { return t.RackInfos[i].Id < t.RackInfos[j].Id @@ -70,7 +69,7 @@ func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statisti return s } func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics { - fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) var s statistics sort.Slice(t.DataNodeInfos, func(i, j int) bool { return t.DataNodeInfos[i].Id < t.DataNodeInfos[j].Id @@ 
-82,7 +81,7 @@ func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics { return s } func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics { - fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) var s statistics sort.Slice(t.VolumeInfos, func(i, j int) bool { return t.VolumeInfos[i].Id < t.VolumeInfos[j].Id diff --git a/weed/shell/command_volume_mount.go b/weed/shell/command_volume_mount.go index 50a307492..cffc7136b 100644 --- a/weed/shell/command_volume_mount.go +++ b/weed/shell/command_volume_mount.go @@ -45,14 +45,13 @@ func (c *commandVolumeMount) Do(args []string, commandEnv *CommandEnv, writer io return fmt.Errorf("wrong volume id format %s: %v", volumeId, err) } - ctx := context.Background() - return mountVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) + return mountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) } -func mountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { +func mountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ + _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{ VolumeId: uint32(volumeId), }) return mountErr diff --git a/weed/shell/command_volume_move.go b/weed/shell/command_volume_move.go index 08d87c988..c25b953a5 100644 --- a/weed/shell/command_volume_move.go +++ b/weed/shell/command_volume_move.go @@ -25,7 +25,7 @@ func (c *commandVolumeMove) Name() string { } func (c *commandVolumeMove) Help() string { - return ` move a live volume from one volume server to another volume server + return `move a live volume from one volume server to another volume server volume.move @@ -59,26 +59,25 @@ func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io. return fmt.Errorf("source and target volume servers are the same!") } - ctx := context.Background() - return LiveMoveVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second) + return LiveMoveVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second) } // LiveMoveVolume moves one volume from one source volume server to one target volume server, with idleTimeout to drain the incoming requests. 
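+// The move runs in three sequential steps: copy the volume data to the target server, +// tail the source to replay any writes that arrived during the copy, and only then +// delete the source copy, so a failure part way through leaves the volume intact on the source. 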
-func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration) (err error) { +func LiveMoveVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration) (err error) { log.Printf("copying volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer) - lastAppendAtNs, err := copyVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer) + lastAppendAtNs, err := copyVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer) if err != nil { return fmt.Errorf("copy volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err) } log.Printf("tailing volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer) - if err = tailVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil { + if err = tailVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil { return fmt.Errorf("tail volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err) } log.Printf("deleting volume %d from %s", volumeId, sourceVolumeServer) - if err = deleteVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer); err != nil { + if err = deleteVolume(grpcDialOption, volumeId, sourceVolumeServer); err != nil { return fmt.Errorf("delete volume %d from %s: %v", volumeId, sourceVolumeServer, err) } @@ -86,10 +85,10 @@ func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeI return nil } -func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) { +func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) { err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - resp, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{ + resp, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{ VolumeId: uint32(volumeId), SourceDataNode: sourceVolumeServer, }) @@ -102,10 +101,10 @@ func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne return } -func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) { +func tailVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) { return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, replicateErr := volumeServerClient.VolumeTailReceiver(ctx, &volume_server_pb.VolumeTailReceiverRequest{ + _, replicateErr := volumeServerClient.VolumeTailReceiver(context.Background(), &volume_server_pb.VolumeTailReceiverRequest{ VolumeId: uint32(volumeId), SinceNs: lastAppendAtNs, IdleTimeoutSeconds: uint32(idleTimeout.Seconds()), @@ -116,9 +115,9 @@ func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne 
} -func deleteVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { +func deleteVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, deleteErr := volumeServerClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{ + _, deleteErr := volumeServerClient.VolumeDelete(context.Background(), &volume_server_pb.VolumeDeleteRequest{ VolumeId: uint32(volumeId), }) return deleteErr diff --git a/weed/shell/command_volume_tier_download.go b/weed/shell/command_volume_tier_download.go new file mode 100644 index 000000000..756dc4686 --- /dev/null +++ b/weed/shell/command_volume_tier_download.go @@ -0,0 +1,166 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandVolumeTierDownload{}) +} + +type commandVolumeTierDownload struct { +} + +func (c *commandVolumeTierDownload) Name() string { + return "volume.tier.download" +} + +func (c *commandVolumeTierDownload) Help() string { + return `download the dat file of a volume from a remote tier + + volume.tier.download [-collection=""] + volume.tier.download [-collection=""] -volumeId=<volume_id> + + e.g.: + volume.tier.download -volumeId=7 + + This command will download the dat file of a volume from a remote tier to a volume server in the local cluster. 
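+ + This is the reverse of volume.tier.upload: after the download, the volume is unmounted and mounted again, so reads are served from the local copy. 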
+ +` +} + +func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeId := tierCommand.Int("volumeId", 0, "the volume id") + collection := tierCommand.String("collection", "", "the collection name") + if err = tierCommand.Parse(args); err != nil { + return nil + } + + vid := needle.VolumeId(*volumeId) + + // collect topology information + topologyInfo, err := collectTopologyInfo(commandEnv) + if err != nil { + return err + } + + // volumeId is provided + if vid != 0 { + return doVolumeTierDownload(commandEnv, writer, *collection, vid) + } + + // apply to all volumes in the collection + // reusing collectVolumeIdsForEcEncode for now + volumeIds := collectRemoteVolumes(topologyInfo, *collection) + if err != nil { + return err + } + fmt.Printf("tier download volumes: %v\n", volumeIds) + for _, vid := range volumeIds { + if err = doVolumeTierDownload(commandEnv, writer, *collection, vid); err != nil { + return err + } + } + + return nil +} + +func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) { + + vidMap := make(map[uint32]bool) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.VolumeInfos { + if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" { + vidMap[v.Id] = true + } + } + }) + + for vid := range vidMap { + vids = append(vids, needle.VolumeId(vid)) + } + + return +} + +func doVolumeTierDownload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) { + // find volume location + locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) + if !found { + return fmt.Errorf("volume %d not found", vid) + } + + // TODO parallelize this + for _, loc := range locations { + // copy the .dat file from remote tier to local + err = downloadDatFromRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url) + if err != nil { + return fmt.Errorf("download dat file for volume %d to %s: %v", vid, loc.Url, err) + } + } + + return nil +} + +func downloadDatFromRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error { + + err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + }) + + var lastProcessed int64 + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + processingSpeed := float64(resp.Processed-lastProcessed) / 1024.0 / 1024.0 + + fmt.Fprintf(writer, "downloaded %.2f%%, %d bytes, %.2fMB/s\n", resp.ProcessedPercentage, resp.Processed, processingSpeed) + + lastProcessed = resp.Processed + } + if downloadErr != nil { + return downloadErr + } + + _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{ + VolumeId: uint32(volumeId), + }) + if unmountErr != nil { + return unmountErr + } + + _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{ + VolumeId: uint32(volumeId), + }) + if 
mountErr != nil { + return mountErr + } + + return nil + }) + + return err + +} diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go new file mode 100644 index 000000000..5131e8f85 --- /dev/null +++ b/weed/shell/command_volume_tier_upload.go @@ -0,0 +1,147 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandVolumeTierUpload{}) +} + +type commandVolumeTierUpload struct { +} + +func (c *commandVolumeTierUpload) Name() string { + return "volume.tier.upload" +} + +func (c *commandVolumeTierUpload) Help() string { + return `upload the dat file of a volume to a remote tier + + volume.tier.upload [-collection=""] [-fullPercent=95] [-quietFor=1h] + volume.tier.upload [-collection=""] -volumeId=<volume_id> -dest=<dest> [-keepLocalDatFile] + + e.g.: + volume.tier.upload -volumeId=7 -dest=s3 + volume.tier.upload -volumeId=7 -dest=s3.default + + The <dest> is defined in master.toml. + For example, "s3.default" in [storage.backend.s3.default] + + This command will move the dat file of a volume to a remote tier. + + SeaweedFS enables scalable and fast local access to lots of files, + while cloud storage is slower but more cost-efficient. How can we combine them? + + Usually the data follows the 80/20 rule: only 20% of data is frequently accessed. + We can offload the old volumes to the cloud. + + With this, SeaweedFS can be both fast and scalable, with effectively infinite storage space. + Just add more local SeaweedFS volume servers to increase the throughput. + + The index file is still local, and the same O(1) disk read is applied to the remote file. 
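+ + If -keepLocalDatFile is set, the local .dat file is kept after the upload instead of being removed. 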
+ +` +} + +func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeId := tierCommand.Int("volumeId", 0, "the volume id") + collection := tierCommand.String("collection", "", "the collection name") + fullPercentage := tierCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size") + quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes with no writes for this period") + dest := tierCommand.String("dest", "", "the target tier name") + keepLocalDatFile := tierCommand.Bool("keepLocalDatFile", false, "whether keep local dat file") + if err = tierCommand.Parse(args); err != nil { + return nil + } + + vid := needle.VolumeId(*volumeId) + + // volumeId is provided + if vid != 0 { + return doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile) + } + + // apply to all volumes in the collection + // reusing collectVolumeIdsForEcEncode for now + volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod) + if err != nil { + return err + } + fmt.Printf("tier upload volumes: %v\n", volumeIds) + for _, vid := range volumeIds { + if err = doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil { + return err + } + } + + return nil +} + +func doVolumeTierUpload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) { + // find volume location + locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) + if !found { + return fmt.Errorf("volume %d not found", vid) + } + + err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) + if err != nil { + return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err) + } + + // copy the .dat file to remote tier + err = uploadDatToRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile) + if err != nil { + return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, locations[0].Url, dest, err) + } + + return nil +} + +func uploadDatToRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error { + + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatToRemoteRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + DestinationBackendName: dest, + KeepLocalDatFile: keepLocalDatFile, + }) + + var lastProcessed int64 + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + processingSpeed := float64(resp.Processed-lastProcessed) / 1024.0 / 1024.0 + + fmt.Fprintf(writer, "copied %.2f%%, %d bytes, %.2fMB/s\n", resp.ProcessedPercentage, resp.Processed, processingSpeed) + + lastProcessed = resp.Processed + } + + return copyErr + }) + + return err + +} diff --git a/weed/shell/command_volume_unmount.go b/weed/shell/command_volume_unmount.go index 8096f34d8..6e5bef485 --- a/weed/shell/command_volume_unmount.go +++ 
b/weed/shell/command_volume_unmount.go @@ -45,14 +45,13 @@ func (c *commandVolumeUnmount) Do(args []string, commandEnv *CommandEnv, writer return fmt.Errorf("wrong volume id format %s: %v", volumeId, err) } - ctx := context.Background() - return unmountVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) + return unmountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) } -func unmountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { +func unmountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{ + _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{ VolumeId: uint32(volumeId), }) return unmountErr diff --git a/weed/shell/commands.go b/weed/shell/commands.go index b642ec253..b8832ad93 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" "net/url" @@ -9,10 +8,11 @@ import ( "strconv" "strings" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/wdclient" - "google.golang.org/grpc" ) type ShellOptions struct { @@ -42,10 +42,9 @@ var ( func NewCommandEnv(options ShellOptions) *CommandEnv { return &CommandEnv{ - env: make(map[string]string), - MasterClient: wdclient.NewMasterClient(context.Background(), - options.GrpcDialOption, "shell", strings.Split(*options.Masters, ",")), - option: options, + env: make(map[string]string), + MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, "shell", 0, strings.Split(*options.Masters, ",")), + option: options, } } @@ -59,38 +58,27 @@ func (ce *CommandEnv) parseUrl(input string) (filerServer string, filerPort int6 return ce.option.FilerHost, ce.option.FilerPort, input, err } -func (ce *CommandEnv) isDirectory(ctx context.Context, filerServer string, filerPort int64, path string) bool { +func (ce *CommandEnv) isDirectory(filerServer string, filerPort int64, path string) bool { - return ce.checkDirectory(ctx, filerServer, filerPort, path) == nil + return ce.checkDirectory(filerServer, filerPort, path) == nil } -func (ce *CommandEnv) checkDirectory(ctx context.Context, filerServer string, filerPort int64, path string) error { +func (ce *CommandEnv) checkDirectory(filerServer string, filerPort int64, path string) error { dir, name := filer2.FullPath(path).DirAndName() - return ce.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return ce.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: name, - InclusiveStartFrom: true, - Limit: 1, + resp, lookupErr := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ + Directory: dir, + Name: name, }) - if listErr != nil { - return listErr - } - - if len(resp.Entries) == 0 { - return fmt.Errorf("entry not found") - } - - if resp.Entries[0].Name != name { - return fmt.Errorf("not a valid directory, found %s", resp.Entries[0].Name) + if 
lookupErr != nil { + return lookupErr } - if !resp.Entries[0].IsDirectory { + if !resp.Entry.IsDirectory { return fmt.Errorf("not a directory") } diff --git a/weed/stats/disk_supported.go b/weed/stats/disk_supported.go index 0537828b0..dff580b5b 100644 --- a/weed/stats/disk_supported.go +++ b/weed/stats/disk_supported.go @@ -17,5 +17,7 @@ func fillInDiskStatus(disk *volume_server_pb.DiskStatus) { disk.All = fs.Blocks * uint64(fs.Bsize) disk.Free = fs.Bfree * uint64(fs.Bsize) disk.Used = disk.All - disk.Free + disk.PercentFree = float32((float64(disk.Free) / float64(disk.All)) * 100) + disk.PercentUsed = float32((float64(disk.Used) / float64(disk.All)) * 100) return } diff --git a/weed/stats/metrics.go b/weed/stats/metrics.go index a9624cd86..ee8763e84 100644 --- a/weed/stats/metrics.go +++ b/weed/stats/metrics.go @@ -136,7 +136,7 @@ func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, fnG } } -func SourceName(port int) string { +func SourceName(port uint32) string { hostname, err := os.Hostname() if err != nil { return "unknown" diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go index 3c297f20b..6941ca5a1 100644 --- a/weed/storage/backend/backend.go +++ b/weed/storage/backend/backend.go @@ -2,18 +2,134 @@ package backend import ( "io" + "os" + "strings" "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/spf13/viper" ) -type DataStorageBackend interface { +type BackendStorageFile interface { io.ReaderAt io.WriterAt Truncate(off int64) error io.Closer GetStat() (datSize int64, modTime time.Time, err error) - String() string + Name() string +} + +type BackendStorage interface { + ToProperties() map[string]string + NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) BackendStorageFile + CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) + DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) + DeleteFile(key string) (err error) +} + +type StringProperties interface { + GetString(key string) string +} +type StorageType string +type BackendStorageFactory interface { + StorageType() StorageType + BuildStorage(configuration StringProperties, configPrefix string, id string) (BackendStorage, error) } var ( - StorageBackends []DataStorageBackend + BackendStorageFactories = make(map[StorageType]BackendStorageFactory) + BackendStorages = make(map[string]BackendStorage) ) + +// used by master to load remote storage configurations +func LoadConfiguration(config *viper.Viper) { + + StorageBackendPrefix := "storage.backend" + + for backendTypeName := range config.GetStringMap(StorageBackendPrefix) { + backendStorageFactory, found := BackendStorageFactories[StorageType(backendTypeName)] + if !found { + glog.Fatalf("backend storage type %s not found", backendTypeName) + } + for backendStorageId := range config.GetStringMap(StorageBackendPrefix + "." + backendTypeName) { + if !config.GetBool(StorageBackendPrefix + "." + backendTypeName + "." 
+ backendStorageId + ".enabled") { + continue + } + backendStorage, buildErr := backendStorageFactory.BuildStorage(config, + StorageBackendPrefix+"."+backendTypeName+"."+backendStorageId+".", backendStorageId) + if buildErr != nil { + glog.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId) + } + BackendStorages[backendTypeName+"."+backendStorageId] = backendStorage + if backendStorageId == "default" { + BackendStorages[backendTypeName] = backendStorage + } + } + } + +} + +// used by volume server to receive remote storage configurations from master +func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) { + + for _, storageBackend := range storageBackends { + backendStorageFactory, found := BackendStorageFactories[StorageType(storageBackend.Type)] + if !found { + glog.Warningf("storage type %s not found", storageBackend.Type) + continue + } + backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), "", storageBackend.Id) + if buildErr != nil { + glog.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id) + } + BackendStorages[storageBackend.Type+"."+storageBackend.Id] = backendStorage + if storageBackend.Id == "default" { + BackendStorages[storageBackend.Type] = backendStorage + } + } +} + +type Properties struct { + m map[string]string +} + +func newProperties(m map[string]string) *Properties { + return &Properties{m: m} +} + +func (p *Properties) GetString(key string) string { + if v, found := p.m[key]; found { + return v + } + return "" +} + +func ToPbStorageBackends() (backends []*master_pb.StorageBackend) { + for sName, s := range BackendStorages { + sType, sId := BackendNameToTypeId(sName) + if sType == "" { + continue + } + backends = append(backends, &master_pb.StorageBackend{ + Type: sType, + Id: sId, + Properties: s.ToProperties(), + }) + } + return +} + +func BackendNameToTypeId(backendName string) (backendType, backendId string) { + parts := strings.Split(backendName, ".") + if len(parts) == 1 { + return backendName, "default" + } + if len(parts) != 2 { + return + } + + backendType, backendId = parts[0], parts[1] + return +} diff --git a/weed/storage/backend/disk_file.go b/weed/storage/backend/disk_file.go index 7f2b39d15..c4b3caffb 100644 --- a/weed/storage/backend/disk_file.go +++ b/weed/storage/backend/disk_file.go @@ -6,7 +6,7 @@ import ( ) var ( - _ DataStorageBackend = &DiskFile{} + _ BackendStorageFile = &DiskFile{} ) type DiskFile struct { @@ -45,6 +45,6 @@ func (df *DiskFile) GetStat() (datSize int64, modTime time.Time, err error) { return 0, time.Time{}, err } -func (df *DiskFile) String() string { +func (df *DiskFile) Name() string { return df.fullFilePath } diff --git a/weed/storage/backend/memory_map/memory_map_backend.go b/weed/storage/backend/memory_map/memory_map_backend.go index bac105022..03e7308d0 100644 --- a/weed/storage/backend/memory_map/memory_map_backend.go +++ b/weed/storage/backend/memory_map/memory_map_backend.go @@ -8,7 +8,7 @@ import ( ) var ( - _ backend.DataStorageBackend = &MemoryMappedFile{} + _ backend.BackendStorageFile = &MemoryMappedFile{} ) type MemoryMappedFile struct { @@ -55,6 +55,6 @@ func (mmf *MemoryMappedFile) GetStat() (datSize int64, modTime time.Time, err er return 0, time.Time{}, err } -func (mmf *MemoryMappedFile) String() string { +func (mmf *MemoryMappedFile) Name() string { return mmf.mm.File.Name() } diff --git a/weed/storage/backend/s3_backend/s3_backend.go 
b/weed/storage/backend/s3_backend/s3_backend.go index 0ff7eca21..8d71861c2 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -2,119 +2,176 @@ package s3_backend import ( "fmt" + "io" + "os" "strings" "time" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/google/uuid" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/backend" - "github.com/chrislusf/seaweedfs/weed/storage/needle" - "github.com/chrislusf/seaweedfs/weed/util" -) - -var ( - _ backend.DataStorageBackend = &S3Backend{} ) func init() { - backend.StorageBackends = append(backend.StorageBackends, &S3Backend{}) + backend.BackendStorageFactories["s3"] = &S3BackendFactory{} } -type S3Backend struct { - conn s3iface.S3API - region string - bucket string - dir string - vid needle.VolumeId - key string +type S3BackendFactory struct { } -func (s3backend S3Backend) ReadAt(p []byte, off int64) (n int, err error) { - bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1) - getObjectOutput, getObjectErr := s3backend.conn.GetObject(&s3.GetObjectInput{ - Bucket: &s3backend.bucket, - Key: &s3backend.key, - Range: &bytesRange, - }) +func (factory *S3BackendFactory) StorageType() backend.StorageType { + return backend.StorageType("s3") +} +func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, configPrefix string, id string) (backend.BackendStorage, error) { + return newS3BackendStorage(configuration, configPrefix, id) +} - if getObjectErr != nil { - return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backend.bucket, s3backend.key, getObjectErr) +type S3BackendStorage struct { + id string + aws_access_key_id string + aws_secret_access_key string + region string + bucket string + conn s3iface.S3API +} + +func newS3BackendStorage(configuration backend.StringProperties, configPrefix string, id string) (s *S3BackendStorage, err error) { + s = &S3BackendStorage{} + s.id = id + s.aws_access_key_id = configuration.GetString(configPrefix + "aws_access_key_id") + s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key") + s.region = configuration.GetString(configPrefix + "region") + s.bucket = configuration.GetString(configPrefix + "bucket") + s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region) + + glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket) + return +} + +func (s *S3BackendStorage) ToProperties() map[string]string { + m := make(map[string]string) + m["aws_access_key_id"] = s.aws_access_key_id + m["aws_secret_access_key"] = s.aws_secret_access_key + m["region"] = s.region + m["bucket"] = s.bucket + return m +} + +func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) backend.BackendStorageFile { + if strings.HasPrefix(key, "/") { + key = key[1:] } - defer getObjectOutput.Body.Close() - return getObjectOutput.Body.Read(p) + f := &S3BackendStorageFile{ + backendStorage: s, + key: key, + tierInfo: tierInfo, + } + return f } -func (s3backend S3Backend) WriteAt(p []byte, off int64) (n int, err error) { - panic("implement me") +func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) { + randomUuid, _ := uuid.NewRandom() + key = 
randomUuid.String() + + glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key) + + size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn) + + return } -func (s3backend S3Backend) Truncate(off int64) error { - panic("implement me") +func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) { + + glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key) + + size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn) + + return } -func (s3backend S3Backend) Close() error { - return nil +func (s *S3BackendStorage) DeleteFile(key string) (err error) { + + glog.V(1).Infof("delete dat file %s from remote", key) + + err = deleteFromS3(s.conn, s.bucket, key) + + return } -func (s3backend S3Backend) GetStat() (datSize int64, modTime time.Time, err error) { +type S3BackendStorageFile struct { + backendStorage *S3BackendStorage + key string + tierInfo *volume_server_pb.VolumeInfo +} - headObjectOutput, headObjectErr := s3backend.conn.HeadObject(&s3.HeadObjectInput{ - Bucket: &s3backend.bucket, - Key: &s3backend.key, +func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) { + + bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1) + + // glog.V(0).Infof("read %s %s", s3backendStorageFile.key, bytesRange) + + getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{ + Bucket: &s3backendStorageFile.backendStorage.bucket, + Key: &s3backendStorageFile.key, + Range: &bytesRange, }) - if headObjectErr != nil { - return 0, time.Now(), fmt.Errorf("bucket %s HeadObject %s: %v", s3backend.bucket, s3backend.key, headObjectErr) + if getObjectErr != nil { + return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr) + } + defer getObjectOutput.Body.Close() + + glog.V(4).Infof("read %s %s", s3backendStorageFile.key, bytesRange) + glog.V(4).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength) + + // read into p until it is full or the body ends, accumulating n across reads + for n < len(p) { + m, readErr := getObjectOutput.Body.Read(p[n:]) + n += m + if readErr != nil { + err = readErr + break + } } - datSize = int64(*headObjectOutput.ContentLength) - modTime = *headObjectOutput.LastModified + if err == io.EOF { + err = nil + } return } -func (s3backend S3Backend) String() string { - return fmt.Sprintf("%s/%s", s3backend.bucket, s3backend.key) +func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) { + panic("not implemented") } -func (s3backend *S3Backend) GetName() string { - return "s3" +func (s3backendStorageFile S3BackendStorageFile) Truncate(off int64) error { + panic("not implemented") } -func (s3backend *S3Backend) GetSinkToDirectory() string { - return s3backend.dir +func (s3backendStorageFile S3BackendStorageFile) Close() error { + return nil } -func (s3backend *S3Backend) Initialize(configuration util.Configuration, vid needle.VolumeId) error { - glog.V(0).Infof("storage.backend.s3.region: %v", configuration.GetString("region")) - glog.V(0).Infof("storage.backend.s3.bucket: %v", configuration.GetString("bucket")) - glog.V(0).Infof("storage.backend.s3.directory: %v", configuration.GetString("directory")) +func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) { - return s3backend.initialize( 
configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("bucket"), - configuration.GetString("directory"), - vid, - ) -} - -func (s3backend *S3Backend) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir string, - vid needle.VolumeId) (err error) { - s3backend.region = region - s3backend.bucket = bucket - s3backend.dir = dir - s3backend.conn, err = createSession(awsAccessKeyId, awsSecretAccessKey, region) + files := s3backendStorageFile.tierInfo.GetFiles() - s3backend.vid = vid - s3backend.key = fmt.Sprintf("%s/%d.dat", dir, vid) - if strings.HasPrefix(s3backend.key, "/") { - s3backend.key = s3backend.key[1:] + if len(files) == 0 { + err = fmt.Errorf("remote file info not found") + return } - return err + datSize = int64(files[0].FileSize) + modTime = time.Unix(int64(files[0].ModifiedTime), 0) + + return +} + +func (s3backendStorageFile S3BackendStorageFile) Name() string { + return s3backendStorageFile.key } diff --git a/weed/storage/backend/s3_backend/s3_download.go b/weed/storage/backend/s3_backend/s3_download.go new file mode 100644 index 000000000..dbc28446a --- /dev/null +++ b/weed/storage/backend/s3_backend/s3_download.go @@ -0,0 +1,98 @@ +package s3_backend + +import ( + "fmt" + "os" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string, sourceKey string, + fn func(progressed int64, percentage float32) error) (fileSize int64, err error) { + + fileSize, err = getFileSize(sess, sourceBucket, sourceKey) + if err != nil { + return + } + + //open the file + f, err := os.OpenFile(destFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return 0, fmt.Errorf("failed to open file %q, %v", destFileName, err) + } + defer f.Close() + + // Create a downloader with the session and custom options + downloader := s3manager.NewDownloaderWithClient(sess, func(u *s3manager.Downloader) { + u.PartSize = int64(64 * 1024 * 1024) + u.Concurrency = 5 + }) + + fileWriter := &s3DownloadProgressedWriter{ + fp: f, + size: fileSize, + written: 0, + fn: fn, + } + + // Download the file from S3. 
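+ // s3manager.Downloader fetches parts concurrently (Concurrency is set to 5 above), + // so the destination must implement io.WriterAt; the s3DownloadProgressedWriter + // wrapper forwards WriteAt to the underlying *os.File while accumulating the + // byte count for the progress callback.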
+ fileSize, err = downloader.Download(fileWriter, &s3.GetObjectInput{ + Bucket: aws.String(sourceBucket), + Key: aws.String(sourceKey), + }) + if err != nil { + return fileSize, fmt.Errorf("failed to download file %s: %v", destFileName, err) + } + + glog.V(1).Infof("downloaded file %s\n", destFileName) + + return +} + +// adapted from https://github.com/aws/aws-sdk-go/pull/1868 +// and https://petersouter.xyz/s3-download-progress-bar-in-golang/ +type s3DownloadProgressedWriter struct { + fp *os.File + size int64 + written int64 + fn func(progressed int64, percentage float32) error +} + +func (w *s3DownloadProgressedWriter) WriteAt(p []byte, off int64) (int, error) { + n, err := w.fp.WriteAt(p, off) + if err != nil { + return n, err + } + + // accumulate the number of bytes written so far and report progress to the callback + atomic.AddInt64(&w.written, int64(n)) + + if w.fn != nil { + written := w.written + if err := w.fn(written, float32(written*100)/float32(w.size)); err != nil { + return n, err + } + } + + return n, err +} + +func getFileSize(svc s3iface.S3API, bucket string, key string) (filesize int64, err error) { + params := &s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + + resp, err := svc.HeadObject(params) + if err != nil { + return 0, err + } + + return *resp.ContentLength, nil +} diff --git a/weed/storage/backend/s3_backend/s3_sessions.go b/weed/storage/backend/s3_backend/s3_sessions.go index cd7b7ad47..5fdbcb66b 100644 --- a/weed/storage/backend/s3_backend/s3_sessions.go +++ b/weed/storage/backend/s3_backend/s3_sessions.go @@ -52,3 +52,11 @@ func createSession(awsAccessKeyId, awsSecretAccessKey, region string) (s3iface.S return t, nil } + +func deleteFromS3(sess s3iface.S3API, sourceBucket string, sourceKey string) (err error) { + _, err = sess.DeleteObject(&s3.DeleteObjectInput{ + Bucket: aws.String(sourceBucket), + Key: aws.String(sourceKey), + }) + return err +} diff --git a/weed/storage/backend/s3_backend/s3_upload.go b/weed/storage/backend/s3_backend/s3_upload.go new file mode 100644 index 000000000..500a85590 --- /dev/null +++ b/weed/storage/backend/s3_backend/s3_upload.go @@ -0,0 +1,114 @@ +package s3_backend + +import ( + "fmt" + "os" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, + attributes map[string]string, + fn func(progressed int64, percentage float32) error) (fileSize int64, err error) { + + // open the file + f, err := os.Open(filename) + if err != nil { + return 0, fmt.Errorf("failed to open file %q, %v", filename, err) + } + defer f.Close() + + info, err := f.Stat() + if err != nil { + return 0, fmt.Errorf("failed to stat file %q, %v", filename, err) + } + + fileSize = info.Size() + + partSize := int64(64 * 1024 * 1024) // The minimum/default allowed part size is 5MB + for partSize*1000 < fileSize { + partSize *= 4 + } + + // Create an uploader with the session and custom options + uploader := s3manager.NewUploaderWithClient(sess, func(u *s3manager.Uploader) { + u.PartSize = partSize + u.Concurrency = 5 + }) + + fileReader := &s3UploadProgressedReader{ + fp: f, + size: fileSize, + read: -fileSize, + fn: fn, + } + + // process tagging + tags := "" + for k, v := range attributes { + if len(tags) > 0 { + tags = tags + "&" + } + tags = tags + k + "=" + v + } + + // Upload the file to 
S3. + var result *s3manager.UploadOutput + result, err = uploader.Upload(&s3manager.UploadInput{ + Bucket: aws.String(destBucket), + Key: aws.String(destKey), + Body: fileReader, + ACL: aws.String("private"), + ServerSideEncryption: aws.String("AES256"), + StorageClass: aws.String("STANDARD_IA"), + Tagging: aws.String(tags), + }) + + // in case it fails to upload + if err != nil { + return 0, fmt.Errorf("failed to upload file %s: %v", filename, err) + } + glog.V(1).Infof("file %s uploaded to %s\n", filename, result.Location) + + return +} + +// adapted from https://github.com/aws/aws-sdk-go/pull/1868 +type s3UploadProgressedReader struct { + fp *os.File + size int64 + read int64 + fn func(progressed int64, percentage float32) error +} + +func (r *s3UploadProgressedReader) Read(p []byte) (int, error) { + return r.fp.Read(p) +} + +func (r *s3UploadProgressedReader) ReadAt(p []byte, off int64) (int, error) { + n, err := r.fp.ReadAt(p, off) + if err != nil { + return n, err + } + + // accumulate the number of bytes read so far and report progress to the callback + atomic.AddInt64(&r.read, int64(n)) + + if r.fn != nil { + read := r.read + if err := r.fn(read, float32(read*100)/float32(r.size)); err != nil { + return n, err + } + } + + return n, err +} + +func (r *s3UploadProgressedReader) Seek(offset int64, whence int) (int64, error) { + return r.fp.Seek(offset, whence) +} diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index c7faa57a6..f15303282 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -1,13 +1,12 @@ package storage import ( + "fmt" "io/ioutil" "os" "strings" "sync" - "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" @@ -17,7 +16,7 @@ type DiskLocation struct { Directory string MaxVolumeCount int volumes map[needle.VolumeId]*Volume - sync.RWMutex + volumesLock sync.RWMutex // erasure coding ecVolumes map[needle.VolumeId]*erasure_coding.EcVolume @@ -33,8 +32,8 @@ func NewDiskLocation(dir string, maxVolumeCount int) *DiskLocation { func (l *DiskLocation) volumeIdFromPath(dir os.FileInfo) (needle.VolumeId, string, error) { name := dir.Name() - if !dir.IsDir() && strings.HasSuffix(name, ".dat") { - base := name[:len(name)-len(".dat")] + if !dir.IsDir() && strings.HasSuffix(name, ".idx") { + base := name[:len(name)-len(".idx")] collection, volumeId, err := parseCollectionVolumeId(base) return volumeId, collection, err } @@ -51,30 +50,39 @@ func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeI return collection, vol, err } -func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) { +func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) bool { name := fileInfo.Name() - if !fileInfo.IsDir() && strings.HasSuffix(name, ".dat") { + if !fileInfo.IsDir() && strings.HasSuffix(name, ".idx") { vid, collection, err := l.volumeIdFromPath(fileInfo) - if err == nil { - l.RLock() - _, found := l.volumes[vid] - l.RUnlock() - if !found { - if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0); e == nil { - l.Lock() - l.volumes[vid] = v - l.Unlock() - size, _, _ := v.FileStat() - glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", - l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), size, v.Ttl.String()) - // println("volume", vid, "last append at", v.lastAppendAtNs) - } else { - 
glog.V(0).Infof("new volume %s error %s", name, e) - } + if err != nil { + glog.Warningf("get volume id failed, %s, err : %s", name, err) + return false + } - } + // void loading one volume more than once + l.volumesLock.RLock() + _, found := l.volumes[vid] + l.volumesLock.RUnlock() + if found { + glog.V(1).Infof("loaded volume, %v", vid) + return true + } + + v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0) + if e != nil { + glog.V(0).Infof("new volume %s error %s", name, e) + return false } + + l.volumesLock.Lock() + l.volumes[vid] = v + l.volumesLock.Unlock() + size, _, _ := v.FileStat() + glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", + l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), size, v.Ttl.String()) + return true } + return false } func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, concurrency int) { @@ -95,7 +103,7 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con go func() { defer wg.Done() for dir := range task_queue { - l.loadExistingVolume(dir, needleMapKind) + _ = l.loadExistingVolume(dir, needleMapKind) } }() } @@ -115,29 +123,46 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) { func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) { - l.Lock() - for k, v := range l.volumes { - if v.Collection == collection { - e = l.deleteVolumeById(k) - if e != nil { - l.Unlock() - return - } - } - } - l.Unlock() + l.volumesLock.Lock() + delVolsMap := l.unmountVolumeByCollection(collection) + l.volumesLock.Unlock() l.ecVolumesLock.Lock() - for k, v := range l.ecVolumes { - if v.Collection == collection { - e = l.deleteEcVolumeById(k) - if e != nil { - l.ecVolumesLock.Unlock() - return + delEcVolsMap := l.unmountEcVolumeByCollection(collection) + l.ecVolumesLock.Unlock() + + errChain := make(chan error, 2) + var wg sync.WaitGroup + wg.Add(2) + go func() { + for _, v := range delVolsMap { + if err := v.Destroy(); err != nil { + errChain <- err } } + wg.Done() + }() + + go func() { + for _, v := range delEcVolsMap { + v.Destroy() + } + wg.Done() + }() + + go func() { + wg.Wait() + close(errChain) + }() + + errBuilder := strings.Builder{} + for err := range errChain { + errBuilder.WriteString(err.Error()) + errBuilder.WriteString("; ") + } + if errBuilder.Len() > 0 { + e = fmt.Errorf(errBuilder.String()) } - l.ecVolumesLock.Unlock() return } @@ -156,22 +181,15 @@ func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (e error) { } func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapType) bool { - if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { - for _, fileInfo := range fileInfos { - volId, _, err := l.volumeIdFromPath(fileInfo) - if vid == volId && err == nil { - l.loadExistingVolume(fileInfo, needleMapKind) - return true - } - } + if fileInfo, found := l.LocateVolume(vid); found { + return l.loadExistingVolume(fileInfo, needleMapKind) } - return false } func (l *DiskLocation) DeleteVolume(vid needle.VolumeId) error { - l.Lock() - defer l.Unlock() + l.volumesLock.Lock() + defer l.volumesLock.Unlock() _, ok := l.volumes[vid] if !ok { @@ -181,8 +199,8 @@ func (l *DiskLocation) DeleteVolume(vid needle.VolumeId) error { } func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error { - l.Lock() - defer l.Unlock() + l.volumesLock.Lock() + defer l.volumesLock.Unlock() v, ok := l.volumes[vid] if !ok { @@ -193,34 +211,48 @@ func (l *DiskLocation) UnloadVolume(vid 
needle.VolumeId) error { return nil } +func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[needle.VolumeId]*Volume { + deltaVols := make(map[needle.VolumeId]*Volume, 0) + for k, v := range l.volumes { + if v.Collection == collectionName && !v.isCompacting { + deltaVols[k] = v + } + } + + for k := range deltaVols { + delete(l.volumes, k) + } + return deltaVols +} + func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) { - l.Lock() - defer l.Unlock() + l.volumesLock.Lock() + defer l.volumesLock.Unlock() l.volumes[vid] = volume } func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) { - l.RLock() - defer l.RUnlock() + l.volumesLock.RLock() + defer l.volumesLock.RUnlock() v, ok := l.volumes[vid] return v, ok } func (l *DiskLocation) VolumesLen() int { - l.RLock() - defer l.RUnlock() + l.volumesLock.RLock() + defer l.volumesLock.RUnlock() return len(l.volumes) } func (l *DiskLocation) Close() { - l.Lock() + l.volumesLock.Lock() for _, v := range l.volumes { v.Close() } - l.Unlock() + l.volumesLock.Unlock() l.ecVolumesLock.Lock() for _, ecVolume := range l.ecVolumes { @@ -230,3 +262,16 @@ return } + +func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.FileInfo, bool) { + if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { + for _, fileInfo := range fileInfos { + volId, _, err := l.volumeIdFromPath(fileInfo) + if vid == volId && err == nil { + return fileInfo, true + } + } + } + + return nil, false +} diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go index ba0824c6d..f6c44e966 100644 --- a/weed/storage/disk_location_ec.go +++ b/weed/storage/disk_location_ec.go @@ -169,3 +169,17 @@ func (l *DiskLocation) deleteEcVolumeById(vid needle.VolumeId) (e error) { delete(l.ecVolumes, vid) return } + +func (l *DiskLocation) unmountEcVolumeByCollection(collectionName string) map[needle.VolumeId]*erasure_coding.EcVolume { + deltaVols := make(map[needle.VolumeId]*erasure_coding.EcVolume, 0) + for k, v := range l.ecVolumes { + if v.Collection == collectionName { + deltaVols[k] = v + } + } + + for k := range deltaVols { + delete(l.ecVolumes, k) + } + return deltaVols +} diff --git a/weed/storage/erasure_coding/ec_decoder.go b/weed/storage/erasure_coding/ec_decoder.go new file mode 100644 index 000000000..ae77cee3f --- /dev/null +++ b/weed/storage/erasure_coding/ec_decoder.go @@ -0,0 +1,198 @@ +package erasure_coding + +import ( + "fmt" + "io" + "os" + + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/idx" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +// write .idx file from .ecx and .ecj files +func WriteIdxFileFromEcIndex(baseFileName string) (err error) { + + ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644) + if openErr != nil { + return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr) + } + defer ecxFile.Close() + + idxFile, openErr := os.OpenFile(baseFileName+".idx", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if openErr != nil { + return fmt.Errorf("cannot open %s.idx: %v", baseFileName, openErr) + } + defer idxFile.Close() + + io.Copy(idxFile, ecxFile) + + err = iterateEcjFile(baseFileName, func(key types.NeedleId) error { + + bytes := needle_map.ToBytes(key, types.Offset{}, 
types.TombstoneFileSize) + idxFile.Write(bytes) + + return nil + }) + + return err +} + +// FindDatFileSize calculates the .dat file size from the max offset entry; +// there may be extra deletions after that entry, +// but they are deletions anyway +func FindDatFileSize(baseFileName string) (datSize int64, err error) { + + version, err := readEcVolumeVersion(baseFileName) + if err != nil { + return 0, fmt.Errorf("read ec volume %s version: %v", baseFileName, err) + } + + err = iterateEcxFile(baseFileName, func(key types.NeedleId, offset types.Offset, size uint32) error { + + if size == types.TombstoneFileSize { + return nil + } + + entryStopOffset := offset.ToAcutalOffset() + needle.GetActualSize(size, version) + if datSize < entryStopOffset { + datSize = entryStopOffset + } + + return nil + }) + + return +} + +func readEcVolumeVersion(baseFileName string) (version needle.Version, err error) { + + // find volume version + datFile, err := os.OpenFile(baseFileName+".ec00", os.O_RDONLY, 0644) + if err != nil { + return 0, fmt.Errorf("open ec volume %s superblock: %v", baseFileName, err) + } + datBackend := backend.NewDiskFile(datFile) + + superBlock, err := super_block.ReadSuperBlock(datBackend) + datBackend.Close() + if err != nil { + return 0, fmt.Errorf("read ec volume %s superblock: %v", baseFileName, err) + } + + return superBlock.Version, nil + +} + +func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId, offset types.Offset, size uint32) error) error { + ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644) + if openErr != nil { + return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr) + } + defer ecxFile.Close() + + buf := make([]byte, types.NeedleMapEntrySize) + for { + n, err := ecxFile.Read(buf) + if n != types.NeedleMapEntrySize { + if err == io.EOF { + return nil + } + return err + } + key, offset, size := idx.IdxFileEntry(buf) + if processNeedleFn != nil { + err = processNeedleFn(key, offset, size) + } + if err != nil { + if err != io.EOF { + return err + } + return nil + } + } + +} + +func iterateEcjFile(baseFileName string, processNeedleFn func(key types.NeedleId) error) error { + ecjFile, openErr := os.OpenFile(baseFileName+".ecj", os.O_RDONLY, 0644) + if openErr != nil { + return fmt.Errorf("cannot open ec journal %s.ecj: %v", baseFileName, openErr) + } + defer ecjFile.Close() + + buf := make([]byte, types.NeedleIdSize) + for { + n, err := ecjFile.Read(buf) + if n != types.NeedleIdSize { + if err == io.EOF { + return nil + } + return err + } + if processNeedleFn != nil { + err = processNeedleFn(types.BytesToNeedleId(buf)) + } + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + +} + +// WriteDatFile generates the .dat file from the .ec00 ~ .ec09 data shard files +func WriteDatFile(baseFileName string, datFileSize int64) error { + + datFile, openErr := os.OpenFile(baseFileName+".dat", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if openErr != nil { + return fmt.Errorf("cannot write volume %s.dat: %v", baseFileName, openErr) + } + defer datFile.Close() + + inputFiles := make([]*os.File, DataShardsCount) + + for shardId := 0; shardId < DataShardsCount; shardId++ { + shardFileName := baseFileName + ToExt(shardId) + inputFiles[shardId], openErr = os.OpenFile(shardFileName, os.O_RDONLY, 0) + if openErr != nil { + return openErr + } + defer inputFiles[shardId].Close() + } + + for datFileSize >= DataShardsCount*ErasureCodingLargeBlockSize { + for shardId := 0; shardId < DataShardsCount; shardId++ { + w, err := 
io.CopyN(datFile, inputFiles[shardId], ErasureCodingLargeBlockSize) + if w != ErasureCodingLargeBlockSize { + return fmt.Errorf("copy %s large block %d: %v", baseFileName, shardId, err) + } + datFileSize -= ErasureCodingLargeBlockSize + } + } + + for datFileSize > 0 { + for shardId := 0; shardId < DataShardsCount; shardId++ { + toRead := min(datFileSize, ErasureCodingSmallBlockSize) + w, err := io.CopyN(datFile, inputFiles[shardId], toRead) + if w != toRead { + return fmt.Errorf("copy %s small block %d: %v", baseFileName, shardId, err) + } + datFileSize -= toRead + } + } + + return nil +} + +func min(x, y int64) int64 { + if x > y { + return y + } + return x +} diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go index 97010a1ed..97c3ccbd9 100644 --- a/weed/storage/erasure_coding/ec_encoder.go +++ b/weed/storage/erasure_coding/ec_encoder.go @@ -5,12 +5,13 @@ import ( "io" "os" + "github.com/klauspost/reedsolomon" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/klauspost/reedsolomon" ) const ( @@ -21,35 +22,38 @@ const ( ErasureCodingSmallBlockSize = 1024 * 1024 // 1MB ) -// WriteSortedEcxFile generates .ecx file from existing .idx file +// WriteSortedFileFromIdx generates .ecx file from existing .idx file // all keys are sorted in ascending order -func WriteSortedEcxFile(baseFileName string) (e error) { +func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) { - cm, err := readCompactMap(baseFileName) + nm, err := readNeedleMap(baseFileName) + if nm != nil { + defer nm.Close() + } if err != nil { - return fmt.Errorf("readCompactMap: %v", err) + return fmt.Errorf("readNeedleMap: %v", err) } - ecxFile, err := os.OpenFile(baseFileName+".ecx", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + ecxFile, err := os.OpenFile(baseFileName+ext, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return fmt.Errorf("failed to open ecx file: %v", err) } defer ecxFile.Close() - err = cm.AscendingVisit(func(value needle_map.NeedleValue) error { + err = nm.AscendingVisit(func(value needle_map.NeedleValue) error { bytes := value.ToBytes() _, writeErr := ecxFile.Write(bytes) return writeErr }) if err != nil { - return fmt.Errorf("failed to visit ecx file: %v", err) + return fmt.Errorf("failed to visit idx file: %v", err) } return nil } -// WriteEcFiles generates .ec01 ~ .ec14 files +// WriteEcFiles generates .ec00 ~ .ec13 files func WriteEcFiles(baseFileName string) error { return generateEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize) } @@ -195,7 +199,7 @@ func encodeDatFile(remainingSize int64, err error, baseFileName string, bufferSi } buffers := make([][]byte, TotalShardsCount) - for i, _ := range buffers { + for i := range buffers { buffers[i] = make([]byte, bufferSize) } @@ -232,7 +236,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o } buffers := make([][]byte, TotalShardsCount) - for i, _ := range buffers { + for i := range buffers { if shardHasData[i] { buffers[i] = make([]byte, ErasureCodingSmallBlockSize) } @@ -280,14 +284,14 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o } -func readCompactMap(baseFileName string) (*needle_map.CompactMap, error) { +func readNeedleMap(baseFileName string) (*needle_map.MemDb, error) 
{ indexFile, err := os.OpenFile(baseFileName+".idx", os.O_RDONLY, 0644) if err != nil { return nil, fmt.Errorf("cannot read Volume Index %s.idx: %v", baseFileName, err) } defer indexFile.Close() - cm := needle_map.NewCompactMap() + cm := needle_map.NewMemDb() err = idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { if !offset.IsZero() && size != types.TombstoneFileSize { cm.Set(key, offset, size) diff --git a/weed/storage/erasure_coding/ec_test.go b/weed/storage/erasure_coding/ec_test.go index 57df09525..92b83cdc8 100644 --- a/weed/storage/erasure_coding/ec_test.go +++ b/weed/storage/erasure_coding/ec_test.go @@ -7,9 +7,10 @@ import ( "os" "testing" + "github.com/klauspost/reedsolomon" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/klauspost/reedsolomon" ) const ( @@ -26,14 +27,14 @@ func TestEncodingDecoding(t *testing.T) { t.Logf("generateEcFiles: %v", err) } - err = WriteSortedEcxFile(baseFileName) + err = WriteSortedFileFromIdx(baseFileName, ".ecx") if err != nil { - t.Logf("WriteSortedEcxFile: %v", err) + t.Logf("WriteSortedFileFromIdx: %v", err) } err = validateFiles(baseFileName) if err != nil { - t.Logf("WriteSortedEcxFile: %v", err) + t.Logf("WriteSortedFileFromIdx: %v", err) } removeGeneratedFiles(baseFileName) @@ -41,9 +42,10 @@ func TestEncodingDecoding(t *testing.T) { } func validateFiles(baseFileName string) error { - cm, err := readCompactMap(baseFileName) + nm, err := readNeedleMap(baseFileName) + defer nm.Close() if err != nil { - return fmt.Errorf("readCompactMap: %v", err) + return fmt.Errorf("readNeedleMap: %v", err) } datFile, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0) @@ -60,7 +62,7 @@ func validateFiles(baseFileName string) error { ecFiles, err := openEcFiles(baseFileName, true) defer closeEcFiles(ecFiles) - err = cm.AscendingVisit(func(value needle_map.NeedleValue) error { + err = nm.AscendingVisit(func(value needle_map.NeedleValue) error { return assertSame(datFile, fi.Size(), ecFiles, value.Offset, value.Size) }) if err != nil { diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go index bcae164ca..3d9aa2cff 100644 --- a/weed/storage/erasure_coding/ec_volume.go +++ b/weed/storage/erasure_coding/ec_volume.go @@ -9,7 +9,9 @@ import ( "sync" "time" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/types" @@ -56,6 +58,14 @@ func NewEcVolume(dir string, collection string, vid needle.VolumeId) (ev *EcVolu return nil, fmt.Errorf("cannot open ec volume journal %s.ecj: %v", baseFileName, err) } + // read volume info + ev.Version = needle.Version3 + if volumeInfo, found, _ := pb.MaybeLoadVolumeInfo(baseFileName + ".vif"); found { + ev.Version = needle.Version(volumeInfo.Version) + } else { + pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)}) + } + ev.ShardLocations = make(map[ShardId][]string) return @@ -126,6 +136,7 @@ func (ev *EcVolume) Destroy() { } os.Remove(ev.FileName() + ".ecx") os.Remove(ev.FileName() + ".ecj") + os.Remove(ev.FileName() + ".vif") } func (ev *EcVolume) FileName() string { @@ -186,10 +197,10 @@ func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle. 
} func (ev *EcVolume) FindNeedleFromEcx(needleId types.NeedleId) (offset types.Offset, size uint32, err error) { - return searchNeedleFromEcx(ev.ecxFile, ev.ecxFileSize, needleId, nil) + return SearchNeedleFromSortedIndex(ev.ecxFile, ev.ecxFileSize, needleId, nil) } -func searchNeedleFromEcx(ecxFile *os.File, ecxFileSize int64, needleId types.NeedleId, processNeedleFn func(file *os.File, offset int64) error) (offset types.Offset, size uint32, err error) { +func SearchNeedleFromSortedIndex(ecxFile *os.File, ecxFileSize int64, needleId types.NeedleId, processNeedleFn func(file *os.File, offset int64) error) (offset types.Offset, size uint32, err error) { var key types.NeedleId buf := make([]byte, types.NeedleMapEntrySize) l, h := int64(0), ecxFileSize/types.NeedleMapEntrySize diff --git a/weed/storage/erasure_coding/ec_volume_delete.go b/weed/storage/erasure_coding/ec_volume_delete.go index 04102ec9e..822a9e923 100644 --- a/weed/storage/erasure_coding/ec_volume_delete.go +++ b/weed/storage/erasure_coding/ec_volume_delete.go @@ -10,15 +10,15 @@ import ( ) var ( - markNeedleDeleted = func(file *os.File, offset int64) error { + MarkNeedleDeleted = func(file *os.File, offset int64) error { b := make([]byte, types.SizeSize) util.Uint32toBytes(b, types.TombstoneFileSize) n, err := file.WriteAt(b, offset+types.NeedleIdSize+types.OffsetSize) if err != nil { - return fmt.Errorf("ecx write error: %v", err) + return fmt.Errorf("sorted needle write error: %v", err) } if n != types.SizeSize { - return fmt.Errorf("ecx written %d bytes, expecting %d", n, types.SizeSize) + return fmt.Errorf("sorted needle written %d bytes, expecting %d", n, types.SizeSize) } return nil } @@ -26,7 +26,7 @@ var ( func (ev *EcVolume) DeleteNeedleFromEcx(needleId types.NeedleId) (err error) { - _, _, err = searchNeedleFromEcx(ev.ecxFile, ev.ecxFileSize, needleId, markNeedleDeleted) + _, _, err = SearchNeedleFromSortedIndex(ev.ecxFile, ev.ecxFileSize, needleId, MarkNeedleDeleted) if err != nil { if err == NotFoundError { @@ -81,7 +81,7 @@ func RebuildEcxFile(baseFileName string) error { needleId := types.BytesToNeedleId(buf) - _, _, err = searchNeedleFromEcx(ecxFile, ecxFileSize, needleId, markNeedleDeleted) + _, _, err = SearchNeedleFromSortedIndex(ecxFile, ecxFileSize, needleId, MarkNeedleDeleted) if err != nil && err != NotFoundError { ecxFile.Close() diff --git a/weed/storage/erasure_coding/ec_volume_info.go b/weed/storage/erasure_coding/ec_volume_info.go index c9e85c662..8ff65bb0f 100644 --- a/weed/storage/erasure_coding/ec_volume_info.go +++ b/weed/storage/erasure_coding/ec_volume_info.go @@ -81,6 +81,15 @@ func (b ShardBits) ShardIds() (ret []ShardId) { return } +func (b ShardBits) ToUint32Slice() (ret []uint32) { + for i := uint32(0); i < TotalShardsCount; i++ { + if b.HasShardId(ShardId(i)) { + ret = append(ret, i) + } + } + return +} + func (b ShardBits) ShardIdCount() (count int) { for count = 0; b > 0; count++ { b &= b - 1 @@ -95,3 +104,10 @@ func (b ShardBits) Minus(other ShardBits) ShardBits { func (b ShardBits) Plus(other ShardBits) ShardBits { return b | other } + +func (b ShardBits) MinusParityShards() ShardBits { + for i := DataShardsCount; i < TotalShardsCount; i++ { + b = b.RemoveShardId(ShardId(i)) + } + return b +} diff --git a/weed/storage/needle/crc.go b/weed/storage/needle/crc.go index 00ea1db69..6fd910bb7 100644 --- a/weed/storage/needle/crc.go +++ b/weed/storage/needle/crc.go @@ -1,11 +1,11 @@ package needle import ( - "crypto/md5" "fmt" - "github.com/chrislusf/seaweedfs/weed/util" 
"github.com/klauspost/crc32" + + "github.com/chrislusf/seaweedfs/weed/util" ) var table = crc32.MakeTable(crc32.Castagnoli) @@ -29,13 +29,3 @@ func (n *Needle) Etag() string { util.Uint32toBytes(bits, uint32(n.Checksum)) return fmt.Sprintf("%x", bits) } - -func (n *Needle) MD5() string { - - hash := md5.New() - - hash.Write(n.Data) - - return fmt.Sprintf("%x", hash.Sum(nil)) - -} diff --git a/weed/storage/needle/needle.go b/weed/storage/needle/needle.go index 2f03ba87b..d3969e868 100644 --- a/weed/storage/needle/needle.go +++ b/weed/storage/needle/needle.go @@ -8,8 +8,6 @@ import ( "strings" "time" - "io/ioutil" - "github.com/chrislusf/seaweedfs/weed/images" . "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -50,53 +48,28 @@ func (n *Needle) String() (str string) { return } -func ParseUpload(r *http.Request) ( - fileName string, data []byte, mimeType string, pairMap map[string]string, isGzipped bool, originalDataSize int, - modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) { - pairMap = make(map[string]string) - for k, v := range r.Header { - if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) { - pairMap[k] = v[0] - } - } - - if r.Method == "POST" { - fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r) - } else { - isGzipped = false - mimeType = r.Header.Get("Content-Type") - fileName = "" - data, e = ioutil.ReadAll(r.Body) - originalDataSize = len(data) - } - if e != nil { - return - } - - modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64) - ttl, _ = ReadTTL(r.FormValue("ttl")) - - return -} -func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle, originalSize int, e error) { - var pairMap map[string]string - fname, mimeType, isGzipped, isChunkedFile := "", "", false, false +func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit int64) (n *Needle, originalSize int, e error) { n = new(Needle) - fname, n.Data, mimeType, pairMap, isGzipped, originalSize, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r) + pu, e := ParseUpload(r, sizeLimit) if e != nil { return } - if len(fname) < 256 { - n.Name = []byte(fname) + n.Data = pu.Data + originalSize = pu.OriginalDataSize + n.LastModified = pu.ModifiedTime + n.Ttl = pu.Ttl + + if len(pu.FileName) < 256 { + n.Name = []byte(pu.FileName) n.SetHasName() } - if len(mimeType) < 256 { - n.Mime = []byte(mimeType) + if len(pu.MimeType) < 256 { + n.Mime = []byte(pu.MimeType) n.SetHasMime() } - if len(pairMap) != 0 { + if len(pu.PairMap) != 0 { trimmedPairMap := make(map[string]string) - for k, v := range pairMap { + for k, v := range pu.PairMap { trimmedPairMap[k[len(PairNamePrefix):]] = v } @@ -107,7 +80,7 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle n.SetHasPairs() } } - if isGzipped { + if pu.IsGzipped { n.SetGzipped() } if n.LastModified == 0 { @@ -118,13 +91,13 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle n.SetHasTtl() } - if isChunkedFile { + if pu.IsChunkedFile { n.SetIsChunkManifest() } if fixJpgOrientation { - loweredName := strings.ToLower(fname) - if mimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") { + loweredName := strings.ToLower(pu.FileName) + if pu.MimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") { n.Data = images.FixJpgOrientation(n.Data) } } diff --git a/weed/storage/needle/needle_parse_multipart.go 
b/weed/storage/needle/needle_parse_multipart.go deleted file mode 100644 index 8be1a1da4..000000000 --- a/weed/storage/needle/needle_parse_multipart.go +++ /dev/null @@ -1,109 +0,0 @@ -package needle - -import ( - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" - - "io" - "io/ioutil" - "mime" - "net/http" - "path" - "strconv" - "strings" -) - -func parseMultipart(r *http.Request) ( - fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) { - defer func() { - if e != nil && r.Body != nil { - io.Copy(ioutil.Discard, r.Body) - r.Body.Close() - } - }() - form, fe := r.MultipartReader() - if fe != nil { - glog.V(0).Infoln("MultipartReader [ERROR]", fe) - e = fe - return - } - - //first multi-part item - part, fe := form.NextPart() - if fe != nil { - glog.V(0).Infoln("Reading Multi part [ERROR]", fe) - e = fe - return - } - - fileName = part.FileName() - if fileName != "" { - fileName = path.Base(fileName) - } - - data, e = ioutil.ReadAll(part) - if e != nil { - glog.V(0).Infoln("Reading Content [ERROR]", e) - return - } - - //if the filename is empty string, do a search on the other multi-part items - for fileName == "" { - part2, fe := form.NextPart() - if fe != nil { - break // no more or on error, just safely break - } - - fName := part2.FileName() - - //found the first multi-part has filename - if fName != "" { - data2, fe2 := ioutil.ReadAll(part2) - if fe2 != nil { - glog.V(0).Infoln("Reading Content [ERROR]", fe2) - e = fe2 - return - } - - //update - data = data2 - fileName = path.Base(fName) - break - } - } - - originalDataSize = len(data) - - isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm")) - - if !isChunkedFile { - - dotIndex := strings.LastIndex(fileName, ".") - ext, mtype := "", "" - if dotIndex > 0 { - ext = strings.ToLower(fileName[dotIndex:]) - mtype = mime.TypeByExtension(ext) - } - contentType := part.Header.Get("Content-Type") - if contentType != "" && mtype != contentType { - mimeType = contentType //only return mime type if not deductable - mtype = contentType - } - - if part.Header.Get("Content-Encoding") == "gzip" { - if unzipped, e := util.UnGzipData(data); e == nil { - originalDataSize = len(unzipped) - } - isGzipped = true - } else if util.IsGzippable(ext, mtype, data) { - if compressedData, err := util.GzipData(data); err == nil { - if len(data) > len(compressedData) { - data = compressedData - isGzipped = true - } - } - } - } - - return -} diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go new file mode 100644 index 000000000..85526aaa8 --- /dev/null +++ b/weed/storage/needle/needle_parse_upload.go @@ -0,0 +1,166 @@ +package needle + +import ( + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "path" + "strconv" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type ParsedUpload struct { + FileName string + Data []byte + MimeType string + PairMap map[string]string + IsGzipped bool + OriginalDataSize int + ModifiedTime uint64 + Ttl *TTL + IsChunkedFile bool + UncompressedData []byte +} + +func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) { + pu = &ParsedUpload{} + pu.PairMap = make(map[string]string) + for k, v := range r.Header { + if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) { + pu.PairMap[k] = v[0] + } + } + + if r.Method == "POST" { + e = parseMultipart(r, sizeLimit, pu) + } else { + e = parsePut(r, sizeLimit, pu) 
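+ // Both branches read at most sizeLimit+1 bytes through io.LimitReader; + // receiving exactly sizeLimit+1 bytes is how the parsers detect that the + // request body exceeded the configured limit.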
+ } + if e != nil { + return + } + + pu.ModifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64) + pu.Ttl, _ = ReadTTL(r.FormValue("ttl")) + + pu.OriginalDataSize = len(pu.Data) + pu.UncompressedData = pu.Data + if pu.IsGzipped { + if unzipped, e := util.UnGzipData(pu.Data); e == nil { + pu.OriginalDataSize = len(unzipped) + pu.UncompressedData = unzipped + } + } else if shouldGzip, _ := util.IsGzippableFileType("", pu.MimeType); shouldGzip { + if compressedData, err := util.GzipData(pu.Data); err == nil { + pu.Data = compressedData + pu.IsGzipped = true + } + } + + return +} + +func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { + pu.IsGzipped = r.Header.Get("Content-Encoding") == "gzip" + pu.MimeType = r.Header.Get("Content-Type") + pu.FileName = "" + pu.Data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1)) + // check the bytes actually read; pu.OriginalDataSize is only set later by the caller + if e == io.EOF || int64(len(pu.Data)) == sizeLimit+1 { + io.Copy(ioutil.Discard, r.Body) + } + r.Body.Close() + return nil +} + +func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { + defer func() { + if e != nil && r.Body != nil { + io.Copy(ioutil.Discard, r.Body) + r.Body.Close() + } + }() + form, fe := r.MultipartReader() + if fe != nil { + glog.V(0).Infoln("MultipartReader [ERROR]", fe) + e = fe + return + } + + // first multi-part item + part, fe := form.NextPart() + if fe != nil { + glog.V(0).Infoln("Reading Multi part [ERROR]", fe) + e = fe + return + } + + pu.FileName = part.FileName() + if pu.FileName != "" { + pu.FileName = path.Base(pu.FileName) + } + + pu.Data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1)) + if e != nil { + glog.V(0).Infoln("Reading Content [ERROR]", e) + return + } + if len(pu.Data) == int(sizeLimit)+1 { + e = fmt.Errorf("file over the limit of %d bytes", sizeLimit) + return + } + + // if the filename is an empty string, search the other multi-part items + for pu.FileName == "" { + part2, fe := form.NextPart() + if fe != nil { + break // no more or on error, just safely break + } + + fName := part2.FileName() + + // found the first multi-part item that has a filename + if fName != "" { + data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1)) + if fe2 != nil { + glog.V(0).Infoln("Reading Content [ERROR]", fe2) + e = fe2 + return + } + if len(data2) == int(sizeLimit)+1 { + e = fmt.Errorf("file over the limit of %d bytes", sizeLimit) + return + } + + // update + pu.Data = data2 + pu.FileName = path.Base(fName) + break + } + } + + pu.IsChunkedFile, _ = strconv.ParseBool(r.FormValue("cm")) + + if !pu.IsChunkedFile { + + dotIndex := strings.LastIndex(pu.FileName, ".") + ext, mtype := "", "" + if dotIndex > 0 { + ext = strings.ToLower(pu.FileName[dotIndex:]) + mtype = mime.TypeByExtension(ext) + } + contentType := part.Header.Get("Content-Type") + if contentType != "" && contentType != "application/octet-stream" && mtype != contentType { + pu.MimeType = contentType // only return the mime type if it is not deducible + mtype = contentType + } + + pu.IsGzipped = part.Header.Get("Content-Encoding") == "gzip" + } + + return +} diff --git a/weed/storage/needle/needle_read_write.go b/weed/storage/needle/needle_read_write.go index 8e5d18b1a..7f8aa4823 100644 --- a/weed/storage/needle/needle_read_write.go +++ b/weed/storage/needle/needle_read_write.go @@ -125,13 +125,13 @@ func (n *Needle) prepareWriteBuffer(version Version) ([]byte, uint32, int64, err return writeBytes, 0, 0, fmt.Errorf("Unsupported Version! 
(%d)", version) } -func (n *Needle) Append(w backend.DataStorageBackend, version Version) (offset uint64, size uint32, actualSize int64, err error) { +func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset uint64, size uint32, actualSize int64, err error) { if end, _, e := w.GetStat(); e == nil { - defer func(w backend.DataStorageBackend, off int64) { + defer func(w backend.BackendStorageFile, off int64) { if err != nil { if te := w.Truncate(end); te != nil { - glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.String(), end, te) + glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) } } }(w, end) @@ -150,7 +150,7 @@ func (n *Needle) Append(w backend.DataStorageBackend, version Version) (offset u return offset, size, actualSize, err } -func ReadNeedleBlob(r backend.DataStorageBackend, offset int64, size uint32, version Version) (dataSlice []byte, err error) { +func ReadNeedleBlob(r backend.BackendStorageFile, offset int64, size uint32, version Version) (dataSlice []byte, err error) { dataSize := GetActualSize(size, version) dataSlice = make([]byte, int(dataSize)) @@ -191,7 +191,7 @@ func (n *Needle) ReadBytes(bytes []byte, offset int64, size uint32, version Vers } // ReadData hydrates the needle from the file, with only n.Id is set. -func (n *Needle) ReadData(r backend.DataStorageBackend, offset int64, size uint32, version Version) (err error) { +func (n *Needle) ReadData(r backend.BackendStorageFile, offset int64, size uint32, version Version) (err error) { bytes, err := ReadNeedleBlob(r, offset, size, version) if err != nil { return err @@ -266,7 +266,7 @@ func (n *Needle) readNeedleDataVersion2(bytes []byte) (err error) { return nil } -func ReadNeedleHeader(r backend.DataStorageBackend, version Version, offset int64) (n *Needle, bytes []byte, bodyLength int64, err error) { +func ReadNeedleHeader(r backend.BackendStorageFile, version Version, offset int64) (n *Needle, bytes []byte, bodyLength int64, err error) { n = new(Needle) if version == Version1 || version == Version2 || version == Version3 { bytes = make([]byte, NeedleHeaderSize) @@ -301,7 +301,7 @@ func NeedleBodyLength(needleSize uint32, version Version) int64 { //n should be a needle already read the header //the input stream will read until next file entry -func (n *Needle) ReadNeedleBody(r backend.DataStorageBackend, version Version, offset int64, bodyLength int64) (bytes []byte, err error) { +func (n *Needle) ReadNeedleBody(r backend.BackendStorageFile, version Version, offset int64, bodyLength int64) (bytes []byte, err error) { if bodyLength <= 0 { return nil, nil diff --git a/weed/storage/needle/volume_ttl.go b/weed/storage/needle/volume_ttl.go index 4a169870d..179057876 100644 --- a/weed/storage/needle/volume_ttl.go +++ b/weed/storage/needle/volume_ttl.go @@ -69,6 +69,9 @@ func (t *TTL) ToBytes(output []byte) { } func (t *TTL) ToUint32() (output uint32) { + if t == nil || t.Count == 0 { + return 0 + } output = uint32(t.Count) << 8 output += uint32(t.Unit) return output diff --git a/weed/storage/needle_map/btree_map.go b/weed/storage/needle_map/btree_map.go deleted file mode 100644 index a26c5e068..000000000 --- a/weed/storage/needle_map/btree_map.go +++ /dev/null @@ -1,53 +0,0 @@ -package needle_map - -import ( - . 
"github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/google/btree" -) - -//This map assumes mostly inserting increasing keys -type BtreeMap struct { - tree *btree.BTree -} - -func NewBtreeMap() *BtreeMap { - return &BtreeMap{ - tree: btree.New(32), - } -} - -func (cm *BtreeMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) { - found := cm.tree.ReplaceOrInsert(NeedleValue{key, offset, size}) - if found != nil { - old := found.(NeedleValue) - return old.Offset, old.Size - } - return -} - -func (cm *BtreeMap) Delete(key NeedleId) (oldSize uint32) { - found := cm.tree.Delete(NeedleValue{key, Offset{}, 0}) - if found != nil { - old := found.(NeedleValue) - return old.Size - } - return -} -func (cm *BtreeMap) Get(key NeedleId) (*NeedleValue, bool) { - found := cm.tree.Get(NeedleValue{key, Offset{}, 0}) - if found != nil { - old := found.(NeedleValue) - return &old, true - } - return nil, false -} - -// Visit visits all entries or stop if any error when visiting -func (cm *BtreeMap) AscendingVisit(visit func(NeedleValue) error) (ret error) { - cm.tree.Ascend(func(item btree.Item) bool { - needle := item.(NeedleValue) - ret = visit(needle) - return ret == nil - }) - return ret -} diff --git a/weed/storage/needle_map/compact_map_test.go b/weed/storage/needle_map/compact_map_test.go index 3bad85727..7eea3969a 100644 --- a/weed/storage/needle_map/compact_map_test.go +++ b/weed/storage/needle_map/compact_map_test.go @@ -8,7 +8,14 @@ import ( func TestOverflow2(t *testing.T) { m := NewCompactMap() - m.Set(NeedleId(150088), ToOffset(8), 3000073) + _, oldSize := m.Set(NeedleId(150088), ToOffset(8), 3000073) + if oldSize != 0 { + t.Fatalf("expecting no previous data") + } + _, oldSize = m.Set(NeedleId(150088), ToOffset(8), 3000073) + if oldSize != 3000073 { + t.Fatalf("expecting previous data size is %d, not %d", 3000073, oldSize) + } m.Set(NeedleId(150073), ToOffset(8), 3000073) m.Set(NeedleId(150089), ToOffset(8), 3000073) m.Set(NeedleId(150076), ToOffset(8), 3000073) diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go new file mode 100644 index 000000000..a52d52a10 --- /dev/null +++ b/weed/storage/needle_map/memdb.go @@ -0,0 +1,119 @@ +package needle_map + +import ( + "fmt" + "os" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/idx" + . 
"github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" +) + +//This map uses in memory level db +type MemDb struct { + db *leveldb.DB +} + +func NewMemDb() *MemDb { + opts := &opt.Options{} + + var err error + t := &MemDb{} + if t.db, err = leveldb.Open(storage.NewMemStorage(), opts); err != nil { + glog.V(0).Infof("MemDb fails to open: %v", err) + return nil + } + + return t +} + +func (cm *MemDb) Set(key NeedleId, offset Offset, size uint32) error { + + bytes := ToBytes(key, offset, size) + + if err := cm.db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil { + return fmt.Errorf("failed to write temp leveldb: %v", err) + } + return nil +} + +func (cm *MemDb) Delete(key NeedleId) error { + bytes := make([]byte, NeedleIdSize) + NeedleIdToBytes(bytes, key) + return cm.db.Delete(bytes, nil) + +} +func (cm *MemDb) Get(key NeedleId) (*NeedleValue, bool) { + bytes := make([]byte, NeedleIdSize) + NeedleIdToBytes(bytes[0:NeedleIdSize], key) + data, err := cm.db.Get(bytes, nil) + if err != nil || len(data) != OffsetSize+SizeSize { + return nil, false + } + offset := BytesToOffset(data[0:OffsetSize]) + size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize]) + return &NeedleValue{Key: key, Offset: offset, Size: size}, true +} + +// Visit visits all entries or stop if any error when visiting +func (cm *MemDb) AscendingVisit(visit func(NeedleValue) error) (ret error) { + iter := cm.db.NewIterator(nil, nil) + for iter.Next() { + key := BytesToNeedleId(iter.Key()) + data := iter.Value() + offset := BytesToOffset(data[0:OffsetSize]) + size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize]) + + needle := NeedleValue{Key: key, Offset: offset, Size: size} + ret = visit(needle) + if ret != nil { + return + } + } + iter.Release() + ret = iter.Error() + + return +} + +func (cm *MemDb) SaveToIdx(idxName string) (ret error) { + idxFile, err := os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return + } + defer idxFile.Close() + + return cm.AscendingVisit(func(value NeedleValue) error { + if value.Offset.IsZero() || value.Size == TombstoneFileSize { + return nil + } + _, err := idxFile.Write(value.ToBytes()) + return err + }) + +} + +func (cm *MemDb) LoadFromIdx(idxName string) (ret error) { + idxFile, err := os.OpenFile(idxName, os.O_RDONLY, 0644) + if err != nil { + return + } + defer idxFile.Close() + + return idx.WalkIndexFile(idxFile, func(key NeedleId, offset Offset, size uint32) error { + if offset.IsZero() || size == TombstoneFileSize { + return cm.Delete(key) + } + return cm.Set(key, offset, size) + }) + +} + +func (cm *MemDb) Close() { + cm.db.Close() +} diff --git a/weed/storage/needle_map/memdb_test.go b/weed/storage/needle_map/memdb_test.go new file mode 100644 index 000000000..7b45d23f8 --- /dev/null +++ b/weed/storage/needle_map/memdb_test.go @@ -0,0 +1,23 @@ +package needle_map + +import ( + "testing" + + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +func BenchmarkMemDb(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + nm := NewMemDb() + + nid := types.NeedleId(345) + offset := types.Offset{ + OffsetHigher: types.OffsetHigher{}, + OffsetLower: types.OffsetLower{}, + } + nm.Set(nid, offset, 324) + nm.Close() + } + +} diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index ef8571e83..3bb258559 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -128,8 
+128,17 @@ func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error { } func (m *LevelDbNeedleMap) Close() { - m.indexFile.Close() - m.db.Close() + indexFileName := m.indexFile.Name() + if err := m.indexFile.Sync(); err != nil { + glog.Warningf("sync file %s failed: %v", indexFileName, err) + } + if err := m.indexFile.Close(); err != nil { + glog.Warningf("close index file %s failed: %v", indexFileName, err) + } + + if err := m.db.Close(); err != nil { + glog.Warningf("close levelDB failed: %v", err) + } } func (m *LevelDbNeedleMap) Destroy() error { diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go index ee639a7e6..84197912f 100644 --- a/weed/storage/needle_map_memory.go +++ b/weed/storage/needle_map_memory.go @@ -22,24 +22,11 @@ func NewCompactNeedleMap(file *os.File) *NeedleMap { return nm } -func NewBtreeNeedleMap(file *os.File) *NeedleMap { - nm := &NeedleMap{ - m: needle_map.NewBtreeMap(), - } - nm.indexFile = file - return nm -} - func LoadCompactNeedleMap(file *os.File) (*NeedleMap, error) { nm := NewCompactNeedleMap(file) return doLoading(file, nm) } -func LoadBtreeNeedleMap(file *os.File) (*NeedleMap, error) { - nm := NewBtreeNeedleMap(file) - return doLoading(file, nm) -} - func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { e := idx.WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error { nm.MaybeSetMaxFileKey(key) @@ -47,14 +34,12 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { nm.FileCounter++ nm.FileByteCounter = nm.FileByteCounter + uint64(size) oldOffset, oldSize := nm.m.Set(NeedleId(key), offset, size) - // glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize) if !oldOffset.IsZero() && oldSize != TombstoneFileSize { nm.DeletionCounter++ nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize) } } else { oldSize := nm.m.Delete(NeedleId(key)) - // glog.V(3).Infoln("removing key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize) nm.DeletionCounter++ nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize) } @@ -79,6 +64,10 @@ func (nm *NeedleMap) Delete(key NeedleId, offset Offset) error { return nm.appendToIndexFile(key, offset, TombstoneFileSize) } func (nm *NeedleMap) Close() { + indexFileName := nm.indexFile.Name() + if err := nm.indexFile.Sync(); err != nil { + glog.Warningf("sync file %s failed, %v", indexFileName, err) + } _ = nm.indexFile.Close() } func (nm *NeedleMap) Destroy() error { diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go index 539f83a87..ae2177a30 100644 --- a/weed/storage/needle_map_metric_test.go +++ b/weed/storage/needle_map_metric_test.go @@ -1,17 +1,18 @@ package storage import ( - "github.com/chrislusf/seaweedfs/weed/glog" - . "github.com/chrislusf/seaweedfs/weed/storage/types" "io/ioutil" "math/rand" "testing" + + "github.com/chrislusf/seaweedfs/weed/glog" + . 
"github.com/chrislusf/seaweedfs/weed/storage/types" ) func TestFastLoadingNeedleMapMetrics(t *testing.T) { idxFile, _ := ioutil.TempFile("", "tmp.idx") - nm := NewBtreeNeedleMap(idxFile) + nm := NewCompactNeedleMap(idxFile) for i := 0; i < 10000; i++ { nm.Put(Uint64ToNeedleId(uint64(i+1)), Uint32ToOffset(uint32(0)), uint32(1)) diff --git a/weed/storage/needle_map_sorted_file.go b/weed/storage/needle_map_sorted_file.go new file mode 100644 index 000000000..e6f9258f3 --- /dev/null +++ b/weed/storage/needle_map_sorted_file.go @@ -0,0 +1,105 @@ +package storage + +import ( + "os" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + . "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +type SortedFileNeedleMap struct { + baseNeedleMapper + baseFileName string + dbFile *os.File + dbFileSize int64 +} + +func NewSortedFileNeedleMap(baseFileName string, indexFile *os.File) (m *SortedFileNeedleMap, err error) { + m = &SortedFileNeedleMap{baseFileName: baseFileName} + m.indexFile = indexFile + fileName := baseFileName + ".sdx" + if !isSortedFileFresh(fileName, indexFile) { + glog.V(0).Infof("Start to Generate %s from %s", fileName, indexFile.Name()) + erasure_coding.WriteSortedFileFromIdx(baseFileName, ".sdx") + glog.V(0).Infof("Finished Generating %s from %s", fileName, indexFile.Name()) + } + glog.V(1).Infof("Opening %s...", fileName) + + if m.dbFile, err = os.Open(baseFileName + ".sdx"); err != nil { + return + } + dbStat, _ := m.dbFile.Stat() + m.dbFileSize = dbStat.Size() + glog.V(1).Infof("Loading %s...", indexFile.Name()) + mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile) + if indexLoadError != nil { + return nil, indexLoadError + } + m.mapMetric = *mm + return +} + +func isSortedFileFresh(dbFileName string, indexFile *os.File) bool { + // normally we always write to index file first + dbFile, err := os.Open(dbFileName) + if err != nil { + return false + } + defer dbFile.Close() + dbStat, dbStatErr := dbFile.Stat() + indexStat, indexStatErr := indexFile.Stat() + if dbStatErr != nil || indexStatErr != nil { + glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) + return false + } + + return dbStat.ModTime().After(indexStat.ModTime()) +} + +func (m *SortedFileNeedleMap) Get(key NeedleId) (element *needle_map.NeedleValue, ok bool) { + offset, size, err := erasure_coding.SearchNeedleFromSortedIndex(m.dbFile, m.dbFileSize, key, nil) + ok = err == nil + return &needle_map.NeedleValue{Key: key, Offset: offset, Size: size}, ok + +} + +func (m *SortedFileNeedleMap) Put(key NeedleId, offset Offset, size uint32) error { + return os.ErrInvalid +} + +func (m *SortedFileNeedleMap) Delete(key NeedleId, offset Offset) error { + + _, size, err := erasure_coding.SearchNeedleFromSortedIndex(m.dbFile, m.dbFileSize, key, nil) + + if err != nil { + if err == erasure_coding.NotFoundError { + return nil + } + return err + } + + if size == TombstoneFileSize { + return nil + } + + // write to index file first + if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil { + return err + } + _, _, err = erasure_coding.SearchNeedleFromSortedIndex(m.dbFile, m.dbFileSize, key, erasure_coding.MarkNeedleDeleted) + + return err +} + +func (m *SortedFileNeedleMap) Close() { + m.indexFile.Close() + m.dbFile.Close() +} + +func (m *SortedFileNeedleMap) Destroy() error { + m.Close() + os.Remove(m.indexFile.Name()) + return os.Remove(m.baseFileName + ".sdx") +} 
diff --git a/weed/storage/store.go b/weed/storage/store.go index 4d1061bed..e29680f6f 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -2,14 +2,19 @@ package storage import ( "fmt" + "path/filepath" + "strings" "sync/atomic" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" . "github.com/chrislusf/seaweedfs/weed/storage/types" - "google.golang.org/grpc" ) const ( @@ -60,7 +65,7 @@ func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, di return } func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32) error { - rt, e := NewReplicaPlacementFromString(replicaPlacement) + rt, e := super_block.NewReplicaPlacementFromString(replicaPlacement) if e != nil { return e } @@ -101,7 +106,7 @@ func (s *Store) FindFreeLocation() (ret *DiskLocation) { } return ret } -func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) error { +func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) error { if s.findVolume(vid) != nil { return fmt.Errorf("Volume Id %d already exists!", vid) } @@ -126,10 +131,10 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind return fmt.Errorf("No more free space left") } -func (s *Store) Status() []*VolumeInfo { +func (s *Store) VolumeInfos() []*VolumeInfo { var stats []*VolumeInfo for _, location := range s.Locations { - location.RLock() + location.volumesLock.RLock() for k, v := range location.volumes { s := &VolumeInfo{ Id: needle.VolumeId(k), @@ -140,13 +145,14 @@ func (s *Store) Status() []*VolumeInfo { FileCount: int(v.FileCount()), DeleteCount: int(v.DeletedCount()), DeletedByteCount: v.DeletedSize(), - ReadOnly: v.readOnly, + ReadOnly: v.noWriteOrDelete || v.noWriteCanDelete, Ttl: v.Ttl, CompactRevision: uint32(v.CompactionRevision), } + s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey() stats = append(stats, s) } - location.RUnlock() + location.volumesLock.RUnlock() } sortVolumeInfos(stats) return stats @@ -167,7 +173,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { for _, location := range s.Locations { var deleteVids []needle.VolumeId maxVolumeCount = maxVolumeCount + location.MaxVolumeCount - location.RLock() + location.volumesLock.RLock() for _, v := range location.volumes { if maxFileKey < v.MaxFileKey() { maxFileKey = v.MaxFileKey() @@ -184,16 +190,16 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { fileSize, _, _ := v.FileStat() collectionVolumeSize[v.Collection] += fileSize } - location.RUnlock() + location.volumesLock.RUnlock() if len(deleteVids) > 0 { // delete expired volumes. 
- location.Lock() + location.volumesLock.Lock() for _, vid := range deleteVids { location.deleteVolumeById(vid) glog.V(0).Infoln("volume", vid, "is deleted.") } - location.Unlock() + location.volumesLock.Unlock() } } @@ -223,11 +229,11 @@ func (s *Store) Close() { func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (size uint32, isUnchanged bool, err error) { if v := s.findVolume(i); v != nil { - if v.readOnly { + if v.noWriteOrDelete || v.noWriteCanDelete { err = fmt.Errorf("volume %d is read only", i) return } - if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(size, v.version)) { + if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(size, v.Version())) { _, size, isUnchanged, err = v.writeNeedle(n) } else { err = fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize()) @@ -241,10 +247,10 @@ func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (size uin func (s *Store) DeleteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (uint32, error) { if v := s.findVolume(i); v != nil { - if v.readOnly { + if v.noWriteOrDelete { return 0, fmt.Errorf("volume %d is read only", i) } - if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(0, v.version)) { + if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(0, v.Version())) { return v.deleteNeedle(n) } else { return 0, fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize()) @@ -273,7 +279,7 @@ func (s *Store) MarkVolumeReadonly(i needle.VolumeId) error { if v == nil { return fmt.Errorf("volume %d not found", i) } - v.readOnly = true + v.noWriteOrDelete = true return nil } @@ -343,6 +349,31 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error { return fmt.Errorf("volume %d not found on disk", i) } +func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error { + + for _, location := range s.Locations { + fileInfo, found := location.LocateVolume(i) + if !found { + continue + } + // load, modify, save + baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name())) + vifFile := filepath.Join(location.Directory, baseFileName+".vif") + volumeInfo, _, err := pb.MaybeLoadVolumeInfo(vifFile) + if err != nil { + return fmt.Errorf("volume %d fail to load vif", i) + } + volumeInfo.Replication = replication + err = pb.SaveVolumeInfo(vifFile, volumeInfo) + if err != nil { + return fmt.Errorf("volume %d fail to save vif", i) + } + return nil + } + + return fmt.Errorf("volume %d not found on disk", i) +} + func (s *Store) SetVolumeSizeLimit(x uint64) { atomic.StoreUint64(&s.volumeSizeLimit, x) } diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go index 7e3f1a46c..e423e7dca 100644 --- a/weed/storage/store_ec.go +++ b/weed/storage/store_ec.go @@ -8,6 +8,8 @@ import ( "sync" "time" + "github.com/klauspost/reedsolomon" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" @@ -16,7 +18,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/klauspost/reedsolomon" ) func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat { @@ -115,19 +116,11 @@ func (s *Store) DestroyEcVolume(vid needle.VolumeId) { } } -func (s *Store) ReadEcShardNeedle(ctx context.Context, vid 
needle.VolumeId, n *needle.Needle) (int, error) { +func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, error) { for _, location := range s.Locations { if localEcVolume, found := location.FindEcVolume(vid); found { - // read the volume version - for localEcVolume.Version == 0 { - err := s.readEcVolumeVersion(ctx, vid, localEcVolume) - time.Sleep(1357 * time.Millisecond) - glog.V(0).Infof("ReadEcShardNeedle vid %d version:%v: %v", vid, localEcVolume.Version, err) - } - version := localEcVolume.Version - - offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n.Id, version) + offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n.Id, localEcVolume.Version) if err != nil { return 0, fmt.Errorf("locate in local ec volume: %v", err) } @@ -140,7 +133,7 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n if len(intervals) > 1 { glog.V(3).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals) } - bytes, isDeleted, err := s.readEcShardIntervals(ctx, vid, n.Id, localEcVolume, intervals) + bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals) if err != nil { return 0, fmt.Errorf("ReadEcShardIntervals: %v", err) } @@ -148,7 +141,7 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n return 0, fmt.Errorf("ec entry %s is deleted", n.Id) } - err = n.ReadBytes(bytes, offset.ToAcutalOffset(), size, version) + err = n.ReadBytes(bytes, offset.ToAcutalOffset(), size, localEcVolume.Version) if err != nil { return 0, fmt.Errorf("readbytes: %v", err) } @@ -159,30 +152,14 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n return 0, fmt.Errorf("ec shard %d not found", vid) } -func (s *Store) readEcVolumeVersion(ctx context.Context, vid needle.VolumeId, ecVolume *erasure_coding.EcVolume) (err error) { - - interval := erasure_coding.Interval{ - BlockIndex: 0, - InnerBlockOffset: 0, - Size: _SuperBlockSize, - IsLargeBlock: true, // it could be large block, but ok in this place - LargeBlockRowsCount: 0, - } - data, _, err := s.readEcShardIntervals(ctx, vid, 0, ecVolume, []erasure_coding.Interval{interval}) - if err == nil { - ecVolume.Version = needle.Version(data[0]) - } - return -} - -func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, is_deleted bool, err error) { +func (s *Store) readEcShardIntervals(vid needle.VolumeId, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, is_deleted bool, err error) { - if err = s.cachedLookupEcShardLocations(ctx, ecVolume); err != nil { + if err = s.cachedLookupEcShardLocations(ecVolume); err != nil { return nil, false, fmt.Errorf("failed to locate shard via master grpc %s: %v", s.MasterAddress, err) } for i, interval := range intervals { - if d, isDeleted, e := s.readOneEcShardInterval(ctx, needleId, ecVolume, interval); e != nil { + if d, isDeleted, e := s.readOneEcShardInterval(needleId, ecVolume, interval); e != nil { return nil, isDeleted, e } else { if isDeleted { @@ -198,7 +175,7 @@ func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, n return } -func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, is_deleted bool, err error) { +func (s *Store) 
readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, is_deleted bool, err error) { shardId, actualOffset := interval.ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize) data = make([]byte, interval.Size) if shard, found := ecVolume.FindEcVolumeShard(shardId); found { @@ -213,7 +190,7 @@ func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.Needl // try reading directly if hasShardIdLocation { - _, is_deleted, err = s.readRemoteEcShardInterval(ctx, sourceDataNodes, needleId, ecVolume.VolumeId, shardId, data, actualOffset) + _, is_deleted, err = s.readRemoteEcShardInterval(sourceDataNodes, needleId, ecVolume.VolumeId, shardId, data, actualOffset) if err == nil { return } @@ -222,7 +199,7 @@ func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.Needl } // try reading by recovering from other shards - _, is_deleted, err = s.recoverOneRemoteEcShardInterval(ctx, needleId, ecVolume, shardId, data, actualOffset) + _, is_deleted, err = s.recoverOneRemoteEcShardInterval(needleId, ecVolume, shardId, data, actualOffset) if err == nil { return } @@ -238,7 +215,7 @@ func forgetShardId(ecVolume *erasure_coding.EcVolume, shardId erasure_coding.Sha ecVolume.ShardLocationsLock.Unlock() } -func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *erasure_coding.EcVolume) (err error) { +func (s *Store) cachedLookupEcShardLocations(ecVolume *erasure_coding.EcVolume) (err error) { shardCount := len(ecVolume.ShardLocations) if shardCount < erasure_coding.DataShardsCount && @@ -257,7 +234,7 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras req := &master_pb.LookupEcVolumeRequest{ VolumeId: uint32(ecVolume.VolumeId), } - resp, err := masterClient.LookupEcVolume(ctx, req) + resp, err := masterClient.LookupEcVolume(context.Background(), req) if err != nil { return fmt.Errorf("lookup ec volume %d: %v", ecVolume.VolumeId, err) } @@ -281,7 +258,7 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras return } -func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes []string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { +func (s *Store) readRemoteEcShardInterval(sourceDataNodes []string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { if len(sourceDataNodes) == 0 { return 0, false, fmt.Errorf("failed to find ec shard %d.%d", vid, shardId) @@ -289,7 +266,7 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [ for _, sourceDataNode := range sourceDataNodes { glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode) - n, is_deleted, err = s.doReadRemoteEcShardInterval(ctx, sourceDataNode, needleId, vid, shardId, buf, offset) + n, is_deleted, err = s.doReadRemoteEcShardInterval(sourceDataNode, needleId, vid, shardId, buf, offset) if err == nil { return } @@ -299,12 +276,12 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [ return } -func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { +func (s *Store) 
doReadRemoteEcShardInterval(sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { // copy data slice - shardReadClient, err := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{ + shardReadClient, err := client.VolumeEcShardRead(context.Background(), &volume_server_pb.VolumeEcShardReadRequest{ VolumeId: uint32(vid), ShardId: uint32(shardId), Offset: offset, @@ -339,7 +316,7 @@ func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode return } -func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { +func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { glog.V(3).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount) @@ -367,7 +344,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, needleId ty go func(shardId erasure_coding.ShardId, locations []string) { defer wg.Done() data := make([]byte, len(buf)) - nRead, isDeleted, readErr := s.readRemoteEcShardInterval(ctx, locations, needleId, ecVolume.VolumeId, shardId, data, offset) + nRead, isDeleted, readErr := s.readRemoteEcShardInterval(locations, needleId, ecVolume.VolumeId, shardId, data, offset) if readErr != nil { glog.V(3).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr) forgetShardId(ecVolume, shardId) diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go index e027d2887..4a75fb20b 100644 --- a/weed/storage/store_ec_delete.go +++ b/weed/storage/store_ec_delete.go @@ -12,9 +12,9 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/types" ) -func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) { +func (s *Store) DeleteEcShardNeedle(ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) { - count, err := s.ReadEcShardNeedle(ctx, ecVolume.VolumeId, n) + count, err := s.ReadEcShardNeedle(ecVolume.VolumeId, n) if err != nil { return 0, err @@ -24,7 +24,7 @@ func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_codin return 0, fmt.Errorf("unexpected cookie %x", cookie) } - if err = s.doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx, ecVolume, n.Id); err != nil { + if err = s.doDeleteNeedleFromAtLeastOneRemoteEcShards(ecVolume, n.Id); err != nil { return 0, err } @@ -32,7 +32,7 @@ func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_codin } -func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { +func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { _, _, intervals, err := ecVolume.LocateEcShardNeedle(needleId, ecVolume.Version) @@ -43,13 +43,13 @@ func 
(s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context, shardId, _ := intervals[0].ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize) hasDeletionSuccess := false - err = s.doDeleteNeedleFromRemoteEcShardServers(ctx, shardId, ecVolume, needleId) + err = s.doDeleteNeedleFromRemoteEcShardServers(shardId, ecVolume, needleId) if err == nil { hasDeletionSuccess = true } for shardId = erasure_coding.DataShardsCount; shardId < erasure_coding.TotalShardsCount; shardId++ { - if parityDeletionError := s.doDeleteNeedleFromRemoteEcShardServers(ctx, shardId, ecVolume, needleId); parityDeletionError == nil { + if parityDeletionError := s.doDeleteNeedleFromRemoteEcShardServers(shardId, ecVolume, needleId); parityDeletionError == nil { hasDeletionSuccess = true } } @@ -62,7 +62,7 @@ func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context, } -func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shardId erasure_coding.ShardId, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { +func (s *Store) doDeleteNeedleFromRemoteEcShardServers(shardId erasure_coding.ShardId, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { ecVolume.ShardLocationsLock.RLock() sourceDataNodes, hasShardLocations := ecVolume.ShardLocations[shardId] @@ -74,7 +74,7 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shar for _, sourceDataNode := range sourceDataNodes { glog.V(4).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode) - err := s.doDeleteNeedleFromRemoteEcShard(ctx, sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId) + err := s.doDeleteNeedleFromRemoteEcShard(sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId) if err != nil { return err } @@ -85,12 +85,12 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shar } -func (s *Store) doDeleteNeedleFromRemoteEcShard(ctx context.Context, sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error { +func (s *Store) doDeleteNeedleFromRemoteEcShard(sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error { return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { // copy data slice - _, err := client.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{ + _, err := client.VolumeEcBlobDelete(context.Background(), &volume_server_pb.VolumeEcBlobDeleteRequest{ VolumeId: uint32(vid), Collection: collection, FileKey: uint64(needleId), diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go index b1f1a6277..e94d9b516 100644 --- a/weed/storage/store_vacuum.go +++ b/weed/storage/store_vacuum.go @@ -16,7 +16,8 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) { } func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error { if v := s.findVolume(vid); v != nil { - return v.Compact(preallocate, compactionBytePerSecond) + return v.Compact2(preallocate) // compactionBytePerSecond + // return v.Compact(preallocate, compactionBytePerSecond) } return fmt.Errorf("volume id %d is not found during compact", vid) } diff --git a/weed/storage/replica_placement.go 
b/weed/storage/super_block/replica_placement.go similarity index 98% rename from weed/storage/replica_placement.go rename to weed/storage/super_block/replica_placement.go index c1aca52eb..fcccbba7d 100644 --- a/weed/storage/replica_placement.go +++ b/weed/storage/super_block/replica_placement.go @@ -1,4 +1,4 @@ -package storage +package super_block import ( "errors" diff --git a/weed/storage/replica_placement_test.go b/weed/storage/super_block/replica_placement_test.go similarity index 93% rename from weed/storage/replica_placement_test.go rename to weed/storage/super_block/replica_placement_test.go index 7968af7cb..7742ba548 100644 --- a/weed/storage/replica_placement_test.go +++ b/weed/storage/super_block/replica_placement_test.go @@ -1,4 +1,4 @@ -package storage +package super_block import ( "testing" diff --git a/weed/storage/super_block/super_block.go b/weed/storage/super_block/super_block.go new file mode 100644 index 000000000..f48cd0bdc --- /dev/null +++ b/weed/storage/super_block/super_block.go @@ -0,0 +1,69 @@ +package super_block + +import ( + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + SuperBlockSize = 8 +) + +/* +* Super block currently has 8 bytes allocated for each volume. +* Byte 0: version, 1, 2 or 3 +* Byte 1: Replica Placement strategy, 000, 001, 002, 010, etc +* Byte 2 and byte 3: Time to live. See TTL for definition +* Byte 4 and byte 5: The number of times the volume has been compacted. +* Remaining bytes: reserved + */ +type SuperBlock struct { + Version needle.Version + ReplicaPlacement *ReplicaPlacement + Ttl *needle.TTL + CompactionRevision uint16 + Extra *master_pb.SuperBlockExtra + ExtraSize uint16 +} + +func (s *SuperBlock) BlockSize() int { + switch s.Version { + case needle.Version2, needle.Version3: + return SuperBlockSize + int(s.ExtraSize) + } + return SuperBlockSize +} + +func (s *SuperBlock) Bytes() []byte { + header := make([]byte, SuperBlockSize) + header[0] = byte(s.Version) + header[1] = s.ReplicaPlacement.Byte() + s.Ttl.ToBytes(header[2:4]) + util.Uint16toBytes(header[4:6], s.CompactionRevision) + + if s.Extra != nil { + extraData, err := proto.Marshal(s.Extra) + if err != nil { + glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err) + } + extraSize := len(extraData) + if extraSize > 256*256-2 { + // reserve a couple of values for future extension + glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2) + } + s.ExtraSize = uint16(extraSize) + util.Uint16toBytes(header[6:8], s.ExtraSize) + + header = append(header, extraData...)
+ } + + return header +} + +func (s *SuperBlock) Initialized() bool { + return s.ReplicaPlacement != nil && s.Ttl != nil +} diff --git a/weed/storage/super_block/super_block_read.go.go b/weed/storage/super_block/super_block_read.go.go new file mode 100644 index 000000000..9eb12e116 --- /dev/null +++ b/weed/storage/super_block/super_block_read.go.go @@ -0,0 +1,44 @@ +package super_block + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +// ReadSuperBlock reads the super block from the data file and loads it into the volume's super block +func ReadSuperBlock(datBackend backend.BackendStorageFile) (superBlock SuperBlock, err error) { + + header := make([]byte, SuperBlockSize) + if _, e := datBackend.ReadAt(header, 0); e != nil { + err = fmt.Errorf("cannot read volume %s super block: %v", datBackend.Name(), e) + return + } + + superBlock.Version = needle.Version(header[0]) + if superBlock.ReplicaPlacement, err = NewReplicaPlacementFromByte(header[1]); err != nil { + err = fmt.Errorf("cannot read replica type: %s", err.Error()) + return + } + superBlock.Ttl = needle.LoadTTLFromBytes(header[2:4]) + superBlock.CompactionRevision = util.BytesToUint16(header[4:6]) + superBlock.ExtraSize = util.BytesToUint16(header[6:8]) + + if superBlock.ExtraSize > 0 { + // read the extra data that follows the fixed-size header + extraData := make([]byte, int(superBlock.ExtraSize)) + if _, e := datBackend.ReadAt(extraData, SuperBlockSize); e != nil { + err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.Name(), e) + return + } + superBlock.Extra = &master_pb.SuperBlockExtra{} + err = proto.Unmarshal(extraData, superBlock.Extra) + if err != nil { + err = fmt.Errorf("cannot parse volume %s super block extra: %v", datBackend.Name(), err) + return + } + } + + return +} diff --git a/weed/storage/volume_super_block_test.go b/weed/storage/super_block/super_block_test.go similarity index 86% rename from weed/storage/volume_super_block_test.go rename to weed/storage/super_block/super_block_test.go index 06ad8a5d3..25699070d 100644 --- a/weed/storage/volume_super_block_test.go +++ b/weed/storage/super_block/super_block_test.go @@ -1,4 +1,4 @@ -package storage +package super_block import ( "testing" @@ -10,7 +10,7 @@ func TestSuperBlockReadWrite(t *testing.T) { rp, _ := NewReplicaPlacementFromByte(byte(001)) ttl, _ := needle.ReadTTL("15d") s := &SuperBlock{ - version: needle.CurrentVersion, + Version: needle.CurrentVersion, ReplicaPlacement: rp, Ttl: ttl, } diff --git a/weed/storage/volume.go b/weed/storage/volume.go index e85696eab..7da83de7a 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -2,18 +2,19 @@ package storage import ( "fmt" + "path" + "strconv" + "sync" + "time" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" - "path" - "strconv" - "sync" - "time" - "github.com/chrislusf/seaweedfs/weed/glog" ) @@ -21,15 +22,17 @@ type Volume struct { Id needle.VolumeId dir string Collection string - DataBackend backend.DataStorageBackend + DataBackend backend.BackendStorageFile nm NeedleMapper needleMapKind NeedleMapType - readOnly bool + noWriteOrDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete + noWriteCanDelete bool // if readonly, either noWriteOrDelete or
noWriteCanDelete + hasRemoteFile bool // if the volume has a remote file MemoryMapMaxSizeMb uint32 - SuperBlock + super_block.SuperBlock - dataFileAccessLock sync.Mutex + dataFileAccessLock sync.RWMutex lastModifiedTsSeconds uint64 //unix time in seconds lastAppendAtNs uint64 //unix time in nanoseconds @@ -37,18 +40,20 @@ type Volume struct { lastCompactRevision uint16 isCompacting bool + + volumeInfo *volume_server_pb.VolumeInfo } -func NewVolume(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) { +func NewVolume(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) { // if replicaPlacement is nil, the superblock will be loaded from disk v = &Volume{dir: dirname, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb} - v.SuperBlock = SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl} + v.SuperBlock = super_block.SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl} v.needleMapKind = needleMapKind e = v.load(true, true, needleMapKind, preallocate) return } func (v *Volume) String() string { - return fmt.Sprintf("Id:%v, dir:%s, Collection:%s, dataFile:%v, nm:%v, readOnly:%v", v.Id, v.dir, v.Collection, v.DataBackend, v.nm, v.readOnly) + return fmt.Sprintf("Id:%v, dir:%s, Collection:%s, dataFile:%v, nm:%v, noWrite:%v canDelete:%v", v.Id, v.dir, v.Collection, v.DataBackend, v.nm, v.noWriteOrDelete || v.noWriteCanDelete, v.noWriteCanDelete) } func VolumeFileName(dir string, collection string, id int) (fileName string) { @@ -65,12 +70,15 @@ func (v *Volume) FileName() (fileName string) { } func (v *Volume) Version() needle.Version { - return v.SuperBlock.Version() + if v.volumeInfo.Version != 0 { + v.SuperBlock.Version = needle.Version(v.volumeInfo.Version) + } + return v.SuperBlock.Version } func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time) { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.DataBackend == nil { return @@ -80,13 +88,13 @@ func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time) if e == nil { return uint64(datFileSize), v.nm.IndexFileSize(), modTime } - glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.String(), e) + glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e) return // -1 causes integer overflow and the volume to become unwritable. 
} func (v *Volume) ContentSize() uint64 { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -94,8 +102,8 @@ func (v *Volume) ContentSize() uint64 { } func (v *Volume) DeletedSize() uint64 { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -103,8 +111,8 @@ func (v *Volume) DeletedSize() uint64 { } func (v *Volume) FileCount() uint64 { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -112,8 +120,8 @@ func (v *Volume) FileCount() uint64 { } func (v *Volume) DeletedCount() uint64 { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -121,8 +129,8 @@ func (v *Volume) DeletedCount() uint64 { } func (v *Volume) MaxFileKey() types.NeedleId { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -130,8 +138,8 @@ func (v *Volume) MaxFileKey() types.NeedleId { } func (v *Volume) IndexFileSize() uint64 { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -172,9 +180,9 @@ func (v *Volume) expired(volumeSizeLimit uint64) bool { if v.Ttl == nil || v.Ttl.Minutes() == 0 { return false } - glog.V(1).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds) + glog.V(2).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds) livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60 - glog.V(1).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes) + glog.V(2).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes) if int64(v.Ttl.Minutes()) < livedMinutes { return true } @@ -200,18 +208,32 @@ func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool { func (v *Volume) ToVolumeInformationMessage() *master_pb.VolumeInformationMessage { size, _, modTime := v.FileStat() - return &master_pb.VolumeInformationMessage{ + volumInfo := &master_pb.VolumeInformationMessage{ Id: uint32(v.Id), Size: size, Collection: v.Collection, - FileCount: uint64(v.FileCount()), - DeleteCount: uint64(v.DeletedCount()), + FileCount: v.FileCount(), + DeleteCount: v.DeletedCount(), DeletedByteCount: v.DeletedSize(), - ReadOnly: v.readOnly, + ReadOnly: v.noWriteOrDelete || v.noWriteCanDelete, ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), Version: uint32(v.Version()), Ttl: v.Ttl.ToUint32(), CompactRevision: uint32(v.SuperBlock.CompactionRevision), ModifiedAtSecond: modTime.Unix(), } + + volumInfo.RemoteStorageName, volumInfo.RemoteStorageKey = v.RemoteStorageNameKey() + + return volumInfo +} + +func (v *Volume) RemoteStorageNameKey() (storageName, storageKey string) { + if v.volumeInfo == nil { + return + } + if len(v.volumeInfo.GetFiles()) == 0 { + return + } + return v.volumeInfo.GetFiles()[0].BackendName(), v.volumeInfo.GetFiles()[0].GetKey() } diff --git a/weed/storage/volume_backup.go b/weed/storage/volume_backup.go index fe0506917..f7075fe2b 100644 --- a/weed/storage/volume_backup.go +++ b/weed/storage/volume_backup.go @@ -6,17 +6,19 @@ import ( "io" "os" + "google.golang.org/grpc" + 
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" . "github.com/chrislusf/seaweedfs/weed/storage/types" - "google.golang.org/grpc" ) func (v *Volume) GetVolumeSyncStatus() *volume_server_pb.VolumeSyncStatusResponse { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() var syncStatus = &volume_server_pb.VolumeSyncStatusResponse{} if datSize, _, err := v.DataBackend.GetStat(); err == nil { @@ -62,8 +64,6 @@ update needle map when receiving new .dat bytes. But seems not necessary now.) func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.DialOption) error { - ctx := context.Background() - startFromOffset, _, _ := v.FileStat() appendAtNs, err := v.findLastAppendAtNs() if err != nil { @@ -74,7 +74,7 @@ func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.Dial err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - stream, err := client.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{ + stream, err := client.VolumeIncrementalCopy(context.Background(), &volume_server_pb.VolumeIncrementalCopyRequest{ VolumeId: uint32(v.Id), SinceNs: appendAtNs, }) @@ -108,7 +108,7 @@ func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.Dial } // add to needle map - return ScanVolumeFileFrom(v.version, v.DataBackend, int64(startFromOffset), &VolumeFileScanner4GenIdx{v: v}) + return ScanVolumeFileFrom(v.Version(), v.DataBackend, int64(startFromOffset), &VolumeFileScanner4GenIdx{v: v}) } @@ -154,11 +154,11 @@ func (v *Volume) locateLastAppendEntry() (Offset, error) { func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) { - n, _, bodyLength, err := needle.ReadNeedleHeader(v.DataBackend, v.SuperBlock.version, offset.ToAcutalOffset()) + n, _, bodyLength, err := needle.ReadNeedleHeader(v.DataBackend, v.SuperBlock.Version, offset.ToAcutalOffset()) if err != nil { return 0, fmt.Errorf("ReadNeedleHeader: %v", err) } - _, err = n.ReadNeedleBody(v.DataBackend, v.SuperBlock.version, offset.ToAcutalOffset()+int64(NeedleHeaderSize), bodyLength) + _, err = n.ReadNeedleBody(v.DataBackend, v.SuperBlock.Version, offset.ToAcutalOffset()+int64(NeedleHeaderSize), bodyLength) if err != nil { return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToAcutalOffset(), bodyLength, err) } @@ -244,7 +244,7 @@ type VolumeFileScanner4GenIdx struct { v *Volume } -func (scanner *VolumeFileScanner4GenIdx) VisitSuperBlock(superBlock SuperBlock) error { +func (scanner *VolumeFileScanner4GenIdx) VisitSuperBlock(superBlock super_block.SuperBlock) error { return nil } diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go index 61b59e9f7..a65c2a3ff 100644 --- a/weed/storage/volume_checking.go +++ b/weed/storage/volume_checking.go @@ -55,7 +55,7 @@ func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err return } -func verifyNeedleIntegrity(datFile backend.DataStorageBackend, v needle.Version, offset int64, key NeedleId, size uint32) (lastAppendAtNs uint64, err error) { +func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, offset int64, key NeedleId, size uint32) (lastAppendAtNs uint64, 
err error) { n := new(needle.Needle) if err = n.ReadData(datFile, offset, size, v); err != nil { return n.AppendAtNs, err diff --git a/weed/storage/volume_create.go b/weed/storage/volume_create.go index b27a62990..ffcb246a4 100644 --- a/weed/storage/volume_create.go +++ b/weed/storage/volume_create.go @@ -9,10 +9,13 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/backend" ) -func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.DataStorageBackend, error) { +func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) { file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if e != nil { + return nil, e + } if preallocate > 0 { glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName) } - return backend.NewDiskFile(file), e + return backend.NewDiskFile(file), nil } diff --git a/weed/storage/volume_create_linux.go b/weed/storage/volume_create_linux.go index e3305d991..ee599ac32 100644 --- a/weed/storage/volume_create_linux.go +++ b/weed/storage/volume_create_linux.go @@ -10,11 +10,14 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/backend" ) -func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.DataStorageBackend, error) { +func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) { file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if e != nil { + return nil, e + } if preallocate != 0 { syscall.Fallocate(int(file.Fd()), 1, 0, preallocate) glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName) } - return backend.NewDiskFile(file), e + return backend.NewDiskFile(file), nil } diff --git a/weed/storage/volume_create_windows.go b/weed/storage/volume_create_windows.go index 81536810b..e1c0b961f 100644 --- a/weed/storage/volume_create_windows.go +++ b/weed/storage/volume_create_windows.go @@ -11,18 +11,23 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map/os_overloads" ) -func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.DataStorageBackend, error) { - +func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) { if preallocate > 0 { glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName) } if memoryMapSizeMB > 0 { file, e := os_overloads.OpenFile(fileName, windows.O_RDWR|windows.O_CREAT, 0644, true) - return memory_map.NewMemoryMappedFile(file, memoryMapSizeMB), e + if e != nil { + return nil, e + } + return memory_map.NewMemoryMappedFile(file, memoryMapSizeMB), nil } else { file, e := os_overloads.OpenFile(fileName, windows.O_RDWR|windows.O_CREAT|windows.O_TRUNC, 0644, false) - return backend.NewDiskFile(file), e + if e != nil { + return nil, e + } + return backend.NewDiskFile(file), nil } } diff --git a/weed/storage/volume_info.go b/weed/storage/volume_info.go index 111058b6e..313818cde 100644 --- a/weed/storage/volume_info.go +++ b/weed/storage/volume_info.go @@ -6,37 +6,42 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) type VolumeInfo struct { - Id needle.VolumeId - Size uint64 - ReplicaPlacement *ReplicaPlacement - Ttl *needle.TTL - Collection string - Version needle.Version - FileCount int - DeleteCount int - DeletedByteCount 
uint64 - ReadOnly bool - CompactRevision uint32 - ModifiedAtSecond int64 + Id needle.VolumeId + Size uint64 + ReplicaPlacement *super_block.ReplicaPlacement + Ttl *needle.TTL + Collection string + Version needle.Version + FileCount int + DeleteCount int + DeletedByteCount uint64 + ReadOnly bool + CompactRevision uint32 + ModifiedAtSecond int64 + RemoteStorageName string + RemoteStorageKey string } func NewVolumeInfo(m *master_pb.VolumeInformationMessage) (vi VolumeInfo, err error) { vi = VolumeInfo{ - Id: needle.VolumeId(m.Id), - Size: m.Size, - Collection: m.Collection, - FileCount: int(m.FileCount), - DeleteCount: int(m.DeleteCount), - DeletedByteCount: m.DeletedByteCount, - ReadOnly: m.ReadOnly, - Version: needle.Version(m.Version), - CompactRevision: m.CompactRevision, - ModifiedAtSecond: m.ModifiedAtSecond, + Id: needle.VolumeId(m.Id), + Size: m.Size, + Collection: m.Collection, + FileCount: int(m.FileCount), + DeleteCount: int(m.DeleteCount), + DeletedByteCount: m.DeletedByteCount, + ReadOnly: m.ReadOnly, + Version: needle.Version(m.Version), + CompactRevision: m.CompactRevision, + ModifiedAtSecond: m.ModifiedAtSecond, + RemoteStorageName: m.RemoteStorageName, + RemoteStorageKey: m.RemoteStorageKey, } - rp, e := NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) + rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) if e != nil { return vi, e } @@ -51,7 +56,7 @@ func NewVolumeInfoFromShort(m *master_pb.VolumeShortInformationMessage) (vi Volu Collection: m.Collection, Version: needle.Version(m.Version), } - rp, e := NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) + rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) if e != nil { return vi, e } @@ -60,6 +65,10 @@ func NewVolumeInfoFromShort(m *master_pb.VolumeShortInformationMessage) (vi Volu return vi, nil } +func (vi VolumeInfo) IsRemote() bool { + return vi.RemoteStorageName != "" +} + func (vi VolumeInfo) String() string { return fmt.Sprintf("Id:%d, Size:%d, ReplicaPlacement:%s, Collection:%s, Version:%v, FileCount:%d, DeleteCount:%d, DeletedByteCount:%d, ReadOnly:%v", vi.Id, vi.Size, vi.ReplicaPlacement, vi.Collection, vi.Version, vi.FileCount, vi.DeleteCount, vi.DeletedByteCount, vi.ReadOnly) @@ -67,18 +76,20 @@ func (vi VolumeInfo) String() string { func (vi VolumeInfo) ToVolumeInformationMessage() *master_pb.VolumeInformationMessage { return &master_pb.VolumeInformationMessage{ - Id: uint32(vi.Id), - Size: uint64(vi.Size), - Collection: vi.Collection, - FileCount: uint64(vi.FileCount), - DeleteCount: uint64(vi.DeleteCount), - DeletedByteCount: vi.DeletedByteCount, - ReadOnly: vi.ReadOnly, - ReplicaPlacement: uint32(vi.ReplicaPlacement.Byte()), - Version: uint32(vi.Version), - Ttl: vi.Ttl.ToUint32(), - CompactRevision: vi.CompactRevision, - ModifiedAtSecond: vi.ModifiedAtSecond, + Id: uint32(vi.Id), + Size: uint64(vi.Size), + Collection: vi.Collection, + FileCount: uint64(vi.FileCount), + DeleteCount: uint64(vi.DeleteCount), + DeletedByteCount: vi.DeletedByteCount, + ReadOnly: vi.ReadOnly, + ReplicaPlacement: uint32(vi.ReplicaPlacement.Byte()), + Version: uint32(vi.Version), + Ttl: vi.Ttl.ToUint32(), + CompactRevision: vi.CompactRevision, + ModifiedAtSecond: vi.ModifiedAtSecond, + RemoteStorageName: vi.RemoteStorageName, + RemoteStorageKey: vi.RemoteStorageKey, } } diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go index 6f1d8fe40..6b42fc452 100644 --- a/weed/storage/volume_loading.go +++ b/weed/storage/volume_loading.go @@ -3,146 +3,148 @@ package 
storage import ( "fmt" "os" - "time" - "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/storage/backend" - "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/util" ) -func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType) (v *Volume, e error) { +func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType) (v *Volume, err error) { v = &Volume{dir: dirname, Collection: collection, Id: id} - v.SuperBlock = SuperBlock{} + v.SuperBlock = super_block.SuperBlock{} v.needleMapKind = needleMapKind - e = v.load(false, false, needleMapKind, 0) + err = v.load(false, false, needleMapKind, 0) return } -func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType, preallocate int64) error { - var e error +func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType, preallocate int64) (err error) { fileName := v.FileName() alreadyHasSuperBlock := false - // open dat file - if exists, canRead, canWrite, modifiedTime, fileSize := checkFile(fileName + ".dat"); exists { + hasVolumeInfoFile := v.maybeLoadVolumeInfo() && v.volumeInfo.Version != 0 + + if v.HasRemoteFile() { + v.noWriteCanDelete = true + v.noWriteOrDelete = false + glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo.Files) + v.LoadRemoteFile() + alreadyHasSuperBlock = true + } else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(fileName + ".dat"); exists { + // open dat file if !canRead { return fmt.Errorf("cannot read Volume Data file %s.dat", fileName) } var dataFile *os.File if canWrite { - dataFile, e = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644) + dataFile, err = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644) } else { glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode") - dataFile, e = os.Open(fileName + ".dat") - v.readOnly = true + dataFile, err = os.Open(fileName + ".dat") + v.noWriteOrDelete = true } v.lastModifiedTsSeconds = uint64(modifiedTime.Unix()) - if fileSize >= _SuperBlockSize { + if fileSize >= super_block.SuperBlockSize { alreadyHasSuperBlock = true } v.DataBackend = backend.NewDiskFile(dataFile) } else { if createDatIfMissing { - v.DataBackend, e = createVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb) + v.DataBackend, err = createVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb) } else { return fmt.Errorf("Volume Data file %s.dat does not exist.", fileName) } } - if e != nil { - if !os.IsPermission(e) { - return fmt.Errorf("cannot load Volume Data %s.dat: %v", fileName, e) + if err != nil { + if !os.IsPermission(err) { + return fmt.Errorf("cannot load Volume Data %s.dat: %v", fileName, err) } else { - return fmt.Errorf("load data file %s.dat: %v", fileName, e) + return fmt.Errorf("load data file %s.dat: %v", fileName, err) } } if alreadyHasSuperBlock { - e = v.readSuperBlock() + err = v.readSuperBlock() } else { if !v.SuperBlock.Initialized() { return fmt.Errorf("volume %s.dat not initialized", fileName) } - e = v.maybeWriteSuperBlock() + err = v.maybeWriteSuperBlock() } - if 
e == nil && alsoLoadIndex { + if err == nil && alsoLoadIndex { var indexFile *os.File - if v.readOnly { - glog.V(1).Infoln("open to read file", fileName+".idx") - if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); e != nil { - return fmt.Errorf("cannot read Volume Index %s.idx: %v", fileName, e) + if v.noWriteOrDelete { + glog.V(0).Infoln("open to read file", fileName+".idx") + if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); err != nil { + return fmt.Errorf("cannot read Volume Index %s.idx: %v", fileName, err) } } else { glog.V(1).Infoln("open to write file", fileName+".idx") - if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); e != nil { - return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, e) + if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil { + return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, err) } } - if v.lastAppendAtNs, e = CheckVolumeDataIntegrity(v, indexFile); e != nil { - v.readOnly = true - glog.V(0).Infof("volumeDataIntegrityChecking failed %v", e) + if v.lastAppendAtNs, err = CheckVolumeDataIntegrity(v, indexFile); err != nil { + v.noWriteOrDelete = true + glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err) } - switch needleMapKind { - case NeedleMapInMemory: - glog.V(0).Infoln("loading index", fileName+".idx", "to memory readonly", v.readOnly) - if v.nm, e = LoadCompactNeedleMap(indexFile); e != nil { - glog.V(0).Infof("loading index %s to memory error: %v", fileName+".idx", e) - } - case NeedleMapLevelDb: - glog.V(0).Infoln("loading leveldb", fileName+".ldb") - opts := &opt.Options{ - BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB - WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB - CompactionTableSizeMultiplier: 10, // default value is 1 - } - if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); e != nil { - glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", e) - } - case NeedleMapLevelDbMedium: - glog.V(0).Infoln("loading leveldb medium", fileName+".ldb") - opts := &opt.Options{ - BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB - WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB - CompactionTableSizeMultiplier: 10, // default value is 1 - } - if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); e != nil { - glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", e) - } - case NeedleMapLevelDbLarge: - glog.V(0).Infoln("loading leveldb large", fileName+".ldb") - opts := &opt.Options{ - BlockCacheCapacity: 8 * 1024 * 1024, // default value is 8MiB - WriteBuffer: 4 * 1024 * 1024, // default value is 4MiB - CompactionTableSizeMultiplier: 10, // default value is 1 + + if v.noWriteOrDelete || v.noWriteCanDelete { + if v.nm, err = NewSortedFileNeedleMap(fileName, indexFile); err != nil { + glog.V(0).Infof("loading sorted db %s error: %v", fileName+".sdx", err) } - if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); e != nil { - glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", e) + } else { + switch needleMapKind { + case NeedleMapInMemory: + glog.V(0).Infoln("loading index", fileName+".idx", "to memory") + if v.nm, err = LoadCompactNeedleMap(indexFile); err != nil { + glog.V(0).Infof("loading index %s to memory error: %v", fileName+".idx", err) + } + case NeedleMapLevelDb: + glog.V(0).Infoln("loading leveldb", fileName+".ldb") + opts := &opt.Options{ + BlockCacheCapacity: 2 * 1024 * 1024, // default value is 
8MiB + WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, // default value is 1 + } + if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil { + glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err) + } + case NeedleMapLevelDbMedium: + glog.V(0).Infoln("loading leveldb medium", fileName+".ldb") + opts := &opt.Options{ + BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, // default value is 1 + } + if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil { + glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err) + } + case NeedleMapLevelDbLarge: + glog.V(0).Infoln("loading leveldb large", fileName+".ldb") + opts := &opt.Options{ + BlockCacheCapacity: 8 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 4 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, // default value is 1 + } + if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil { + glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err) + } } } } - stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Inc() + if !hasVolumeInfoFile { + v.volumeInfo.Version = uint32(v.SuperBlock.Version) + v.SaveVolumeInfo() + } - return e -} + stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Inc() -func checkFile(filename string) (exists, canRead, canWrite bool, modTime time.Time, fileSize int64) { - exists = true - fi, err := os.Stat(filename) - if os.IsNotExist(err) { - exists = false - return - } - if fi.Mode()&0400 != 0 { - canRead = true - } - if fi.Mode()&0200 != 0 { - canWrite = true - } - modTime = fi.ModTime() - fileSize = fi.Size() - return + return err } diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go index 242325755..ac6154cef 100644 --- a/weed/storage/volume_read_write.go +++ b/weed/storage/volume_read_write.go @@ -11,6 +11,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" . 
"github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -45,22 +46,25 @@ func (v *Volume) Destroy() (err error) { err = fmt.Errorf("volume %d is compacting", v.Id) return } + storageName, storageKey := v.RemoteStorageNameKey() + if v.HasRemoteFile() && storageName != "" && storageKey != "" { + if backendStorage, found := backend.BackendStorages[storageName]; found { + backendStorage.DeleteFile(storageKey) + } + } v.Close() os.Remove(v.FileName() + ".dat") os.Remove(v.FileName() + ".idx") + os.Remove(v.FileName() + ".vif") + os.Remove(v.FileName() + ".sdx") os.Remove(v.FileName() + ".cpd") os.Remove(v.FileName() + ".cpx") os.RemoveAll(v.FileName() + ".ldb") - os.RemoveAll(v.FileName() + ".bdb") return } func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) { - glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) - if v.readOnly { - err = fmt.Errorf("%s is read-only", v.DataBackend.String()) - return - } + // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) v.dataFileAccessLock.Lock() defer v.dataFileAccessLock.Unlock() if v.isFileUnchanged(n) { @@ -110,9 +114,6 @@ func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUn func (v *Volume) deleteNeedle(n *needle.Needle) (uint32, error) { glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) - if v.readOnly { - return 0, fmt.Errorf("%s is read-only", v.DataBackend.String()) - } v.dataFileAccessLock.Lock() defer v.dataFileAccessLock.Unlock() nv, ok := v.nm.Get(n.Id) @@ -136,8 +137,8 @@ func (v *Volume) deleteNeedle(n *needle.Needle) (uint32, error) { // read fills in Needle content by looking up n.Id from NeedleMapper func (v *Volume) readNeedle(n *needle.Needle) (int, error) { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() nv, ok := v.nm.Get(n.Id) if !ok || nv.Offset.IsZero() { @@ -171,7 +172,7 @@ func (v *Volume) readNeedle(n *needle.Needle) (int, error) { } type VolumeFileScanner interface { - VisitSuperBlock(SuperBlock) error + VisitSuperBlock(super_block.SuperBlock) error ReadNeedleBody() bool VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error } @@ -183,8 +184,10 @@ func ScanVolumeFile(dirname string, collection string, id needle.VolumeId, if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil { return fmt.Errorf("failed to load volume %d: %v", id, err) } - if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil { - return fmt.Errorf("failed to process volume %d super block: %v", id, err) + if v.volumeInfo.Version == 0 { + if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil { + return fmt.Errorf("failed to process volume %d super block: %v", id, err) + } } defer v.Close() @@ -195,13 +198,13 @@ func ScanVolumeFile(dirname string, collection string, id needle.VolumeId, return ScanVolumeFileFrom(version, v.DataBackend, offset, volumeFileScanner) } -func ScanVolumeFileFrom(version needle.Version, datBackend backend.DataStorageBackend, offset int64, volumeFileScanner VolumeFileScanner) (err error) { +func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorageFile, offset int64, volumeFileScanner VolumeFileScanner) (err error) { n, nh, rest, e := needle.ReadNeedleHeader(datBackend, version, offset) if e != nil { if e == io.EOF { return nil } - return fmt.Errorf("cannot read %s 
at offset %d: %v", datBackend.String(), offset, e) + return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e) } for n != nil { var needleBody []byte diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go index bce5af465..5e913e062 100644 --- a/weed/storage/volume_super_block.go +++ b/weed/storage/volume_super_block.go @@ -5,92 +5,29 @@ import ( "os" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) -const ( - _SuperBlockSize = 8 -) - -/* -* Super block currently has 8 bytes allocated for each volume. -* Byte 0: version, 1 or 2 -* Byte 1: Replica Placement strategy, 000, 001, 002, 010, etc -* Byte 2 and byte 3: Time to live. See TTL for definition -* Byte 4 and byte 5: The number of times the volume has been compacted. -* Rest bytes: Reserved - */ -type SuperBlock struct { - version needle.Version - ReplicaPlacement *ReplicaPlacement - Ttl *needle.TTL - CompactionRevision uint16 - Extra *master_pb.SuperBlockExtra - extraSize uint16 -} - -func (s *SuperBlock) BlockSize() int { - switch s.version { - case needle.Version2, needle.Version3: - return _SuperBlockSize + int(s.extraSize) - } - return _SuperBlockSize -} - -func (s *SuperBlock) Version() needle.Version { - return s.version -} -func (s *SuperBlock) Bytes() []byte { - header := make([]byte, _SuperBlockSize) - header[0] = byte(s.version) - header[1] = s.ReplicaPlacement.Byte() - s.Ttl.ToBytes(header[2:4]) - util.Uint16toBytes(header[4:6], s.CompactionRevision) - - if s.Extra != nil { - extraData, err := proto.Marshal(s.Extra) - if err != nil { - glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err) - } - extraSize := len(extraData) - if extraSize > 256*256-2 { - // reserve a couple of bits for future extension - glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2) - } - s.extraSize = uint16(extraSize) - util.Uint16toBytes(header[6:8], s.extraSize) - - header = append(header, extraData...) - } - - return header -} - -func (s *SuperBlock) Initialized() bool { - return s.ReplicaPlacement != nil && s.Ttl != nil -} - func (v *Volume) maybeWriteSuperBlock() error { datSize, _, e := v.DataBackend.GetStat() if e != nil { - glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.String(), e) + glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e) return e } if datSize == 0 { - v.SuperBlock.version = needle.CurrentVersion + v.SuperBlock.Version = needle.CurrentVersion _, e = v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0) if e != nil && os.IsPermission(e) { //read-only, but zero length - recreate it! 
var dataFile *os.File - if dataFile, e = os.Create(v.DataBackend.String()); e == nil { + if dataFile, e = os.Create(v.DataBackend.Name()); e == nil { v.DataBackend = backend.NewDiskFile(dataFile) if _, e = v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0); e == nil { - v.readOnly = false + v.noWriteOrDelete = false + v.noWriteCanDelete = false } } } @@ -99,38 +36,13 @@ func (v *Volume) maybeWriteSuperBlock() error { } func (v *Volume) readSuperBlock() (err error) { - v.SuperBlock, err = ReadSuperBlock(v.DataBackend) - return err -} - -// ReadSuperBlock reads from data file and load it into volume's super block -func ReadSuperBlock(datBackend backend.DataStorageBackend) (superBlock SuperBlock, err error) { - - header := make([]byte, _SuperBlockSize) - if _, e := datBackend.ReadAt(header, 0); e != nil { - err = fmt.Errorf("cannot read volume %s super block: %v", datBackend.String(), e) - return - } - - superBlock.version = needle.Version(header[0]) - if superBlock.ReplicaPlacement, err = NewReplicaPlacementFromByte(header[1]); err != nil { - err = fmt.Errorf("cannot read replica type: %s", err.Error()) - return - } - superBlock.Ttl = needle.LoadTTLFromBytes(header[2:4]) - superBlock.CompactionRevision = util.BytesToUint16(header[4:6]) - superBlock.extraSize = util.BytesToUint16(header[6:8]) - - if superBlock.extraSize > 0 { - // read more - extraData := make([]byte, int(superBlock.extraSize)) - superBlock.Extra = &master_pb.SuperBlockExtra{} - err = proto.Unmarshal(extraData, superBlock.Extra) - if err != nil { - err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.String(), err) - return + v.SuperBlock, err = super_block.ReadSuperBlock(v.DataBackend) + if v.volumeInfo != nil && v.volumeInfo.Replication != "" { + if replication, err := super_block.NewReplicaPlacementFromString(v.volumeInfo.Replication); err != nil { + return fmt.Errorf("Error parse volume %d replication %s : %v", v.Id, v.volumeInfo.Replication, err) + } else { + v.SuperBlock.ReplicaPlacement = replication } } - - return + return err } diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go new file mode 100644 index 000000000..fd7b08654 --- /dev/null +++ b/weed/storage/volume_tier.go @@ -0,0 +1,50 @@ +package storage + +import ( + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + _ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend" +) + +func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo { + return v.volumeInfo +} + +func (v *Volume) maybeLoadVolumeInfo() (found bool) { + + v.volumeInfo, v.hasRemoteFile, _ = pb.MaybeLoadVolumeInfo(v.FileName() + ".vif") + + if v.hasRemoteFile { + glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id, + v.volumeInfo.Files[0].BackendName(), v.volumeInfo.Files[0].Key) + } + + return + +} + +func (v *Volume) HasRemoteFile() bool { + return v.hasRemoteFile +} + +func (v *Volume) LoadRemoteFile() error { + tierFile := v.volumeInfo.GetFiles()[0] + backendStorage := backend.BackendStorages[tierFile.BackendName()] + + if v.DataBackend != nil { + v.DataBackend.Close() + } + + v.DataBackend = backendStorage.NewStorageFile(tierFile.Key, v.volumeInfo) + return nil +} + +func (v *Volume) SaveVolumeInfo() error { + + tierFileName := v.FileName() + ".vif" + + return pb.SaveVolumeInfo(tierFileName, v.volumeInfo) + +} diff --git a/weed/storage/volume_vacuum.go 
b/weed/storage/volume_vacuum.go index e90746b54..5d0d63877 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -3,6 +3,7 @@ package storage import ( "fmt" "os" + "runtime" "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -11,6 +12,7 @@ import ( idx2 "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" . "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -19,101 +21,124 @@ func (v *Volume) garbageLevel() float64 { if v.ContentSize() == 0 { return 0 } - return float64(v.DeletedSize()) / float64(v.ContentSize()) + deletedSize := v.DeletedSize() + fileSize := v.ContentSize() + if v.DeletedCount() > 0 && v.DeletedSize() == 0 { + // this happens for .sdx converted back to normal .idx + // where deleted entry size is missing + datFileSize, _, _ := v.FileStat() + deletedSize = datFileSize - fileSize - super_block.SuperBlockSize + fileSize = datFileSize + } + return float64(deletedSize) / float64(fileSize) } +// compact a volume based on deletions in .dat files func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error { - if v.MemoryMapMaxSizeMb == 0 { //it makes no sense to compact in memory - glog.V(3).Infof("Compacting volume %d ...", v.Id) - //no need to lock for copy on write - //v.accessLock.Lock() - //defer v.accessLock.Unlock() - //glog.V(3).Infof("Got Compaction lock...") - v.isCompacting = true - defer func() { - v.isCompacting = false - }() - - filePath := v.FileName() - v.lastCompactIndexOffset = v.IndexFileSize() - v.lastCompactRevision = v.SuperBlock.CompactionRevision - glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset) - return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond) - } else { + if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory return nil } + glog.V(3).Infof("Compacting volume %d ...", v.Id) + //no need to lock for copy on write + //v.accessLock.Lock() + //defer v.accessLock.Unlock() + //glog.V(3).Infof("Got Compaction lock...") + v.isCompacting = true + defer func() { + v.isCompacting = false + }() + + filePath := v.FileName() + v.lastCompactIndexOffset = v.IndexFileSize() + v.lastCompactRevision = v.SuperBlock.CompactionRevision + glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset) + return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond) } -func (v *Volume) Compact2() error { - - if v.MemoryMapMaxSizeMb == 0 { //it makes no sense to compact in memory - glog.V(3).Infof("Compact2 volume %d ...", v.Id) +// compact a volume based on deletions in .idx files +func (v *Volume) Compact2(preallocate int64) error { - v.isCompacting = true - defer func() { - v.isCompacting = false - }() - - filePath := v.FileName() - glog.V(3).Infof("creating copies for volume %d ...", v.Id) - return v.copyDataBasedOnIndexFile(filePath+".cpd", filePath+".cpx") - } else { + if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory return nil } + glog.V(3).Infof("Compact2 volume %d ...", v.Id) + + v.isCompacting = true + defer func() { + v.isCompacting = false + }() + + filePath := v.FileName() + v.lastCompactIndexOffset = v.IndexFileSize() + v.lastCompactRevision = 
v.SuperBlock.CompactionRevision + glog.V(3).Infof("creating copies for volume %d ...", v.Id) + return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate) } func (v *Volume) CommitCompact() error { - if v.MemoryMapMaxSizeMb == 0 { //it makes no sense to compact in memory - glog.V(0).Infof("Committing volume %d vacuuming...", v.Id) + if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory + return nil + } + glog.V(0).Infof("Committing volume %d vacuuming...", v.Id) - v.isCompacting = true - defer func() { - v.isCompacting = false - }() + v.isCompacting = true + defer func() { + v.isCompacting = false + }() - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.Lock() + defer v.dataFileAccessLock.Unlock() - glog.V(3).Infof("Got volume %d committing lock...", v.Id) - v.nm.Close() + glog.V(3).Infof("Got volume %d committing lock...", v.Id) + v.nm.Close() + if v.DataBackend != nil { if err := v.DataBackend.Close(); err != nil { glog.V(0).Infof("fail to close volume %d", v.Id) } - v.DataBackend = nil - stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec() - - var e error - if e = v.makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx"); e != nil { - glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e) - e = os.Remove(v.FileName() + ".cpd") + } + v.DataBackend = nil + stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec() + + var e error + if e = v.makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx"); e != nil { + glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e) + e = os.Remove(v.FileName() + ".cpd") + if e != nil { + return e + } + e = os.Remove(v.FileName() + ".cpx") + if e != nil { + return e + } + } else { + if runtime.GOOS == "windows" { + e = os.RemoveAll(v.FileName() + ".dat") if e != nil { return e } - e = os.Remove(v.FileName() + ".cpx") + e = os.RemoveAll(v.FileName() + ".idx") if e != nil { return e } - } else { - var e error - if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil { - return fmt.Errorf("rename %s: %v", v.FileName()+".cpd", e) - } - if e = os.Rename(v.FileName()+".cpx", v.FileName()+".idx"); e != nil { - return fmt.Errorf("rename %s: %v", v.FileName()+".cpx", e) - } } + var e error + if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil { + return fmt.Errorf("rename %s: %v", v.FileName()+".cpd", e) + } + if e = os.Rename(v.FileName()+".cpx", v.FileName()+".idx"); e != nil { + return fmt.Errorf("rename %s: %v", v.FileName()+".cpx", e) + } + } - //glog.V(3).Infof("Pretending to be vacuuming...") - //time.Sleep(20 * time.Second) + //glog.V(3).Infof("Pretending to be vacuuming...") + //time.Sleep(20 * time.Second) - os.RemoveAll(v.FileName() + ".ldb") - os.RemoveAll(v.FileName() + ".bdb") + os.RemoveAll(v.FileName() + ".ldb") - glog.V(3).Infof("Loading volume %d commit file...", v.Id) - if e = v.load(true, false, v.needleMapKind, 0); e != nil { - return e - } + glog.V(3).Infof("Loading volume %d commit file...", v.Id) + if e = v.load(true, false, v.needleMapKind, 0); e != nil { + return e } return nil } @@ -132,14 +157,15 @@ func (v *Volume) cleanupCompact() error { return nil } -func fetchCompactRevisionFromDatFile(datBackend backend.DataStorageBackend) (compactRevision uint16, err error) { - superBlock, err := ReadSuperBlock(datBackend) 
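// Sketch of the intent behind the runtime.GOOS == "windows" branch in
// CommitCompact above: on Windows, os.Rename can fail when the destination
// file still exists or is still held open, so the old .dat/.idx are removed
// before the compacted .cpd/.cpx are renamed into place. replaceFile below is
// a hypothetical helper illustrating the same pattern, not part of the patch.
package main

import (
	"os"
	"runtime"
)

func replaceFile(tmpName, finalName string) error {
	if runtime.GOOS == "windows" {
		// Not atomic: a crash between the remove and the rename loses finalName.
		if err := os.RemoveAll(finalName); err != nil {
			return err
		}
	}
	return os.Rename(tmpName, finalName)
}

func main() {
	_ = replaceFile("1.cpd", "1.dat")
}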
+func fetchCompactRevisionFromDatFile(datBackend backend.BackendStorageFile) (compactRevision uint16, err error) { + superBlock, err := super_block.ReadSuperBlock(datBackend) if err != nil { return 0, err } return superBlock.CompactionRevision, nil } +// if old .dat and .idx files are updated, this func tries to apply the same changes to new files accordingly func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldIdxFileName string) (err error) { var indexSize int64 @@ -150,6 +176,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI oldDatBackend := backend.NewDiskFile(oldDatFile) defer oldDatBackend.Close() + // skip if the old .idx file has not changed if indexSize, err = verifyIndexFileIntegrity(oldIdxFile); err != nil { return fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", oldIdxFileName, err) } @@ -157,6 +184,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI return nil } + // fail if the old .dat file has changed to a new revision oldDatCompactRevision, err := fetchCompactRevisionFromDatFile(oldDatBackend) if err != nil { return fmt.Errorf("fetchCompactRevisionFromDatFile src %s failed: %v", oldDatFile.Name(), err) @@ -270,15 +298,15 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI type VolumeFileScanner4Vacuum struct { version needle.Version v *Volume - dstBackend backend.DataStorageBackend - nm *NeedleMap + dstBackend backend.BackendStorageFile + nm *needle_map.MemDb newOffset int64 now uint64 writeThrottler *util.WriteThrottler } -func (scanner *VolumeFileScanner4Vacuum) VisitSuperBlock(superBlock SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4Vacuum) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version superBlock.CompactionRevision++ _, err := scanner.dstBackend.WriteAt(superBlock.Bytes(), 0) scanner.newOffset = int64(superBlock.BlockSize()) @@ -296,7 +324,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in nv, ok := scanner.v.nm.Get(n.Id) glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) if ok && nv.Offset.ToAcutalOffset() == offset && nv.Size > 0 && nv.Size != TombstoneFileSize { - if err := scanner.nm.Put(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil { + if err := scanner.nm.Set(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil { return fmt.Errorf("cannot put needle: %s", err) } if _, _, _, err := n.Append(scanner.dstBackend, scanner.v.Version()); err != nil { @@ -312,90 +340,92 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64, compactionBytePerSecond int64) (err error) { var ( - dst backend.DataStorageBackend - idx *os.File + dst backend.BackendStorageFile ) if dst, err = createVolumeFile(dstName, preallocate, 0); err != nil { return } defer dst.Close() - if idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil { - return - } - defer idx.Close() + nm := needle_map.NewMemDb() + defer nm.Close() scanner := &VolumeFileScanner4Vacuum{ v: v, now: uint64(time.Now().Unix()), - nm: NewBtreeNeedleMap(idx), + nm: nm, dstBackend: dst, writeThrottler: util.NewWriteThrottler(compactionBytePerSecond), } err = ScanVolumeFile(v.dir, v.Collection, v.Id, v.needleMapKind, scanner) + if err != nil { + return nil + } + + err = nm.SaveToIdx(idxName) 
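// The copy above is the whole vacuum algorithm in miniature: scan the live
// needles of the old volume, append each one to the compacted .cpd file,
// record its new offset in an in-memory needle map (needle_map.MemDb), and
// finally dump that map to the new .cpx index via SaveToIdx. A toy version
// with stand-in types (entry here is not the real needle type):
package main

import (
	"fmt"
	"sort"
)

type entry struct {
	id     uint64
	offset int64 // 0 means never written
	size   uint32
}

const tombstoneFileSize = 0xFFFFFFFF // marker size recorded for deleted needles

func compactSketch(old []entry, superBlockSize int64) (live []entry) {
	// visit entries in ascending key order, like MemDb.AscendingVisit
	sort.Slice(old, func(i, j int) bool { return old[i].id < old[j].id })
	newOffset := superBlockSize // the compacted file starts with a super block
	for _, e := range old {
		if e.offset == 0 || e.size == tombstoneFileSize {
			continue // dropped: this is where the space is reclaimed
		}
		live = append(live, entry{e.id, newOffset, e.size})
		newOffset += int64(e.size)
	}
	return
}

func main() {
	old := []entry{{id: 2, offset: 208, size: tombstoneFileSize}, {id: 1, offset: 8, size: 100}}
	fmt.Println(compactSketch(old, 8)) // only needle 1 survives, at offset 8
}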
return } -func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) { +func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64) (err error) { var ( - dst, idx, oldIndexFile *os.File + srcDatBackend, dstDatBackend backend.BackendStorageFile + dataFile *os.File ) - if dst, err = os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil { + if dstDatBackend, err = createVolumeFile(dstDatName, preallocate, 0); err != nil { return } - dstDatBackend := backend.NewDiskFile(dst) defer dstDatBackend.Close() - if idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil { + oldNm := needle_map.NewMemDb() + defer oldNm.Close() + newNm := needle_map.NewMemDb() + defer newNm.Close() + if err = oldNm.LoadFromIdx(srcIdxName); err != nil { return } - defer idx.Close() - - if oldIndexFile, err = os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644); err != nil { - return + if dataFile, err = os.Open(srcDatName); err != nil { + return err } - defer oldIndexFile.Close() + srcDatBackend = backend.NewDiskFile(dataFile) + defer srcDatBackend.Close() - nm := NewBtreeNeedleMap(idx) now := uint64(time.Now().Unix()) - v.SuperBlock.CompactionRevision++ - dst.Write(v.SuperBlock.Bytes()) - newOffset := int64(v.SuperBlock.BlockSize()) + sb.CompactionRevision++ + dstDatBackend.WriteAt(sb.Bytes(), 0) + newOffset := int64(sb.BlockSize()) - idx2.WalkIndexFile(oldIndexFile, func(key NeedleId, offset Offset, size uint32) error { - if offset.IsZero() || size == TombstoneFileSize { - return nil - } + oldNm.AscendingVisit(func(value needle_map.NeedleValue) error { + + offset, size := value.Offset, value.Size - nv, ok := v.nm.Get(key) - if !ok { + if offset.IsZero() || size == TombstoneFileSize { return nil } n := new(needle.Needle) - err := n.ReadData(v.DataBackend, offset.ToAcutalOffset(), size, v.Version()) + err := n.ReadData(srcDatBackend, offset.ToAcutalOffset(), size, version) if err != nil { return nil } - if n.HasTtl() && now >= n.LastModified+uint64(v.Ttl.Minutes()*60) { + if n.HasTtl() && now >= n.LastModified+uint64(sb.Ttl.Minutes()*60) { return nil } - glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) - if nv.Offset == offset && nv.Size > 0 { - if err = nm.Put(n.Id, ToOffset(newOffset), n.Size); err != nil { - return fmt.Errorf("cannot put needle: %s", err) - } - if _, _, _, err = n.Append(dstDatBackend, v.Version()); err != nil { - return fmt.Errorf("cannot append needle: %s", err) - } - newOffset += n.DiskSize(v.Version()) - glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) + if err = newNm.Set(n.Id, ToOffset(newOffset), n.Size); err != nil { + return fmt.Errorf("cannot put needle: %s", err) + } + if _, _, _, err = n.Append(dstDatBackend, sb.Version); err != nil { + return fmt.Errorf("cannot append needle: %s", err) } + newOffset += n.DiskSize(version) + glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) + return nil }) + newNm.SaveToIdx(datIdxName) + return } diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go index ba1e59f2c..95f43d6ec 100644 --- a/weed/storage/volume_vacuum_test.go +++ b/weed/storage/volume_vacuum_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" 
"github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -46,7 +47,7 @@ func TestMakeDiff(t *testing.T) { v := new(Volume) //lastCompactIndexOffset value is the index file size before step 4 v.lastCompactIndexOffset = 96 - v.SuperBlock.version = 0x2 + v.SuperBlock.Version = 0x2 /* err := v.makeupDiff( "/yourpath/1.cpd", @@ -68,7 +69,7 @@ func TestCompaction(t *testing.T) { } defer os.RemoveAll(dir) // clean up - v, err := NewVolume(dir, "", 1, NeedleMapInMemory, &ReplicaPlacement{}, &needle.TTL{}, 0, 0) + v, err := NewVolume(dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0) if err != nil { t.Fatalf("volume creation: %v", err) } @@ -83,7 +84,7 @@ func TestCompaction(t *testing.T) { } startTime := time.Now() - v.Compact(0, 1024*1024) + v.Compact2(0) speed := float64(v.ContentSize()) / time.Now().Sub(startTime).Seconds() t.Logf("compaction speed: %.2f bytes/s", speed) diff --git a/weed/topology/collection.go b/weed/topology/collection.go index f6b728ec9..7a611d904 100644 --- a/weed/topology/collection.go +++ b/weed/topology/collection.go @@ -3,8 +3,8 @@ package topology import ( "fmt" - "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -24,7 +24,7 @@ func (c *Collection) String() string { return fmt.Sprintf("Name:%s, volumeSizeLimit:%d, storageType2VolumeLayout:%v", c.Name, c.volumeSizeLimit, c.storageType2VolumeLayout) } -func (c *Collection) GetOrCreateVolumeLayout(rp *storage.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout { +func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout { keyString := rp.String() if ttl != nil { keyString += ttl.String() diff --git a/weed/topology/data_center.go b/weed/topology/data_center.go index 640cb1937..dc3accb71 100644 --- a/weed/topology/data_center.go +++ b/weed/topology/data_center.go @@ -48,6 +48,7 @@ func (dc *DataCenter) ToDataCenterInfo() *master_pb.DataCenterInfo { MaxVolumeCount: uint64(dc.GetMaxVolumeCount()), FreeVolumeCount: uint64(dc.FreeSpace()), ActiveVolumeCount: uint64(dc.GetActiveVolumeCount()), + RemoteVolumeCount: uint64(dc.GetRemoteVolumeCount()), } for _, c := range dc.Children() { rack := c.(*Rack) diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go index 3e72ccdbf..617341e54 100644 --- a/weed/topology/data_node.go +++ b/weed/topology/data_node.go @@ -2,14 +2,13 @@ package topology import ( "fmt" + "strconv" "sync" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" - "strconv" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" ) @@ -44,15 +43,26 @@ func (dn *DataNode) String() string { func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew bool) { dn.Lock() defer dn.Unlock() - if _, ok := dn.volumes[v.Id]; !ok { + if oldV, ok := dn.volumes[v.Id]; !ok { dn.volumes[v.Id] = v dn.UpAdjustVolumeCountDelta(1) + if v.IsRemote() { + dn.UpAdjustRemoteVolumeCountDelta(1) + } if !v.ReadOnly { dn.UpAdjustActiveVolumeCountDelta(1) } dn.UpAdjustMaxVolumeId(v.Id) isNew = true } else { + if oldV.IsRemote() != v.IsRemote() { + if v.IsRemote() { + dn.UpAdjustRemoteVolumeCountDelta(1) + } + if oldV.IsRemote() { + dn.UpAdjustRemoteVolumeCountDelta(-1) + } + } dn.volumes[v.Id] = v } return @@ -70,7 +80,12 @@ func (dn *DataNode) 
UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume delete(dn.volumes, vid) deletedVolumes = append(deletedVolumes, v) dn.UpAdjustVolumeCountDelta(-1) - dn.UpAdjustActiveVolumeCountDelta(-1) + if v.IsRemote() { + dn.UpAdjustRemoteVolumeCountDelta(-1) + } + if !v.ReadOnly { + dn.UpAdjustActiveVolumeCountDelta(-1) + } } } dn.Unlock() @@ -88,7 +103,12 @@ func (dn *DataNode) DeltaUpdateVolumes(newlVolumes, deletedVolumes []storage.Vol for _, v := range deletedVolumes { delete(dn.volumes, v.Id) dn.UpAdjustVolumeCountDelta(-1) - dn.UpAdjustActiveVolumeCountDelta(-1) + if v.IsRemote() { + dn.UpAdjustRemoteVolumeCountDelta(-1) + } + if !v.ReadOnly { + dn.UpAdjustActiveVolumeCountDelta(-1) + } } dn.Unlock() for _, v := range newlVolumes { @@ -160,6 +180,7 @@ func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo { MaxVolumeCount: uint64(dn.GetMaxVolumeCount()), FreeVolumeCount: uint64(dn.FreeSpace()), ActiveVolumeCount: uint64(dn.GetActiveVolumeCount()), + RemoteVolumeCount: uint64(dn.GetRemoteVolumeCount()), } for _, v := range dn.GetVolumes() { m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage()) diff --git a/weed/topology/node.go b/weed/topology/node.go index b2808f589..ceeb96d60 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -20,6 +20,7 @@ type Node interface { ReserveOneVolume(r int64) (*DataNode, error) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) UpAdjustVolumeCountDelta(volumeCountDelta int64) + UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) UpAdjustEcShardCountDelta(ecShardCountDelta int64) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) UpAdjustMaxVolumeId(vid needle.VolumeId) @@ -27,6 +28,7 @@ type Node interface { GetVolumeCount() int64 GetEcShardCount() int64 GetActiveVolumeCount() int64 + GetRemoteVolumeCount() int64 GetMaxVolumeCount() int64 GetMaxVolumeId() needle.VolumeId SetParent(Node) @@ -44,6 +46,7 @@ type Node interface { } type NodeImpl struct { volumeCount int64 + remoteVolumeCount int64 activeVolumeCount int64 ecShardCount int64 maxVolumeCount int64 @@ -59,56 +62,64 @@ type NodeImpl struct { } // the first node must satisfy filterFirstNodeFn(), the rest nodes must have one free slot -func (n *NodeImpl) RandomlyPickNodes(numberOfNodes int, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) { - candidates := make([]Node, 0, len(n.children)) +func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) { + var totalWeights int64 var errs []string n.RLock() + candidates := make([]Node, 0, len(n.children)) + candidatesWeights := make([]int64, 0, len(n.children)) + //pick nodes which has enough free volumes as candidates, and use free volumes number as node weight. for _, node := range n.children { - if err := filterFirstNodeFn(node); err == nil { - candidates = append(candidates, node) - } else { - errs = append(errs, string(node.Id())+":"+err.Error()) + if node.FreeSpace() <= 0 { + continue } + totalWeights += node.FreeSpace() + candidates = append(candidates, node) + candidatesWeights = append(candidatesWeights, node.FreeSpace()) } n.RUnlock() - if len(candidates) == 0 { - return nil, nil, errors.New("No matching data node found! 
\n" + strings.Join(errs, "\n")) + if len(candidates) < numberOfNodes { + glog.V(2).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates") + return nil, nil, errors.New("No enough data node found!") } - firstNode = candidates[rand.Intn(len(candidates))] - glog.V(2).Infoln(n.Id(), "picked main node:", firstNode.Id()) - restNodes = make([]Node, numberOfNodes-1) - candidates = candidates[:0] - n.RLock() - for _, node := range n.children { - if node.Id() == firstNode.Id() { - continue - } - if node.FreeSpace() <= 0 { - continue + //pick nodes randomly by weights, the node picked earlier has higher final weights + sortedCandidates := make([]Node, 0, len(candidates)) + for i := 0; i < len(candidates); i++ { + weightsInterval := rand.Int63n(totalWeights) + lastWeights := int64(0) + for k, weights := range candidatesWeights { + if (weightsInterval >= lastWeights) && (weightsInterval < lastWeights+weights) { + sortedCandidates = append(sortedCandidates, candidates[k]) + candidatesWeights[k] = 0 + totalWeights -= weights + break + } + lastWeights += weights } - glog.V(2).Infoln("select rest node candidate:", node.Id()) - candidates = append(candidates, node) } - n.RUnlock() - glog.V(2).Infoln(n.Id(), "picking", numberOfNodes-1, "from rest", len(candidates), "node candidates") - ret := len(restNodes) == 0 - for k, node := range candidates { - if k < len(restNodes) { - restNodes[k] = node - if k == len(restNodes)-1 { - ret = true + + restNodes = make([]Node, 0, numberOfNodes-1) + ret := false + n.RLock() + for k, node := range sortedCandidates { + if err := filterFirstNodeFn(node); err == nil { + firstNode = node + if k >= numberOfNodes-1 { + restNodes = sortedCandidates[:numberOfNodes-1] + } else { + restNodes = append(restNodes, sortedCandidates[:k]...) + restNodes = append(restNodes, sortedCandidates[k+1:numberOfNodes]...) } + ret = true + break } else { - r := rand.Intn(k + 1) - if r < len(restNodes) { - restNodes[r] = node - } + errs = append(errs, string(node.Id())+":"+err.Error()) } } + n.RUnlock() if !ret { - glog.V(2).Infoln(n.Id(), "failed to pick", numberOfNodes-1, "from rest", len(candidates), "node candidates") - err = errors.New("No enough data node found!") + return nil, nil, errors.New("No matching data node found! 
\n" + strings.Join(errs, "\n")) } return } @@ -132,10 +143,11 @@ func (n *NodeImpl) Id() NodeId { return n.id } func (n *NodeImpl) FreeSpace() int64 { + freeVolumeSlotCount := n.maxVolumeCount + n.remoteVolumeCount - n.volumeCount if n.ecShardCount > 0 { - return n.maxVolumeCount - n.volumeCount - n.ecShardCount/erasure_coding.DataShardsCount - 1 + freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1 } - return n.maxVolumeCount - n.volumeCount + return freeVolumeSlotCount } func (n *NodeImpl) SetParent(node Node) { n.parent = node @@ -191,6 +203,12 @@ func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be n n.parent.UpAdjustVolumeCountDelta(volumeCountDelta) } } +func (n *NodeImpl) UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) { //can be negative + atomic.AddInt64(&n.remoteVolumeCount, remoteVolumeCountDelta) + if n.parent != nil { + n.parent.UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta) + } +} func (n *NodeImpl) UpAdjustEcShardCountDelta(ecShardCountDelta int64) { //can be negative atomic.AddInt64(&n.ecShardCount, ecShardCountDelta) if n.parent != nil { @@ -220,6 +238,9 @@ func (n *NodeImpl) GetVolumeCount() int64 { func (n *NodeImpl) GetEcShardCount() int64 { return n.ecShardCount } +func (n *NodeImpl) GetRemoteVolumeCount() int64 { + return n.remoteVolumeCount +} func (n *NodeImpl) GetActiveVolumeCount() int64 { return n.activeVolumeCount } @@ -235,6 +256,7 @@ func (n *NodeImpl) LinkChildNode(node Node) { n.UpAdjustMaxVolumeCountDelta(node.GetMaxVolumeCount()) n.UpAdjustMaxVolumeId(node.GetMaxVolumeId()) n.UpAdjustVolumeCountDelta(node.GetVolumeCount()) + n.UpAdjustRemoteVolumeCountDelta(node.GetRemoteVolumeCount()) n.UpAdjustEcShardCountDelta(node.GetEcShardCount()) n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount()) node.SetParent(n) @@ -250,6 +272,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) { node.SetParent(nil) delete(n.children, node.Id()) n.UpAdjustVolumeCountDelta(-node.GetVolumeCount()) + n.UpAdjustRemoteVolumeCountDelta(-node.GetRemoteVolumeCount()) n.UpAdjustEcShardCountDelta(-node.GetEcShardCount()) n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount()) n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount()) diff --git a/weed/topology/rack.go b/weed/topology/rack.go index 932c1a804..1921c0c05 100644 --- a/weed/topology/rack.go +++ b/weed/topology/rack.go @@ -67,6 +67,7 @@ func (r *Rack) ToRackInfo() *master_pb.RackInfo { MaxVolumeCount: uint64(r.GetMaxVolumeCount()), FreeVolumeCount: uint64(r.FreeSpace()), ActiveVolumeCount: uint64(r.GetActiveVolumeCount()), + RemoteVolumeCount: uint64(r.GetRemoteVolumeCount()), } for _, c := range r.Children() { dn := c.(*DataNode) diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index d21c4d210..8c4996d45 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -1,7 +1,6 @@ package topology import ( - "bytes" "encoding/json" "errors" "fmt" @@ -25,58 +24,60 @@ func ReplicatedWrite(masterNode string, s *storage.Store, //check JWT jwt := security.GetJwt(r) + var remoteLocations []operation.Location + if r.FormValue("type") != "replicate" { + remoteLocations, err = getWritableRemoteReplications(s, volumeId, masterNode) + if err != nil { + glog.V(0).Infoln(err) + return + } + } + size, isUnchanged, err = s.WriteVolumeNeedle(volumeId, n) if err != nil { err = fmt.Errorf("failed to write to local disk: %v", err) + glog.V(0).Infoln(err) return } - needToReplicate := 
!s.HasVolume(volumeId) - needToReplicate = needToReplicate || s.GetVolume(volumeId).NeedToReplicate() - if !needToReplicate { - needToReplicate = s.GetVolume(volumeId).NeedToReplicate() - } - if needToReplicate { //send to other replica locations - if r.FormValue("type") != "replicate" { - - if err = distributedOperation(masterNode, s, volumeId, func(location operation.Location) error { - u := url.URL{ - Scheme: "http", - Host: location.Url, - Path: r.URL.Path, - } - q := url.Values{ - "type": {"replicate"}, - "ttl": {n.Ttl.String()}, - } - if n.LastModified > 0 { - q.Set("ts", strconv.FormatUint(n.LastModified, 10)) - } - if n.IsChunkedManifest() { - q.Set("cm", "true") + if len(remoteLocations) > 0 { //send to other replica locations + if err = distributedOperation(remoteLocations, s, func(location operation.Location) error { + u := url.URL{ + Scheme: "http", + Host: location.Url, + Path: r.URL.Path, + } + q := url.Values{ + "type": {"replicate"}, + "ttl": {n.Ttl.String()}, + } + if n.LastModified > 0 { + q.Set("ts", strconv.FormatUint(n.LastModified, 10)) + } + if n.IsChunkedManifest() { + q.Set("cm", "true") + } + u.RawQuery = q.Encode() + + pairMap := make(map[string]string) + if n.HasPairs() { + tmpMap := make(map[string]string) + err := json.Unmarshal(n.Pairs, &tmpMap) + if err != nil { + glog.V(0).Infoln("Unmarshal pairs error:", err) } - u.RawQuery = q.Encode() - - pairMap := make(map[string]string) - if n.HasPairs() { - tmpMap := make(map[string]string) - err := json.Unmarshal(n.Pairs, &tmpMap) - if err != nil { - glog.V(0).Infoln("Unmarshal pairs error:", err) - } - for k, v := range tmpMap { - pairMap[needle.PairNamePrefix+k] = v - } + for k, v := range tmpMap { + pairMap[needle.PairNamePrefix+k] = v } - - _, err := operation.Upload(u.String(), - string(n.Name), bytes.NewReader(n.Data), n.IsGzipped(), string(n.Mime), - pairMap, jwt) - return err - }); err != nil { - size = 0 - err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err) } + + // volume server do not know about encryption + _, err := operation.UploadData(u.String(), string(n.Name), false, n.Data, n.IsGzipped(), string(n.Mime), pairMap, jwt) + return err + }); err != nil { + size = 0 + err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err) + glog.V(0).Infoln(err) } } return @@ -84,31 +85,34 @@ func ReplicatedWrite(masterNode string, s *storage.Store, func ReplicatedDelete(masterNode string, store *storage.Store, volumeId needle.VolumeId, n *needle.Needle, - r *http.Request) (uint32, error) { + r *http.Request) (size uint32, err error) { //check JWT jwt := security.GetJwt(r) - ret, err := store.DeleteVolumeNeedle(volumeId, n) + var remoteLocations []operation.Location + if r.FormValue("type") != "replicate" { + remoteLocations, err = getWritableRemoteReplications(store, volumeId, masterNode) + if err != nil { + glog.V(0).Infoln(err) + return + } + } + + size, err = store.DeleteVolumeNeedle(volumeId, n) if err != nil { glog.V(0).Infoln("delete error:", err) - return ret, err + return } - needToReplicate := !store.HasVolume(volumeId) - if !needToReplicate && ret > 0 { - needToReplicate = store.GetVolume(volumeId).NeedToReplicate() - } - if needToReplicate { //send to other replica locations - if r.FormValue("type") != "replicate" { - if err = distributedOperation(masterNode, store, volumeId, func(location operation.Location) error { - return util.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", string(jwt)) - }); err != nil { - ret = 0 - } + if 
len(remoteLocations) > 0 { //send to other replica locations + if err = distributedOperation(remoteLocations, store, func(location operation.Location) error { + return util.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", string(jwt)) + }); err != nil { + size = 0 } } - return ret, err + return } type DistributedOperationResult map[string]error @@ -131,32 +135,44 @@ type RemoteResult struct { Error error } -func distributedOperation(masterNode string, store *storage.Store, volumeId needle.VolumeId, op func(location operation.Location) error) error { - if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil { - length := 0 - selfUrl := (store.Ip + ":" + strconv.Itoa(store.Port)) - results := make(chan RemoteResult) - for _, location := range lookupResult.Locations { - if location.Url != selfUrl { - length++ - go func(location operation.Location, results chan RemoteResult) { - results <- RemoteResult{location.Url, op(location)} - }(location, results) +func distributedOperation(locations []operation.Location, store *storage.Store, op func(location operation.Location) error) error { + length := len(locations) + results := make(chan RemoteResult) + for _, location := range locations { + go func(location operation.Location, results chan RemoteResult) { + results <- RemoteResult{location.Url, op(location)} + }(location, results) + } + ret := DistributedOperationResult(make(map[string]error)) + for i := 0; i < length; i++ { + result := <-results + ret[result.Host] = result.Error + } + + return ret.Error() +} + +func getWritableRemoteReplications(s *storage.Store, volumeId needle.VolumeId, masterNode string) ( + remoteLocations []operation.Location, err error) { + copyCount := s.GetVolume(volumeId).ReplicaPlacement.GetCopyCount() + if copyCount > 1 { + if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil { + if len(lookupResult.Locations) < copyCount { + err = fmt.Errorf("replicating operations [%d] is less than volume %d replication copy count [%d]", + len(lookupResult.Locations), volumeId, copyCount) + return } - } - ret := DistributedOperationResult(make(map[string]error)) - for i := 0; i < length; i++ { - result := <-results - ret[result.Host] = result.Error - } - if volume := store.GetVolume(volumeId); volume != nil { - if length+1 < volume.ReplicaPlacement.GetCopyCount() { - return fmt.Errorf("replicating opetations [%d] is less than volume's replication copy count [%d]", length+1, volume.ReplicaPlacement.GetCopyCount()) + selfUrl := s.Ip + ":" + strconv.Itoa(s.Port) + for _, location := range lookupResult.Locations { + if location.Url != selfUrl { + remoteLocations = append(remoteLocations, location) + } } + } else { + err = fmt.Errorf("failed to lookup for %d: %v", volumeId, lookupErr) + return } - return ret.Error() - } else { - glog.V(0).Infoln() - return fmt.Errorf("Failed to lookup for %d: %v", volumeId, lookupErr) } + + return } diff --git a/weed/topology/topology.go b/weed/topology/topology.go index b7ebe8af5..fbf998707 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -7,11 +7,13 @@ import ( "sync" "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/sequence" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/util" )
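// The rewritten distributedOperation above is a plain fan-out/fan-in: one
// goroutine per replica location, results gathered over a channel, the
// combined error reported at the end. A generic stand-alone sketch of the
// same pattern (fanOut and its url parameter are illustrative names only):
package main

import "fmt"

func fanOut(urls []string, op func(url string) error) error {
	type result struct {
		url string
		err error
	}
	results := make(chan result, len(urls))
	for _, u := range urls {
		go func(u string) { results <- result{u, op(u)} }(u) // fan out
	}
	var firstErr error
	for range urls { // fan in: wait for every replica
		if r := <-results; r.err != nil && firstErr == nil {
			firstErr = fmt.Errorf("replicate to %s: %v", r.url, r.err)
		}
	}
	return firstErr
}

func main() {
	err := fanOut([]string{"10.0.0.1:8080", "10.0.0.2:8080"},
		func(u string) error { return nil })
	fmt.Println(err) // <nil>
}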
@@ -58,7 +60,12 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls func (t *Topology) IsLeader() bool { if t.RaftServer != nil { - return t.RaftServer.State() == raft.Leader + if t.RaftServer.State() == raft.Leader { + return true + } + if t.RaftServer.Leader() == "" { + return true + } } return false } @@ -73,7 +80,7 @@ func (t *Topology) Leader() (string, error) { if l == "" { // We are a single node cluster, we are the leader - return t.RaftServer.Name(), errors.New("Raft Server not initialized!") + return t.RaftServer.Name(), nil } return l, nil @@ -129,7 +136,7 @@ func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil } -func (t *Topology) GetVolumeLayout(collectionName string, rp *storage.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout { +func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout { return t.collectionMap.Get(collectionName, func() interface{} { return NewCollection(collectionName, t.volumeSizeLimit) }).(*Collection).GetOrCreateVolumeLayout(rp, ttl) @@ -150,7 +157,7 @@ func (t *Topology) ListCollections(includeNormalVolumes, includeEcVolumes bool) t.ecShardMapLock.RUnlock() } - for k, _ := range mapOfCollections { + for k := range mapOfCollections { ret = append(ret, k) } return ret diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go index 041351492..068bd401e 100644 --- a/weed/topology/topology_event_handling.go +++ b/weed/topology/topology_event_handling.go @@ -59,6 +59,7 @@ func (t *Topology) UnRegisterDataNode(dn *DataNode) { vl.SetVolumeUnavailable(dn, v.Id) } dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount()) + dn.UpAdjustRemoteVolumeCountDelta(-dn.GetRemoteVolumeCount()) dn.UpAdjustActiveVolumeCountDelta(-dn.GetActiveVolumeCount()) dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount()) if dn.Parent() != nil { diff --git a/weed/topology/topology_map.go b/weed/topology/topology_map.go index 0ad30f12e..73c55d77d 100644 --- a/weed/topology/topology_map.go +++ b/weed/topology/topology_map.go @@ -85,6 +85,7 @@ func (t *Topology) ToTopologyInfo() *master_pb.TopologyInfo { MaxVolumeCount: uint64(t.GetMaxVolumeCount()), FreeVolumeCount: uint64(t.FreeSpace()), ActiveVolumeCount: uint64(t.GetActiveVolumeCount()), + RemoteVolumeCount: uint64(t.GetRemoteVolumeCount()), } for _, c := range t.Children() { dc := c.(*DataCenter) diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go index 8f79ad684..e7676ccf7 100644 --- a/weed/topology/topology_test.go +++ b/weed/topology/topology_test.go @@ -5,6 +5,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/sequence" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "testing" ) @@ -94,7 +95,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) { []*master_pb.VolumeShortInformationMessage{newVolumeShortMessage}, nil, dn) - rp, _ := storage.NewReplicaPlacementFromString("000") + rp, _ := super_block.NewReplicaPlacementFromString("000") layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL) assert(t, "writables after repeated add", len(layout.writables), volumeCount) @@ -154,7 +155,7 @@ func TestAddRemoveVolume(t *testing.T) { DeletedByteCount: 45, ReadOnly: false, Version: needle.CurrentVersion, - ReplicaPlacement: &storage.ReplicaPlacement{}, + 
ReplicaPlacement: &super_block.ReplicaPlacement{}, Ttl: needle.EMPTY_TTL, } diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index ff32f1874..ca626e973 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -13,8 +13,10 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) -func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList, garbageThreshold float64) bool { - ch := make(chan bool, locationlist.Length()) +func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, + locationlist *VolumeLocationList, garbageThreshold float64) (*VolumeLocationList, bool) { + ch := make(chan int, locationlist.Length()) + errCount := int32(0) for index, dn := range locationlist.list { go func(index int, url string, vid needle.VolumeId) { err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { @@ -22,11 +24,15 @@ func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vi VolumeId: uint32(vid), }) if err != nil { - ch <- false + atomic.AddInt32(&errCount, 1) + ch <- -1 return err } - isNeeded := resp.GarbageRatio > garbageThreshold - ch <- isNeeded + if resp.GarbageRatio >= garbageThreshold { + ch <- index + } else { + ch <- -1 + } return nil }) if err != nil { @@ -34,18 +40,21 @@ func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vi } }(index, dn.Url(), vid) } - isCheckSuccess := true + vacuumLocationList := NewVolumeLocationList() for range locationlist.list { select { - case canVacuum := <-ch: - isCheckSuccess = isCheckSuccess && canVacuum + case index := <-ch: + if index != -1 { + vacuumLocationList.list = append(vacuumLocationList.list, locationlist.list[index]) + } case <-time.After(30 * time.Minute): - return false + return vacuumLocationList, false } } - return isCheckSuccess + return vacuumLocationList, errCount == 0 && len(vacuumLocationList.list) > 0 } -func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList, preallocate int64) bool { +func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, + locationlist *VolumeLocationList, preallocate int64) bool { vl.accessLock.Lock() vl.removeFromWritable(vid) vl.accessLock.Unlock() @@ -163,11 +172,12 @@ func vacuumOneVolumeLayout(grpcDialOption grpc.DialOption, volumeLayout *VolumeL } glog.V(2).Infof("check vacuum on collection:%s volume:%d", c.Name, vid) - if batchVacuumVolumeCheck(grpcDialOption, volumeLayout, vid, locationList, garbageThreshold) { - if batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, locationList, preallocate) { - batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, locationList) + if vacuumLocationList, needVacuum := batchVacuumVolumeCheck( + grpcDialOption, volumeLayout, vid, locationList, garbageThreshold); needVacuum { + if batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, vacuumLocationList, preallocate) { + batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, vacuumLocationList) } else { - batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, locationList) + batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, vacuumLocationList) } } } diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go index 636eb2260..446c88f60 100644 --- 
a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -6,6 +6,9 @@ import ( "sync" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" @@ -22,7 +25,7 @@ This package is created to resolve these replica placement issues: type VolumeGrowOption struct { Collection string - ReplicaPlacement *storage.ReplicaPlacement + ReplicaPlacement *super_block.ReplicaPlacement Ttl *needle.TTL Prealloacte int64 DataCenter string @@ -46,15 +49,20 @@ func NewDefaultVolumeGrowth() *VolumeGrowth { // one replication type may need rp.GetCopyCount() actual volumes // given copyCount, how many logical volumes to create func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) { + v := util.GetViper() + v.SetDefault("master.volume_growth.copy_1", 7) + v.SetDefault("master.volume_growth.copy_2", 6) + v.SetDefault("master.volume_growth.copy_3", 3) + v.SetDefault("master.volume_growth.copy_other", 1) switch copyCount { case 1: - count = 7 + count = v.GetInt("master.volume_growth.copy_1") case 2: - count = 6 + count = v.GetInt("master.volume_growth.copy_2") case 3: - count = 3 + count = v.GetInt("master.volume_growth.copy_3") default: - count = 1 + count = v.GetInt("master.volume_growth.copy_other") } return } @@ -104,7 +112,7 @@ func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topolo func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *VolumeGrowOption) (servers []*DataNode, err error) { //find main datacenter and other data centers rp := option.ReplicaPlacement - mainDataCenter, otherDataCenters, dc_err := topo.RandomlyPickNodes(rp.DiffDataCenterCount+1, func(node Node) error { + mainDataCenter, otherDataCenters, dc_err := topo.PickNodesByWeight(rp.DiffDataCenterCount+1, func(node Node) error { if option.DataCenter != "" && node.IsDataCenter() && node.Id() != NodeId(option.DataCenter) { return fmt.Errorf("Not matching preferred data center:%s", option.DataCenter) } @@ -136,7 +144,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum } //find main rack and other racks - mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).RandomlyPickNodes(rp.DiffRackCount+1, func(node Node) error { + mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).PickNodesByWeight(rp.DiffRackCount+1, func(node Node) error { if option.Rack != "" && node.IsRack() && node.Id() != NodeId(option.Rack) { return fmt.Errorf("Not matching preferred rack:%s", option.Rack) } @@ -163,7 +171,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum } //find main rack and other racks - mainServer, otherServers, serverErr := mainRack.(*Rack).RandomlyPickNodes(rp.SameRackCount+1, func(node Node) error { + mainServer, otherServers, serverErr := mainRack.(*Rack).PickNodesByWeight(rp.SameRackCount+1, func(node Node) error { if option.DataNode != "" && node.IsDataNode() && node.Id() != NodeId(option.DataNode) { return fmt.Errorf("Not matching preferred data node:%s", option.DataNode) } diff --git a/weed/topology/volume_growth_test.go b/weed/topology/volume_growth_test.go index 3573365fd..6ff5be0eb 100644 --- a/weed/topology/volume_growth_test.go +++ b/weed/topology/volume_growth_test.go @@ -8,6 +8,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/sequence" "github.com/chrislusf/seaweedfs/weed/storage" 
"github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var topologyLayout = ` @@ -113,7 +114,7 @@ func setup(topologyLayout string) *Topology { func TestFindEmptySlotsForOneVolume(t *testing.T) { topo := setup(topologyLayout) vg := NewDefaultVolumeGrowth() - rp, _ := storage.NewReplicaPlacementFromString("002") + rp, _ := super_block.NewReplicaPlacementFromString("002") volumeGrowOption := &VolumeGrowOption{ Collection: "", ReplicaPlacement: rp, @@ -130,3 +131,212 @@ func TestFindEmptySlotsForOneVolume(t *testing.T) { fmt.Println("assigned node :", server.Id()) } } + +var topologyLayout2 = ` +{ + "dc1":{ + "rack1":{ + "server111":{ + "volumes":[ + {"id":1, "size":12312}, + {"id":2, "size":12312}, + {"id":3, "size":12312} + ], + "limit":300 + }, + "server112":{ + "volumes":[ + {"id":4, "size":12312}, + {"id":5, "size":12312}, + {"id":6, "size":12312} + ], + "limit":300 + }, + "server113":{ + "volumes":[], + "limit":300 + }, + "server114":{ + "volumes":[], + "limit":300 + }, + "server115":{ + "volumes":[], + "limit":300 + }, + "server116":{ + "volumes":[], + "limit":300 + } + }, + "rack2":{ + "server121":{ + "volumes":[ + {"id":4, "size":12312}, + {"id":5, "size":12312}, + {"id":6, "size":12312} + ], + "limit":300 + }, + "server122":{ + "volumes":[], + "limit":300 + }, + "server123":{ + "volumes":[ + {"id":2, "size":12312}, + {"id":3, "size":12312}, + {"id":4, "size":12312} + ], + "limit":300 + }, + "server124":{ + "volumes":[], + "limit":300 + }, + "server125":{ + "volumes":[], + "limit":300 + }, + "server126":{ + "volumes":[], + "limit":300 + } + }, + "rack3":{ + "server131":{ + "volumes":[], + "limit":300 + }, + "server132":{ + "volumes":[], + "limit":300 + }, + "server133":{ + "volumes":[], + "limit":300 + }, + "server134":{ + "volumes":[], + "limit":300 + }, + "server135":{ + "volumes":[], + "limit":300 + }, + "server136":{ + "volumes":[], + "limit":300 + } + } + } +} +` + +func TestReplication011(t *testing.T) { + topo := setup(topologyLayout2) + vg := NewDefaultVolumeGrowth() + rp, _ := super_block.NewReplicaPlacementFromString("011") + volumeGrowOption := &VolumeGrowOption{ + Collection: "MAIL", + ReplicaPlacement: rp, + DataCenter: "dc1", + Rack: "", + DataNode: "", + } + servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption) + if err != nil { + fmt.Println("finding empty slots error :", err) + t.Fail() + } + for _, server := range servers { + fmt.Println("assigned node :", server.Id()) + } +} + +var topologyLayout3 = ` +{ + "dc1":{ + "rack1":{ + "server111":{ + "volumes":[], + "limit":2000 + } + } + }, + "dc2":{ + "rack2":{ + "server222":{ + "volumes":[], + "limit":2000 + } + } + }, + "dc3":{ + "rack3":{ + "server333":{ + "volumes":[], + "limit":1000 + } + } + }, + "dc4":{ + "rack4":{ + "server444":{ + "volumes":[], + "limit":1000 + } + } + }, + "dc5":{ + "rack5":{ + "server555":{ + "volumes":[], + "limit":500 + } + } + }, + "dc6":{ + "rack6":{ + "server666":{ + "volumes":[], + "limit":500 + } + } + } +} +` + +func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) { + topo := setup(topologyLayout3) + vg := NewDefaultVolumeGrowth() + rp, _ := super_block.NewReplicaPlacementFromString("100") + volumeGrowOption := &VolumeGrowOption{ + Collection: "Weight", + ReplicaPlacement: rp, + DataCenter: "", + Rack: "", + DataNode: "", + } + + distribution := map[NodeId]int{} + // assign 1000 volumes + for i := 0; i < 1000; i++ { + servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption) + if err 
!= nil { + fmt.Println("finding empty slots error :", err) + t.Fail() + } + for _, server := range servers { + // fmt.Println("assigned node :", server.Id()) + if _, ok := distribution[server.id]; !ok { + distribution[server.id] = 0 + } + distribution[server.id] += 1 + } + } + + for k, v := range distribution { + fmt.Printf("%s : %d\n", k, v) + } +} diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go index 799cbca62..7633b28be 100644 --- a/weed/topology/volume_layout.go +++ b/weed/topology/volume_layout.go @@ -10,11 +10,12 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) // mapping from volume to its locations, inverted from server to volume type VolumeLayout struct { - rp *storage.ReplicaPlacement + rp *super_block.ReplicaPlacement ttl *needle.TTL vid2location map[needle.VolumeId]*VolumeLocationList writables []needle.VolumeId // transient array of writable volume id @@ -30,7 +31,7 @@ type VolumeLayoutStats struct { FileCount uint64 } -func NewVolumeLayout(rp *storage.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64) *VolumeLayout { +func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64) *VolumeLayout { return &VolumeLayout{ rp: rp, ttl: ttl, diff --git a/weed/util/bytes.go b/weed/util/bytes.go index dfa4ae665..9c7e5e2cb 100644 --- a/weed/util/bytes.go +++ b/weed/util/bytes.go @@ -1,5 +1,10 @@ package util +import ( + "crypto/md5" + "io" +) + // big endian func BytesToUint64(b []byte) (v uint64) { @@ -43,3 +48,29 @@ func Uint16toBytes(b []byte, v uint16) { func Uint8toBytes(b []byte, v uint8) { b[0] = byte(v) } + +// returns a 64 bit big int +func HashStringToLong(dir string) (v int64) { + h := md5.New() + io.WriteString(h, dir) + + b := h.Sum(nil) + + v += int64(b[0]) + v <<= 8 + v += int64(b[1]) + v <<= 8 + v += int64(b[2]) + v <<= 8 + v += int64(b[3]) + v <<= 8 + v += int64(b[4]) + v <<= 8 + v += int64(b[5]) + v <<= 8 + v += int64(b[6]) + v <<= 8 + v += int64(b[7]) + + return +} diff --git a/weed/util/cipher.go b/weed/util/cipher.go new file mode 100644 index 000000000..7bcb6559a --- /dev/null +++ b/weed/util/cipher.go @@ -0,0 +1,81 @@ +package util + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + "fmt" + "io" + "io/ioutil" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +type CipherKey []byte + +func GenCipherKey() CipherKey { + key := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + glog.Fatalf("random key gen: %v", err) + } + return CipherKey(key) +} + +func Encrypt(plaintext []byte, key CipherKey) ([]byte, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + nonce := make([]byte, gcm.NonceSize()) + if _, err = io.ReadFull(rand.Reader, nonce); err != nil { + return nil, err + } + + return gcm.Seal(nonce, nonce, plaintext, nil), nil +} + +func Decrypt(ciphertext []byte, key CipherKey) ([]byte, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + nonceSize := gcm.NonceSize() + if len(ciphertext) < nonceSize { + return nil, errors.New("ciphertext too short") + } + + nonce, ciphertext := ciphertext[:nonceSize], ciphertext[nonceSize:] + return gcm.Open(nil, nonce, ciphertext, 
nil) +} + +func EncryptReader(clearReader io.Reader) (cipherKey CipherKey, encryptedReader io.ReadCloser, clearDataLen, encryptedDataLen int, err error) { + clearData, err := ioutil.ReadAll(clearReader) + if err != nil { + err = fmt.Errorf("read raw input: %v", err) + return + } + clearDataLen = len(clearData) + cipherKey = GenCipherKey() + encryptedData, err := Encrypt(clearData, cipherKey) + if err != nil { + err = fmt.Errorf("encrypt input: %v", err) + return + } + encryptedDataLen = len(encryptedData) + encryptedReader = ioutil.NopCloser(bytes.NewReader(encryptedData)) + return +} diff --git a/weed/util/compression.go b/weed/util/compression.go index c6c9423e2..6072df632 100644 --- a/weed/util/compression.go +++ b/weed/util/compression.go @@ -60,7 +60,7 @@ func UnGzipData(input []byte) ([]byte, error) { // images switch ext { - case ".svg", ".bmp": + case ".svg", ".bmp", ".wav": return true, true } if strings.HasPrefix(mtype, "image/") { @@ -87,6 +87,14 @@ func UnGzipData(input []byte) ([]byte, error) { if strings.HasSuffix(mtype, "script") { return true, true } + + } + + if strings.HasPrefix(mtype, "audio/") { + switch strings.TrimPrefix(mtype, "audio/") { + case "wave", "wav", "x-wav", "x-pn-wav": + return true, true + } } return false, false diff --git a/weed/util/config.go b/weed/util/config.go index 84f146bc8..dfbfdbd82 100644 --- a/weed/util/config.go +++ b/weed/util/config.go @@ -1,17 +1,19 @@ package util import ( - "github.com/chrislusf/seaweedfs/weed/glog" + "strings" + "github.com/spf13/viper" + + "github.com/chrislusf/seaweedfs/weed/glog" ) type Configuration interface { GetString(key string) string GetBool(key string) bool GetInt(key string) int - GetInt64(key string) int64 - GetFloat64(key string) float64 GetStringSlice(key string) []string + SetDefault(key string, value interface{}) } func LoadConfiguration(configFileName string, required bool) (loaded bool) { @@ -28,10 +30,7 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { glog.V(0).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) if required { glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ - "\n\nPlease follow this example and add a filer.toml file to "+ - "current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/:\n"+ - " https://github.com/chrislusf/seaweedfs/blob/master/weed/%s.toml\n"+ - "\nOr use this command to generate the default toml file\n"+ + "\n\nPlease use this command to generate the default %s.toml file\n"+ " weed scaffold -config=%s -output=.\n\n\n", configFileName, configFileName, configFileName) } else { @@ -41,3 +40,11 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { return true } + +func GetViper() *viper.Viper { + v := viper.GetViper() + v.AutomaticEnv() + v.SetEnvPrefix("weed") + v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + return v +} diff --git a/weed/util/constants.go b/weed/util/constants.go index f0df5fd59..c23bc11f6 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 45) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 61) ) diff --git a/weed/util/file_util.go b/weed/util/file_util.go index 78add6724..bef9f7cd6 100644 --- a/weed/util/file_util.go +++ b/weed/util/file_util.go @@ -3,6 +3,7 @@ package util import ( "errors" "os" + "time" "github.com/chrislusf/seaweedfs/weed/glog" ) @@ -40,3 +41,21 @@ func FileExists(filename string) bool { return true } + +func 
CheckFile(filename string) (exists, canRead, canWrite bool, modTime time.Time, fileSize int64) { + exists = true + fi, err := os.Stat(filename) + if os.IsNotExist(err) { + exists = false + return + } + if fi.Mode()&0400 != 0 { + canRead = true + } + if fi.Mode()&0200 != 0 { + canWrite = true + } + modTime = fi.ModTime() + fileSize = fi.Size() + return +} diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 79a442a56..750516b92 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -11,6 +11,8 @@ import ( "net/http" "net/url" "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" ) var ( @@ -28,18 +30,18 @@ func init() { } func PostBytes(url string, body []byte) ([]byte, error) { - r, err := client.Post(url, "application/octet-stream", bytes.NewReader(body)) + r, err := client.Post(url, "", bytes.NewReader(body)) if err != nil { return nil, fmt.Errorf("Post to %s: %v", url, err) } defer r.Body.Close() - if r.StatusCode >= 400 { - return nil, fmt.Errorf("%s: %s", url, r.Status) - } b, err := ioutil.ReadAll(r.Body) if err != nil { return nil, fmt.Errorf("Read response body: %v", err) } + if r.StatusCode >= 400 { + return nil, fmt.Errorf("%s: %s", url, r.Status) + } return b, nil } @@ -86,7 +88,7 @@ func Head(url string) (http.Header, error) { if err != nil { return nil, err } - defer r.Body.Close() + defer CloseResponse(r) if r.StatusCode >= 400 { return nil, fmt.Errorf("%s: %s", url, r.Status) } @@ -128,7 +130,7 @@ func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachB if err != nil { return err } - defer r.Body.Close() + defer CloseResponse(r) if r.StatusCode != 200 { return fmt.Errorf("%s: %s", url, r.Status) } @@ -151,7 +153,7 @@ func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) e if err != nil { return err } - defer r.Body.Close() + defer CloseResponse(r) if r.StatusCode != 200 { return fmt.Errorf("%s: %s", url, r.Status) } @@ -187,11 +189,22 @@ func NormalizeUrl(url string) string { return "http://" + url } -func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange bool) (n int64, e error) { +func ReadUrl(fileUrl string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int, buf []byte) (int64, error) { + + if cipherKey != nil { + var n int + err := readEncryptedUrl(fileUrl, cipherKey, isGzipped, offset, size, func(data []byte) { + n = copy(buf, data) + }) + return int64(n), err + } - req, _ := http.NewRequest("GET", fileUrl, nil) - if isReadRange { - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size))) + req, err := http.NewRequest("GET", fileUrl, nil) + if err != nil { + return 0, err + } + if !isFullChunk { + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) } else { req.Header.Set("Accept-Encoding", "gzip") } @@ -207,7 +220,8 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo } var reader io.ReadCloser - switch r.Header.Get("Content-Encoding") { + contentEncoding := r.Header.Get("Content-Encoding") + switch contentEncoding { case "gzip": reader, err = gzip.NewReader(r.Body) defer reader.Close() @@ -215,55 +229,121 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo reader = r.Body } - var i, m int + var ( + i, m int + n int64 + ) + // refers to https://github.com/golang/go/blob/master/src/bytes/buffer.go#L199 + // commit id c170b14c2c1cfb2fd853a37add92a82fd6eb4318 for { m, err = reader.Read(buf[i:]) - if m == 0 { - return - } i += m n += 
int64(m) if err == io.EOF { return n, nil } - if e != nil { - return n, e + if err != nil { + return n, err + } + if n == int64(len(buf)) { + break } } - + // drains the response body to avoid memory leak + data, _ := ioutil.ReadAll(reader) + if len(data) != 0 { + glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data)) + } + return n, err } -func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte)) (n int64, e error) { +func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) error { + + if cipherKey != nil { + return readEncryptedUrl(fileUrl, cipherKey, isContentGzipped, offset, size, fn) + } + + req, err := http.NewRequest("GET", fileUrl, nil) + if err != nil { + return err + } - req, _ := http.NewRequest("GET", fileUrl, nil) - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size))) + if !isFullChunk { + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) + } r, err := client.Do(req) if err != nil { - return 0, err + return err } - defer r.Body.Close() + defer CloseResponse(r) if r.StatusCode >= 400 { - return 0, fmt.Errorf("%s: %s", fileUrl, r.Status) + return fmt.Errorf("%s: %s", fileUrl, r.Status) } - var m int + var ( + m int + ) buf := make([]byte, 64*1024) for { m, err = r.Body.Read(buf) - if m == 0 { - return - } fn(buf[:m]) - n += int64(m) if err == io.EOF { - return n, nil + return nil + } + if err != nil { + return err } - if e != nil { - return n, e + } + +} + +func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentGzipped bool, offset int64, size int, fn func(data []byte)) error { + encryptedData, err := Get(fileUrl) + if err != nil { + return fmt.Errorf("fetch %s: %v", fileUrl, err) + } + decryptedData, err := Decrypt(encryptedData, CipherKey(cipherKey)) + if err != nil { + return fmt.Errorf("decrypt %s: %v", fileUrl, err) + } + if isContentGzipped { + decryptedData, err = UnGzipData(decryptedData) + if err != nil { + return fmt.Errorf("unzip decrypt %s: %v", fileUrl, err) } } + if len(decryptedData) < int(offset)+size { + return fmt.Errorf("read decrypted %s size %d [%d, %d)", fileUrl, len(decryptedData), offset, int(offset)+size) + } + fn(decryptedData[int(offset) : int(offset)+size]) + return nil +} + +func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) { + + req, err := http.NewRequest("GET", fileUrl, nil) + if err != nil { + return nil, err + } + if rangeHeader != "" { + req.Header.Add("Range", rangeHeader) + } + + r, err := client.Do(req) + if err != nil { + return nil, err + } + if r.StatusCode >= 400 { + return nil, fmt.Errorf("%s: %s", fileUrl, r.Status) + } + + return r.Body, nil +} +func CloseResponse(resp *http.Response) { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() } diff --git a/weed/util/httpdown/http_down.go b/weed/util/httpdown/http_down.go new file mode 100644 index 000000000..5cbd9611c --- /dev/null +++ b/weed/util/httpdown/http_down.go @@ -0,0 +1,395 @@ +// Package httpdown provides http.ConnState enabled graceful termination of +// http.Server. 
+// Based on github.com/facebookarchive/httpdown, which is MIT-licensed; +// we add support for serving HTTP over TLS. +package httpdown + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/facebookgo/clock" + "github.com/facebookgo/stats" +) + +const ( + defaultStopTimeout = time.Minute + defaultKillTimeout = time.Minute +) + +// A Server encapsulates the process of accepting new connections and +// serving them, and gracefully shutting down the listener without dropping +// active connections. +type Server interface { + // Wait waits for the serving loop to finish. This will happen when Stop is + // called, at which point it returns no error, or if there is an error in the + // serving loop. You must call Wait after calling Serve or ListenAndServe. + Wait() error + + // Stop stops the listener. It will block until all connections have been + // closed. + Stop() error +} + +// HTTP defines the configuration for serving a http.Server. Multiple calls to +// Serve or ListenAndServe can be made on the same HTTP instance. The default +// timeouts of 1 minute each result in a maximum of 2 minutes before a Stop() +// returns. +type HTTP struct { + // StopTimeout is the duration before we begin force closing connections. + // Defaults to 1 minute. + StopTimeout time.Duration + + // KillTimeout is the duration before which we completely give up and abort + // even though we still have connected clients. This is useful when a large + // number of client connections exist and closing them can take a long time. + // Note, this is in addition to the StopTimeout. Defaults to 1 minute. + KillTimeout time.Duration + + // Stats is optional. If provided, it will be used to record various metrics. + Stats stats.Client + + // Clock allows for testing timing related functionality. Do not specify this + // in production code. + Clock clock.Clock + + // When CertFile and KeyFile are set, httpdown will serve HTTP over TLS. + // Files containing a certificate and matching private key for the + // server must be provided if neither the Server's + // TLSConfig.Certificates nor TLSConfig.GetCertificate are populated. + // If the certificate is signed by a certificate authority, the + // certFile should be the concatenation of the server's certificate, + // any intermediates, and the CA's certificate. + CertFile, KeyFile string +} + +// Serve provides the low-level API which is useful if you're creating your own +// net.Listener. +func (h HTTP) Serve(s *http.Server, l net.Listener) Server { + stopTimeout := h.StopTimeout + if stopTimeout == 0 { + stopTimeout = defaultStopTimeout + } + killTimeout := h.KillTimeout + if killTimeout == 0 { + killTimeout = defaultKillTimeout + } + klock := h.Clock + if klock == nil { + klock = clock.New() + } + + ss := &server{ + stopTimeout: stopTimeout, + killTimeout: killTimeout, + stats: h.Stats, + clock: klock, + oldConnState: s.ConnState, + listener: l, + server: s, + serveDone: make(chan struct{}), + serveErr: make(chan error, 1), + new: make(chan net.Conn), + active: make(chan net.Conn), + idle: make(chan net.Conn), + closed: make(chan net.Conn), + stop: make(chan chan struct{}), + kill: make(chan chan struct{}), + certFile: h.CertFile, + keyFile: h.KeyFile, + } + s.ConnState = ss.connState + go ss.manage() + go ss.serve() + return ss +} + +// ListenAndServe returns a Server for the given http.Server. It is equivalent +// to ListenAndServe from the standard library, but returns immediately. 
+// Requests will be accepted in a background goroutine. If the http.Server has +// a non-nil TLSConfig, a TLS enabled listener will be setup. +func (h HTTP) ListenAndServe(s *http.Server) (Server, error) { + addr := s.Addr + if addr == "" { + if s.TLSConfig == nil { + addr = ":http" + } else { + addr = ":https" + } + } + l, err := net.Listen("tcp", addr) + if err != nil { + stats.BumpSum(h.Stats, "listen.error", 1) + return nil, err + } + if s.TLSConfig != nil { + l = tls.NewListener(l, s.TLSConfig) + } + return h.Serve(s, l), nil +} + +// server manages the serving process and allows for gracefully stopping it. +type server struct { + stopTimeout time.Duration + killTimeout time.Duration + stats stats.Client + clock clock.Clock + + oldConnState func(net.Conn, http.ConnState) + server *http.Server + serveDone chan struct{} + serveErr chan error + listener net.Listener + + new chan net.Conn + active chan net.Conn + idle chan net.Conn + closed chan net.Conn + stop chan chan struct{} + kill chan chan struct{} + + stopOnce sync.Once + stopErr error + + certFile, keyFile string +} + +func (s *server) connState(c net.Conn, cs http.ConnState) { + if s.oldConnState != nil { + s.oldConnState(c, cs) + } + + switch cs { + case http.StateNew: + s.new <- c + case http.StateActive: + s.active <- c + case http.StateIdle: + s.idle <- c + case http.StateHijacked, http.StateClosed: + s.closed <- c + } +} + +func (s *server) manage() { + defer func() { + close(s.new) + close(s.active) + close(s.idle) + close(s.closed) + close(s.stop) + close(s.kill) + }() + + var stopDone chan struct{} + + conns := map[net.Conn]http.ConnState{} + var countNew, countActive, countIdle float64 + + // decConn decrements the count associated with the current state of the + // given connection. + decConn := func(c net.Conn) { + switch conns[c] { + default: + panic(fmt.Errorf("unknown existing connection: %s", c)) + case http.StateNew: + countNew-- + case http.StateActive: + countActive-- + case http.StateIdle: + countIdle-- + } + } + + // setup a ticker to report various values every minute. if we don't have a + // Stats implementation provided, we Stop it so it never ticks. + statsTicker := s.clock.Ticker(time.Minute) + if s.stats == nil { + statsTicker.Stop() + } + + for { + select { + case <-statsTicker.C: + // we'll only get here when s.stats is not nil + s.stats.BumpAvg("http-state.new", countNew) + s.stats.BumpAvg("http-state.active", countActive) + s.stats.BumpAvg("http-state.idle", countIdle) + s.stats.BumpAvg("http-state.total", countNew+countActive+countIdle) + case c := <-s.new: + conns[c] = http.StateNew + countNew++ + case c := <-s.active: + decConn(c) + countActive++ + + conns[c] = http.StateActive + case c := <-s.idle: + decConn(c) + countIdle++ + + conns[c] = http.StateIdle + + // if we're already stopping, close it + if stopDone != nil { + c.Close() + } + case c := <-s.closed: + stats.BumpSum(s.stats, "conn.closed", 1) + decConn(c) + delete(conns, c) + + // if we're waiting to stop and are all empty, we just closed the last + // connection and we're done. 
+ if stopDone != nil && len(conns) == 0 { + close(stopDone) + return + } + case stopDone = <-s.stop: + // if we're already all empty, we're already done + if len(conns) == 0 { + close(stopDone) + return + } + + // close current idle connections right away + for c, cs := range conns { + if cs == http.StateIdle { + c.Close() + } + } + + // continue the loop and wait for all the ConnState updates which will + // eventually close(stopDone) and return from this goroutine. + + case killDone := <-s.kill: + // force close all connections + stats.BumpSum(s.stats, "kill.conn.count", float64(len(conns))) + for c := range conns { + c.Close() + } + + // don't block the kill. + close(killDone) + + // continue the loop and we wait for all the ConnState updates and will + // return from this goroutine when we're all done. otherwise we'll try to + // send those ConnState updates on closed channels. + + } + } +} + +func (s *server) serve() { + stats.BumpSum(s.stats, "serve", 1) + if s.certFile == "" && s.keyFile == "" { + s.serveErr <- s.server.Serve(s.listener) + } else { + s.serveErr <- s.server.ServeTLS(s.listener, s.certFile, s.keyFile) + } + close(s.serveDone) + close(s.serveErr) +} + +func (s *server) Wait() error { + if err := <-s.serveErr; !isUseOfClosedError(err) { + return err + } + return nil +} + +func (s *server) Stop() error { + s.stopOnce.Do(func() { + defer stats.BumpTime(s.stats, "stop.time").End() + stats.BumpSum(s.stats, "stop", 1) + + // first disable keep-alive for new connections + s.server.SetKeepAlivesEnabled(false) + + // then close the listener so new connections can't come through + closeErr := s.listener.Close() + <-s.serveDone + + // then trigger the background goroutine to stop and wait for it + stopDone := make(chan struct{}) + s.stop <- stopDone + + // wait for stop + select { + case <-stopDone: + case <-s.clock.After(s.stopTimeout): + defer stats.BumpTime(s.stats, "kill.time").End() + stats.BumpSum(s.stats, "kill", 1) + + // stop timed out, wait for kill + killDone := make(chan struct{}) + s.kill <- killDone + select { + case <-killDone: + case <-s.clock.After(s.killTimeout): + // kill timed out, give up + stats.BumpSum(s.stats, "kill.timeout", 1) + } + } + + if closeErr != nil && !isUseOfClosedError(closeErr) { + stats.BumpSum(s.stats, "listener.close.error", 1) + s.stopErr = closeErr + } + }) + return s.stopErr +} + +func isUseOfClosedError(err error) bool { + if err == nil { + return false + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + return err.Error() == "use of closed network connection" +} + +// ListenAndServe is a convenience function to serve and wait for a SIGTERM +// or SIGINT before shutting down. 
+func ListenAndServe(s *http.Server, hd *HTTP) error { + if hd == nil { + hd = &HTTP{} + } + hs, err := hd.ListenAndServe(s) + if err != nil { + return err + } + + waiterr := make(chan error, 1) + go func() { + defer close(waiterr) + waiterr <- hs.Wait() + }() + + signals := make(chan os.Signal, 10) + signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT) + + select { + case err := <-waiterr: + if err != nil { + return err + } + case <-signals: + signal.Stop(signals) + if err := hs.Stop(); err != nil { + return err + } + if err := <-waiterr; err != nil { + return err + } + } + return nil +} diff --git a/weed/util/net_timeout.go b/weed/util/net_timeout.go index b8068e67f..8acd50d42 100644 --- a/weed/util/net_timeout.go +++ b/weed/util/net_timeout.go @@ -66,11 +66,8 @@ func (c *Conn) Write(b []byte) (count int, e error) { } func (c *Conn) Close() error { - err := c.Conn.Close() - if err == nil { - stats.ConnectionClose() - } - return err + stats.ConnectionClose() + return c.Conn.Close() } func NewListener(addr string, timeout time.Duration) (net.Listener, error) { diff --git a/weed/util/queue.go b/weed/util/queue.go new file mode 100644 index 000000000..1e6211e0d --- /dev/null +++ b/weed/util/queue.go @@ -0,0 +1,61 @@ +package util + +import "sync" + +type node struct { + data interface{} + next *node +} + +type Queue struct { + head *node + tail *node + count int + sync.RWMutex +} + +func NewQueue() *Queue { + q := &Queue{} + return q +} + +func (q *Queue) Len() int { + q.RLock() + defer q.RUnlock() + return q.count +} + +func (q *Queue) Enqueue(item interface{}) { + q.Lock() + defer q.Unlock() + + n := &node{data: item} + + if q.tail == nil { + q.tail = n + q.head = n + } else { + q.tail.next = n + q.tail = n + } + q.count++ +} + +func (q *Queue) Dequeue() interface{} { + q.Lock() + defer q.Unlock() + + if q.head == nil { + return nil + } + + n := q.head + q.head = n.next + + if q.head == nil { + q.tail = nil + } + q.count-- + + return n.data +} diff --git a/weed/util/queue_unbounded.go b/weed/util/queue_unbounded.go new file mode 100644 index 000000000..664cd965e --- /dev/null +++ b/weed/util/queue_unbounded.go @@ -0,0 +1,45 @@ +package util + +import "sync" + +type UnboundedQueue struct { + outbound []string + outboundLock sync.RWMutex + inbound []string + inboundLock sync.RWMutex +} + +func NewUnboundedQueue() *UnboundedQueue { + q := &UnboundedQueue{} + return q +} + +func (q *UnboundedQueue) EnQueue(items ...string) { + q.inboundLock.Lock() + defer q.inboundLock.Unlock() + + // producers append to inbound; Consume swaps inbound into outbound + q.inbound = append(q.inbound, items...) 
+ +} + +func (q *UnboundedQueue) Consume(fn func([]string)) { + q.outboundLock.Lock() + defer q.outboundLock.Unlock() + + if len(q.outbound) == 0 { + q.inboundLock.Lock() + inboundLen := len(q.inbound) + if inboundLen > 0 { + t := q.outbound + q.outbound = q.inbound + q.inbound = t + } + q.inboundLock.Unlock() + } + + if len(q.outbound) > 0 { + fn(q.outbound) + q.outbound = q.outbound[:0] + } + +} diff --git a/weed/util/queue_unbounded_test.go b/weed/util/queue_unbounded_test.go new file mode 100644 index 000000000..2d02032cb --- /dev/null +++ b/weed/util/queue_unbounded_test.go @@ -0,0 +1,25 @@ +package util + +import "testing" + +func TestEnqueueAndConsume(t *testing.T) { + + q := NewUnboundedQueue() + + q.EnQueue("1", "2", "3") + + f := func(items []string) { + for _, item := range items { + println(item) + } + println("-----------------------") + } + q.Consume(f) + + q.Consume(f) + + q.EnQueue("4", "5") + q.EnQueue("6", "7") + q.Consume(f) + +} diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 6ba668ade..301f20615 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -2,19 +2,19 @@ package wdclient import ( "context" - "fmt" "math/rand" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" ) type MasterClient struct { - ctx context.Context name string + grpcPort uint32 currentMaster string masters []string grpcDialOption grpc.DialOption @@ -22,10 +22,10 @@ type MasterClient struct { vidMap } -func NewMasterClient(ctx context.Context, grpcDialOption grpc.DialOption, clientName string, masters []string) *MasterClient { +func NewMasterClient(grpcDialOption grpc.DialOption, clientName string, clientGrpcPort uint32, masters []string) *MasterClient { return &MasterClient{ - ctx: ctx, name: clientName, + grpcPort: clientGrpcPort, masters: masters, grpcDialOption: grpcDialOption, vidMap: newVidMap(), @@ -66,15 +66,15 @@ func (mc *MasterClient) tryAllMasters() { func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader string) { glog.V(1).Infof("%s Connecting to master %v", mc.name, master) - gprcErr := withMasterClient(context.Background(), master, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { + gprcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { - stream, err := client.KeepConnected(ctx) + stream, err := client.KeepConnected(context.Background()) if err != nil { glog.V(0).Infof("%s failed to keep connected to %s: %v", mc.name, master, err) return err } - if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.name}); err != nil { + if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.name, GrpcPort: mc.grpcPort}); err != nil { glog.V(0).Infof("%s failed to send to %s: %v", mc.name, master, err) return err } @@ -91,7 +91,7 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri // maybe the leader is changed if volumeLocation.Leader != "" { - glog.V(1).Infof("redirected to leader %v", volumeLocation.Leader) + glog.V(0).Infof("redirected to leader %v", volumeLocation.Leader) nextHintedLeader = volumeLocation.Leader return nil } @@ -118,22 +118,8 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri return } -func withMasterClient(ctx context.Context, master string, 
grpcDialOption grpc.DialOption, fn func(ctx context.Context, client master_pb.SeaweedClient) error) error { - - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) - if parseErr != nil { - return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) - } - - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { - client := master_pb.NewSeaweedClient(grpcConnection) - return fn(ctx, client) - }, masterGrpcAddress, grpcDialOption) - -} - -func (mc *MasterClient) WithClient(ctx context.Context, fn func(client master_pb.SeaweedClient) error) error { - return withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { +func (mc *MasterClient) WithClient(fn func(client master_pb.SeaweedClient) error) error { + return pb.WithMasterClient(mc.currentMaster, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { return fn(client) }) }
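The new weed/util/cipher.go added above wraps AES-256-GCM: GenCipherKey draws a random 32-byte key, Encrypt prepends the random nonce to the sealed output, and Decrypt strips it back off. A minimal round-trip sketch of those helpers (not part of the patch; the main package and import path are assumed from the file headers above):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	key := util.GenCipherKey() // 32 random bytes, i.e. AES-256

	ciphertext, err := util.Encrypt([]byte("hello seaweedfs"), key)
	if err != nil {
		panic(err)
	}

	// Encrypt prepends the GCM nonce, so Decrypt only needs the same key.
	plaintext, err := util.Decrypt(ciphertext, key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext) // hello seaweedfs
}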
<tr>
  <th>Id</th>
  <th>Collection</th>
  <th>Size</th>
  <th>Files</th>
  <th>Trash</th>
  <th>Remote</th>
  <th>Key</th>
</tr>
<tr>
  <td>{{ .Id }}</td>
  <td>{{ .Collection }}</td>
  <td>{{ .Size }} Bytes</td>
  <td>{{ .FileCount }}</td>
  <td>{{ .DeleteCount }} / {{.DeletedByteCount}} Bytes</td>
  <td>{{ .RemoteStorageName }}</td>
  <td>{{ .RemoteStorageKey }}</td>
</tr>
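The volume growth counts that findVolumeCount previously hard-coded (7/6/3/1) are now read through util.GetViper, which layers AutomaticEnv with the "weed" prefix and a "."-to-"_" key replacer on top of the SetDefault calls. Assuming standard viper precedence (environment over defaults), the key master.volume_growth.copy_1 should therefore be overridable via the WEED_MASTER_VOLUME_GROWTH_COPY_1 environment variable, as in this sketch:

package main

import (
	"fmt"
	"os"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	// Simulate an operator override; normally this is set in the master's environment.
	os.Setenv("WEED_MASTER_VOLUME_GROWTH_COPY_1", "4")

	v := util.GetViper()
	v.SetDefault("master.volume_growth.copy_1", 7) // same default findVolumeCount sets

	// Should print 4: the environment variable takes precedence over the default.
	fmt.Println(v.GetInt("master.volume_growth.copy_1"))
}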