
Merge remote-tracking branch 'upstream/master'

pull/1568/head
shibinbin, 5 years ago
commit 40334bc28d
Changed files (lines changed):
  1. .github/ISSUE_TEMPLATE/bug_report.md (1)
  2. .github/workflows/go.yml (37)
  3. .travis.yml (4)
  4. Makefile (11)
  5. README.md (81)
  6. backers.md (1)
  7. docker/Dockerfile.local (29)
  8. docker/Makefile (19)
  9. docker/README.md (4)
  10. docker/entrypoint.sh (25)
  11. docker/local-cluster-compose.yml (53)
  12. docker/local-dev-compose.yml (22)
  13. docker/seaweedfs-compose.yml (2)
  14. docker/seaweedfs-dev-compose.yml (6)
  15. go.mod (43)
  16. go.sum (258)
  17. k8s/README.md (23)
  18. k8s/seaweedfs/.helmignore (22)
  19. k8s/seaweedfs/Chart.yaml (4)
  20. k8s/seaweedfs/templates/_helpers.tpl (114)
  21. k8s/seaweedfs/templates/ca-cert.yaml (14)
  22. k8s/seaweedfs/templates/cert-clusterissuer.yaml (8)
  23. k8s/seaweedfs/templates/client-cert.yaml (33)
  24. k8s/seaweedfs/templates/filer-cert.yaml (33)
  25. k8s/seaweedfs/templates/filer-service.yaml (22)
  26. k8s/seaweedfs/templates/filer-statefulset.yaml (207)
  27. k8s/seaweedfs/templates/ingress.yaml (59)
  28. k8s/seaweedfs/templates/master-cert.yaml (33)
  29. k8s/seaweedfs/templates/master-service.yaml (24)
  30. k8s/seaweedfs/templates/master-statefulset.yaml (199)
  31. k8s/seaweedfs/templates/s3-deployment.yaml (158)
  32. k8s/seaweedfs/templates/s3-service.yaml (17)
  33. k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml (1352)
  34. k8s/seaweedfs/templates/secret-seaweedfs-db.yaml (14)
  35. k8s/seaweedfs/templates/security-configmap.yaml (52)
  36. k8s/seaweedfs/templates/service-account.yaml (29)
  37. k8s/seaweedfs/templates/volume-cert.yaml (33)
  38. k8s/seaweedfs/templates/volume-service.yaml (22)
  39. k8s/seaweedfs/templates/volume-statefulset.yaml (187)
  40. k8s/seaweedfs/values.yaml (309)
  41. other/java/client/pom.xml (15)
  42. other/java/client/pom_debug.xml (139)
  43. other/java/client/src/main/java/seaweedfs/client/ChunkCache.java (27)
  44. other/java/client/src/main/java/seaweedfs/client/FilerClient.java (9)
  45. other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java (33)
  46. other/java/client/src/main/java/seaweedfs/client/Gzip.java (37)
  47. other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java (55)
  48. other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java (88)
  49. other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java (43)
  50. other/java/client/src/main/proto/filer.proto (69)
  51. other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java (42)
  52. other/java/hdfs2/dependency-reduced-pom.xml (2)
  53. other/java/hdfs2/pom.xml (2)
  54. other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java (12)
  55. other/java/hdfs3/dependency-reduced-pom.xml (2)
  56. other/java/hdfs3/pom.xml (2)
  57. other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java (12)
  58. snap/README.md (49)
  59. snap/snapcraft.yaml (53)
  60. unmaintained/check_disk_size/check_disk_size.go (42)
  61. unmaintained/compact_leveldb/compact_leveldb.go (4)
  62. unmaintained/repeated_vacuum/repeated_vacuum.go (46)
  63. unmaintained/see_dat/see_dat.go (4)
  64. unmaintained/see_idx/see_idx.go (3)
  65. unmaintained/see_log_entry/see_log_entry.go (75)
  66. unmaintained/see_meta/see_meta.go (6)
  67. unmaintained/volume_tailer/volume_tailer.go (3)
  68. weed/command/backup.go (2)
  69. weed/command/benchmark.go (60)
  70. weed/command/command.go (16)
  71. weed/command/compact.go (2)
  72. weed/command/download.go (1)
  73. weed/command/export.go (2)
  74. weed/command/filer.go (24)
  75. weed/command/filer_copy.go (160)
  76. weed/command/filer_replication.go (1)
  77. weed/command/fix.go (1)
  78. weed/command/master.go (16)
  79. weed/command/mount.go (38)
  80. weed/command/mount_linux.go (4)
  81. weed/command/mount_std.go (127)
  82. weed/command/msg_broker.go (114)
  83. weed/command/s3.go (103)
  84. weed/command/scaffold.go (54)
  85. weed/command/server.go (29)
  86. weed/command/shell.go (13)
  87. weed/command/upload.go (10)
  88. weed/command/version.go (2)
  89. weed/command/volume.go (19)
  90. weed/command/watch.go (65)
  91. weed/command/webdav.go (56)
  92. weed/filer2/abstract_sql/abstract_sql_store.go (17)
  93. weed/filer2/cassandra/cassandra_store.go (23)
  94. weed/filer2/entry.go (12)
  95. weed/filer2/entry_codec.go (6)
  96. weed/filer2/etcd/etcd_store.go (22)
  97. weed/filer2/filechunks.go (78)
  98. weed/filer2/filechunks_test.go (2)
  99. weed/filer2/filer.go (88)
  100. weed/filer2/filer_buckets.go (121)

.github/ISSUE_TEMPLATE/bug_report.md (1)

@@ -8,6 +8,7 @@ assignees: ''
---
Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs
Report issues here. Ask questions here https://stackoverflow.com/questions/tagged/seaweedfs
example of a good issue report:
https://github.com/chrislusf/seaweedfs/issues/1005

.github/workflows/go.yml (37)

@@ -0,0 +1,37 @@
name: Go
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.13
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Get dependencies
run: |
cd weed; go get -v -t -d ./...
if [ -f Gopkg.toml ]; then
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
dep ensure
fi
- name: Build
run: cd weed; go build -v .
- name: Test
run: cd weed; go test -v .

.travis.yml (4)

@@ -1,9 +1,9 @@
sudo: false
language: go
go:
- 1.11.x
- 1.12.x
- 1.13.x
- 1.14.x
before_install:
- export PATH=/home/travis/gopath/bin:$PATH
@@ -45,4 +45,4 @@ deploy:
on:
tags: true
repo: chrislusf/seaweedfs
go: 1.13.x
go: 1.14.x

Makefile (11)

@@ -8,11 +8,14 @@ appname := weed
sources := $(wildcard *.go)
build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR)
COMMIT ?= $(shell git rev-parse --short HEAD)
LDFLAGS ?= -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${COMMIT}
build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3)
zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3)
build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR)
build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3)
zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3)
@@ -31,11 +34,11 @@ deps:
rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace
build: deps
go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR)
go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o $(BINARY) $(SOURCE_DIR)
linux: deps
mkdir -p linux
GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR)
GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o linux/$(BINARY) $(SOURCE_DIR)
release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build
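The new LDFLAGS line above relies on Go's `-X` linker flag, which overwrites a package-level string variable at link time; here the Makefile points it at `github.com/chrislusf/seaweedfs/weed/util.COMMIT`. A minimal standalone sketch of the same mechanism, with an illustrative variable in `main` rather than the real `weed/util` package:

```go
// commit_sketch.go - shows how "-X <import path>.COMMIT=<hash>" reaches Go code.
// Build with an injected value, e.g.:
//   go build -ldflags "-X main.COMMIT=$(git rev-parse --short HEAD)" commit_sketch.go
package main

import "fmt"

// COMMIT stays empty unless the linker overwrites it via -X.
// In SeaweedFS the equivalent variable lives in weed/util (see LDFLAGS above).
var COMMIT string

func main() {
	version := "dev" // illustrative base version string
	if COMMIT != "" {
		version += " " + COMMIT
	}
	fmt.Println("weed version", version)
}
```

Without the `-ldflags` argument the variable is simply the empty string, so release binaries built through the Makefile carry the short commit hash while ad-hoc `go build` binaries do not.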

README.md (81)

@@ -50,6 +50,7 @@ Your support will be really appreciated by me and other supporters!
- [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest)
- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
- [SeaweedFS on Twitter](https://twitter.com/SeaweedFS)
- [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs)
- [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki)
- [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction)
@@ -89,7 +90,7 @@ There is only 40 bytes of disk storage overhead for each file's metadata. It is
SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf)
On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Redis, Etcd, Cassandra, LevelDB, MemSql, TiDB, TiKV, CockroachDB, etc.
On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, LevelDB, MemSql, TiDB, TiKV, CockroachDB, etc.
[Back to TOC](#table-of-contents)
@@ -98,9 +99,10 @@ On top of the object store, optional [Filer] can support directories and POSIX a
* Automatic master servers failover - no single point of failure (SPOF).
* Automatic Gzip compression depending on file mime type.
* Automatic compaction to reclaim disk space after deletion or update.
* Servers in the same cluster can have different disk spaces, file systems, OS etc.
* [Automatic entry TTL expiration][VolumeServerTTL].
* Any server with some disk spaces can add to the total storage space.
* Adding/Removing servers does **not** cause any data re-balancing.
* Optionally fix the orientation for jpeg pictures.
* Optional picture resizing.
* Support ETag, Accept-Range, Last-Modified, etc.
* Support in-memory/leveldb/readonly mode tuning for memory/performance balance.
* Support rebalancing the writable and readonly volumes.
@@ -116,6 +118,8 @@ On top of the object store, optional [Filer] can support directories and POSIX a
* [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs.
* [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
* [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
* [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
* [File TTL][FilerTTL] automatically purge file metadata and actual file data.
[Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files
[Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount
@@ -125,6 +129,9 @@ On top of the object store, optional [Filer] can support directories and POSIX a
[WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV
[ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage
[CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier
[FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption
[FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores
[VolumeServerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Store-file-with-a-Time-To-Live
[Back to TOC](#table-of-contents)
@@ -354,7 +361,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
* SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
* SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc, and is easy to customized.
* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Mongodb, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc, and is easy to customized.
* SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.
| System | File Meta | File Content Read| POSIX | REST API | Optimized for small files |
@@ -363,6 +370,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
| SeaweedFS Filer| Linearly Scalable, Customizable | O(1) disk seek | FUSE | Yes | Yes |
| GlusterFS | hashing | | FUSE, NFS | | |
| Ceph | hashing + rules | | FUSE | Yes | |
| MooseFS | in memory | | FUSE | | No |
[Back to TOC](#table-of-contents)
@@ -374,6 +382,14 @@ GlusterFS hashes the path and filename into ids, and assigned to virtual volumes
[Back to TOC](#table-of-contents)
### Compared to MooseFS ###
MooseFS chooses to neglect small file issue. From moosefs 3.0 manual, "even a small file will occupy 64KiB plus additionally 4KiB of checksums and 1KiB for the header", because it "was initially designed for keeping large amounts (like several thousands) of very big files"
MooseFS Master Server keeps all meta data in memory. Same issue as HDFS namenode.
[Back to TOC](#table-of-contents)
### Compared to Ceph ###
Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120)
@@ -386,7 +402,7 @@ Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS pl
SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.
SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachCB, to manage file directories. There are proven, scalable, and easier to manage.
SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachCB, to manage file directories. These stores are proven, scalable, and easier to manage.
| SeaweedFS | comparable to Ceph | advantage |
| ------------- | ------------- | ---------------- |
@@ -434,6 +450,12 @@ go get github.com/chrislusf/seaweedfs/weed
Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory
Note:
* If you got into this problem, try to `rm -Rf $GOPATH/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace` and build again.
```
panic: /debug/requests is already registered. You may have two independent copies of golang.org/x/net/trace in your binary, trying to maintain separate state. This may involve a vendored copy of golang.org/x/net/trace.
```
Step 4: after you modify your code locally, you could start a local build by calling `go install` under
```
@@ -461,50 +483,49 @@ My Own Unscientific Single Machine Results on Mac Book with Solid State Disk, CP
Write 1 million 1KB file:
```
Concurrency Level: 16
Time taken for tests: 88.796 seconds
Time taken for tests: 66.753 seconds
Complete requests: 1048576
Failed requests: 0
Total transferred: 1106764659 bytes
Requests per second: 11808.87 [#/sec]
Transfer rate: 12172.05 [Kbytes/sec]
Total transferred: 1106789009 bytes
Requests per second: 15708.23 [#/sec]
Transfer rate: 16191.69 [Kbytes/sec]
Connection Times (ms)
min avg max std
Total: 0.2 1.3 44.8 0.9
Total: 0.3 1.0 84.3 0.9
Percentage of the requests served within a certain time (ms)
50% 1.1 ms
66% 1.3 ms
75% 1.5 ms
80% 1.7 ms
90% 2.1 ms
95% 2.6 ms
98% 3.7 ms
99% 4.6 ms
100% 44.8 ms
50% 0.8 ms
66% 1.0 ms
75% 1.1 ms
80% 1.2 ms
90% 1.4 ms
95% 1.7 ms
98% 2.1 ms
99% 2.6 ms
100% 84.3 ms
```
Randomly read 1 million files:
```
Concurrency Level: 16
Time taken for tests: 34.263 seconds
Time taken for tests: 22.301 seconds
Complete requests: 1048576
Failed requests: 0
Total transferred: 1106762945 bytes
Requests per second: 30603.34 [#/sec]
Transfer rate: 31544.49 [Kbytes/sec]
Total transferred: 1106812873 bytes
Requests per second: 47019.38 [#/sec]
Transfer rate: 48467.57 [Kbytes/sec]
Connection Times (ms)
min avg max std
Total: 0.0 0.5 20.7 0.7
Total: 0.0 0.3 54.1 0.2
Percentage of the requests served within a certain time (ms)
50% 0.4 ms
75% 0.5 ms
95% 0.6 ms
98% 0.8 ms
99% 1.2 ms
100% 20.7 ms
50% 0.3 ms
90% 0.4 ms
98% 0.6 ms
99% 0.7 ms
100% 54.1 ms
```
[Back to TOC](#table-of-contents)

backers.md (1)

@@ -6,6 +6,7 @@
- [4Sight Imaging](https://www.4sightimaging.com/)
- [Evercam Camera Management Software](https://evercam.io/)
- [Admiral](https://getadmiral.com)
<h2 align="center">Backers</h2>

docker/Dockerfile.local (29)

@@ -0,0 +1,29 @@
FROM alpine AS final
LABEL author="Chris Lu"
COPY ./weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY ./filer.toml /etc/seaweedfs/filer.toml
COPY ./entrypoint.sh /entrypoint.sh
# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
RUN mkdir -p /data/filerldb2
VOLUME /data
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

docker/Makefile (19)

@@ -0,0 +1,19 @@
all: gen
.PHONY : gen
gen: dev
build:
cd ../weed; GOOS=linux go build; mv weed ../docker/
docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
rm ./weed
dev: build
docker-compose -f local-dev-compose.yml -p seaweedfs up
cluster: build
docker-compose -f local-cluster-compose.yml -p seaweedfs up
clean:
rm ./weed

docker/README.md (4)

@@ -25,7 +25,5 @@ docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up
```bash
cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker
docker-compose -f local-dev-compose.yml -p seaweedfs up
make
```

docker/entrypoint.sh (25)

@@ -3,44 +3,33 @@
case "$1" in
'master')
ARGS="-mdir /data"
# Is this instance linked with an other master? (Docker commandline "--link master1:master")
if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
fi
ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024"
exec /usr/bin/weed $@ $ARGS
;;
'volume')
ARGS="-ip `hostname -i` -dir /data"
# Is this instance linked with a master? (Docker commandline "--link master1:master")
if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
ARGS="$ARGS -mserver=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
ARGS="-dir=/data -max=0"
if [[ $@ == *"-max="* ]]; then
ARGS="-dir=/data"
fi
exec /usr/bin/weed $@ $ARGS
;;
'server')
ARGS="-ip `hostname -i` -dir /data"
if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
ARGS="$ARGS -master.peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024"
if [[ $@ == *"-volume.max="* ]]; then
ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024"
fi
exec /usr/bin/weed $@ $ARGS
;;
'filer')
ARGS=""
if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
fi
exec /usr/bin/weed $@ $ARGS
;;
's3')
ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE"
if [ -n "$FILER_PORT_8888_TCP_ADDR" ] ; then
ARGS="$ARGS -filer=$FILER_PORT_8888_TCP_ADDR:$FILER_PORT_8888_TCP_PORT"
fi
exec /usr/bin/weed $@ $ARGS
;;

docker/local-cluster-compose.yml (53)

@@ -0,0 +1,53 @@
version: '2'
services:
master0:
image: chrislusf/seaweedfs:local
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335"
master1:
image: chrislusf/seaweedfs:local
ports:
- 9334:9334
- 19334:19334
command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335"
master2:
image: chrislusf/seaweedfs:local
ports:
- 9335:9335
- 19335:19335
command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335"
volume:
image: chrislusf/seaweedfs:local
ports:
- 8080:8080
- 18080:18080
command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume'
depends_on:
- master0
- master1
- master2
filer:
image: chrislusf/seaweedfs:local
ports:
- 8888:8888
- 18888:18888
command: 'filer -master="master0:9333,master1:9334,master2:9335"'
depends_on:
- master0
- master1
- master2
- volume
s3:
image: chrislusf/seaweedfs:local
ports:
- 8333:8333
command: 's3 -filer="filer:8888"'
depends_on:
- master0
- master1
- master2
- volume
- filer

docker/local-dev-compose.yml (22)

@@ -2,41 +2,33 @@ version: '2'
services:
master:
build:
context: .
dockerfile: Dockerfile.go_build
image: chrislusf/seaweedfs:local
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master"
volume:
build:
context: .
dockerfile: Dockerfile.go_build
image: chrislusf/seaweedfs:local
ports:
- 8080:8080
- 18080:18080
command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume'
command: "volume -mserver=master:9333 -port=8080 -ip=volume"
depends_on:
- master
filer:
build:
context: .
dockerfile: Dockerfile.go_build
image: chrislusf/seaweedfs:local
ports:
- 8888:8888
- 18888:18888
command: '-v=4 filer -master="master:9333"'
command: 'filer -master="master:9333"'
depends_on:
- master
- volume
s3:
build:
context: .
dockerfile: Dockerfile.go_build
image: chrislusf/seaweedfs:local
ports:
- 8333:8333
command: '-v=4 s3 -filer="filer:8888"'
command: 's3 -filer="filer:8888"'
depends_on:
- master
- volume

docker/seaweedfs-compose.yml (2)

@@ -12,7 +12,7 @@ services:
ports:
- 8080:8080
- 18080:18080
command: 'volume -max=15 -mserver="master:9333" -port=8080'
command: 'volume -mserver="master:9333" -port=8080'
depends_on:
- master
filer:

docker/seaweedfs-dev-compose.yml (6)

@@ -12,7 +12,7 @@ services:
ports:
- 8080:8080
- 18080:18080
command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume'
command: 'volume -mserver="master:9333" -port=8080 -ip=volume'
depends_on:
- master
filer:
@@ -20,7 +20,7 @@ services:
ports:
- 8888:8888
- 18888:18888
command: '-v=4 filer -master="master:9333"'
command: 'filer -master="master:9333"'
depends_on:
- master
- volume
@@ -28,7 +28,7 @@ services:
image: chrislusf/seaweedfs:dev # use a remote dev image
ports:
- 8333:8333
command: '-v=4 s3 -filer="filer:8888"'
command: 's3 -filer="filer:8888"'
depends_on:
- master
- volume

go.mod (43)

@@ -7,39 +7,45 @@ require (
github.com/Azure/azure-pipeline-go v0.2.2 // indirect
github.com/Azure/azure-storage-blob-go v0.8.0
github.com/DataDog/zstd v1.4.1 // indirect
github.com/OneOfOne/xxhash v1.2.2
github.com/Shopify/sarama v1.23.1
github.com/aws/aws-sdk-go v1.23.13
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
github.com/cespare/xxhash v1.1.0
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92
github.com/coreos/bbolt v1.3.3 // indirect
github.com/coreos/etcd v3.3.15+incompatible // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/disintegration/imaging v1.6.1
github.com/disintegration/imaging v1.6.2
github.com/dustin/go-humanize v1.0.0
github.com/eapache/go-resiliency v1.2.0 // indirect
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
github.com/frankban/quicktest v1.7.2 // indirect
github.com/gabriel-vasile/mimetype v1.0.0
github.com/go-redis/redis v6.15.2+incompatible
github.com/go-redis/redis v6.15.7+incompatible
github.com/go-sql-driver/mysql v1.4.1
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
github.com/golang/protobuf v1.3.2
github.com/google/btree v1.0.0
github.com/google/uuid v1.1.1
github.com/gorilla/mux v1.7.3
github.com/gorilla/websocket v1.4.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
github.com/hashicorp/golang-lru v0.5.3 // indirect
github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f
github.com/jcmturner/gofork v1.0.0 // indirect
github.com/karlseguin/ccache v2.0.3+incompatible
github.com/karlseguin/expect v1.0.1 // indirect
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/crc32 v1.2.0
github.com/klauspost/reedsolomon v1.9.2
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/kurin/blazer v0.5.3
github.com/lib/pq v1.2.0
github.com/magiconair/properties v1.8.1 // indirect
@@ -48,48 +54,43 @@ require (
github.com/nats-io/nats-server/v2 v2.0.4 // indirect
github.com/onsi/ginkgo v1.10.1 // indirect
github.com/onsi/gomega v1.7.0 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/pelletier/go-toml v1.4.0 // indirect
github.com/peterh/liner v1.1.0
github.com/pierrec/lz4 v2.2.7+incompatible // indirect
github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986 // indirect
github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b
github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b // indirect
github.com/prometheus/client_golang v1.1.0
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 // indirect
github.com/prometheus/procfs v0.0.4 // indirect
github.com/rakyll/statik v0.1.6
github.com/rakyll/statik v0.1.7
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff
github.com/sirupsen/logrus v1.4.2 // indirect
github.com/seaweedfs/goexif v1.0.2
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.2.2 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/viper v1.4.0
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect
github.com/stretchr/testify v1.4.0
github.com/stretchr/testify v1.3.0
github.com/syndtr/goleveldb v1.0.0
github.com/tidwall/gjson v1.3.2
github.com/tidwall/match v1.0.1
github.com/uber-go/atomic v1.4.0 // indirect
github.com/uber/jaeger-client-go v2.17.0+incompatible // indirect
github.com/uber/jaeger-lib v2.0.0+incompatible // indirect
github.com/willf/bitset v1.1.10 // indirect
github.com/willf/bloom v2.0.3+incompatible
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect
go.etcd.io/bbolt v1.3.3 // indirect
go.etcd.io/etcd v3.3.15+incompatible
go.mongodb.org/mongo-driver v1.3.2
go.uber.org/multierr v1.2.0 // indirect
gocloud.dev v0.16.0
gocloud.dev/pubsub/natspubsub v0.16.0
gocloud.dev/pubsub/rabbitpubsub v0.16.0
golang.org/x/image v0.0.0-20190829233526-b3c06291d021 // indirect
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 // indirect
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110
google.golang.org/api v0.9.0
google.golang.org/appengine v1.6.2 // indirect
google.golang.org/grpc v1.23.0
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 // indirect
google.golang.org/grpc v1.26.0
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect
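Among the additions above, `go.mongodb.org/mongo-driver v1.3.2` lines up with the new MongoDB option mentioned in the README changes for the filer metadata store. A minimal, hedged sketch of connecting with that driver; the URI, database, and collection names are illustrative and not taken from this commit:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Connect using the official driver added to go.mod in this commit.
	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(ctx)

	// "seaweedfs" / "filemeta" are placeholder names for wherever the
	// filer store keeps its entries, not values read from this diff.
	coll := client.Database("seaweedfs").Collection("filemeta")
	fmt.Println("connected, using collection:", coll.Name())
}
```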

go.sum (258)

@@ -35,13 +35,12 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.23.1 h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs=
github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f h1:5ZfJxyXo8KyX8DgGXC5B7ILL8y51fci/qYz2B4j8iLY=
github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@@ -55,63 +54,49 @@ github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 h1:fUmDBbSvv1uOzo/t8WaxZMVb7BxJ8JECo5lGoR9c5bA=
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72/go.mod h1:OEE5igu/CDjGegM1Jn6ZMo7R6LlV/JChAkjfQQIRLpg=
github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI=
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92/go.mod h1:4jyiUCD5y548+yKW+oiHtccBiMaLCCbFBpK2t7X4eUo=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 h1:hxuZop6tSoOi0sxFzoGGYdRqNrPubyaIf9KoBG9tPiE=
github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA=
github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/disintegration/imaging v1.6.1 h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE=
github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
@@ -122,12 +107,18 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M=
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4/go.mod h1:vsJz7uE339KUCpBXx3JAJzSRH7Uk4iGGyJzR529qDIA=
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk=
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
@@ -135,40 +126,51 @@ github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3B
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v0.3.17 h1:NGWgggJJqTofUcTV1E7hkk2zVjZ54EfJa1z5O3z6By4=
github.com/gabriel-vasile/mimetype v0.3.17/go.mod h1:kMJbg3SlWZCsj4R73F1WDzbT9AyGCOVmUtIxxwO5pmI=
github.com/gabriel-vasile/mimetype v1.0.0 h1:0QKnAQQhG6oOsb4GK7iPlet7RtjHi9us8RF/nXoTxhI=
github.com/gabriel-vasile/mimetype v1.0.0/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w=
github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4=
github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 h1:P66kRWyEoIx6URKgAC3ijx9jo9gEid7bEhLQ/Z0G65A=
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8=
github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34=
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
@@ -176,7 +178,6 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
@@ -185,7 +186,6 @@ github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8l
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -204,9 +204,7 @@ github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:x
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.3.0 h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60=
@@ -216,14 +214,8 @@ github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
@@ -234,8 +226,6 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -256,8 +246,6 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f h1:X+tnaqoCcBgAwSTJtoYW6p0qKiuPyMfofEHEFUf2kdU=
github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f/go.mod h1:Ip4fOwzCrnDVuluHBd7FXIMb7SHOKfkt9/UDrYSZvqI=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
@@ -272,17 +260,18 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=
github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU=
github.com/karlseguin/ccache v2.0.3+incompatible/go.mod h1:CM9tNPzT6EdRh14+jiW8mEF9mkNZuuE51qmgGYUB93w=
github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8hY=
github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v1.2.0 h1:0VuyqOCruD33/lJ/ojXNvzVyl8Zr5zdTmj9l9qLZ86I=
@@ -296,7 +285,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -309,22 +297,19 @@ github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDe
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -336,9 +321,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s=
github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/jwt v0.2.6/go.mod h1:mQxQ0uHQ9FhEVPIcTSKwx2lqZEpXWWcCgA7R6NrWvvY=
github.com/nats-io/jwt v0.2.14 h1:wA50KvFz/JXGXMHRygTWsRGh/ixxgC5E3kHvmtGLNf4=
@ -354,31 +337,18 @@ github.com/nats-io/nkeys v0.1.0 h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c=
github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI=
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k=
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8=
github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/peterh/liner v1.1.0 h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os=
@ -386,65 +356,29 @@ github.com/peterh/liner v1.1.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.2.7+incompatible h1:Eerk9aiqeZo2QzsbWOAsELUf9ddvAxEdMY9LYze/DEc=
github.com/pierrec/lz4 v2.2.7+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik=
github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM=
github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c h1:hvQd3aOLKLF7xvRV6DzvPkKY4QXzfVbjU1BhW0d9yL8=
github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI=
github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8=
github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
github.com/pingcap/kvproto v0.0.0-20190822090350-11ea838aedf7/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY=
github.com/pingcap/kvproto v0.0.0-20190910074005-0e61b6f435c1/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY=
github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986 h1:XFh7n8Cheo00pakfhpUofnlptHCuz9lkp4p/jXPb8lM=
github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY=
github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw=
github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA=
github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100 h1:TRyps2d+2TsJv1Vk4S2D+5COMDVKClnAO5aNmGGVyj0=
github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0 h1:GIEq+wZfrl2bcJxpuSrEH4H7/nlf5YdmpS+dU9lNIt8=
github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0/go.mod h1:G/6rJpnYwM0LKMec2rI82/5Kg6GaZMvlfB+e6/tvYmI=
github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b h1:6GfcYOX9/CCxPnNOivVxiDYXbZrCHU1mRp691iw9EYs=
github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b/go.mod h1:YfrHdQ613A+E2FSugyXOdJmeZQbXNjpXX2doNe8MGj8=
github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU=
github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM=
github.com/pingcap/tipb v0.0.0-20191015023537-709b39e7f8bb/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b h1:DZ0cTsn4lGMNaRjkUFKBtHn4s2F8KFMm83lWvSo+x7c=
github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
@ -453,28 +387,24 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.4 h1:w8DjqFMJDjuVwdZBQoOozr4MVWOnwF7RcL/7uxBjY78=
github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs=
github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook=
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0=
github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v2.18.10+incompatible h1:cy84jW6EVRPa5g9HAHrlbxMSIjBhDSX0OFYyMYminYs=
github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
@ -493,7 +423,6 @@ github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
@ -510,72 +439,50 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI=
github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU=
github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-client-go v2.17.0+incompatible h1:35tpDuT3k0oBiN/aGoSWuiFaqKgKZSciSMnWrazhSHE=
github.com/uber/jaeger-client-go v2.17.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw=
github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg=
github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20=
github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA=
github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw=
go.etcd.io/etcd v3.3.15+incompatible h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ=
go.etcd.io/etcd v3.3.15+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI=
go.mongodb.org/mongo-driver v1.3.2 h1:IYppNjEV/C+/3VPbhHVxQ4t04eVW0cLp0/pNdW++6Ug=
go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4=
go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM=
@ -584,25 +491,24 @@ gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPG
gocloud.dev/pubsub/natspubsub v0.16.0/go.mod h1:0n7pT7PkLMClBUHDrOkHfOFVr/o/6kawNMwsyAbwadI=
gocloud.dev/pubsub/rabbitpubsub v0.16.0 h1:Bkv2njMSl2tmT3tGbvbwpiIDAXBIpqzP9dmts+rhD4E=
gocloud.dev/pubsub/rabbitpubsub v0.16.0/go.mod h1:JJVdUUIqwgaaMJg/1xHQza0g4sI/4KHHSNiGE+pn4JM=
golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190829233526-b3c06291d021 h1:j6QOxNFMpEL1wIQX6TUdBPNfGZKmBOJS/vfSm8a7tdM=
golang.org/x/image v0.0.0-20190829233526-b3c06291d021/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -612,7 +518,6 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -637,6 +542,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -649,23 +555,23 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI=
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -677,10 +583,14 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 h1:6S6bidS7O4yAwA5ORRbRIjvNQ9tGbLd5e+LRIaTeVDQ=
@ -703,9 +613,7 @@ google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -714,30 +622,27 @@ google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs=
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo=
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
@ -754,8 +659,6 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/karlseguin/expect.v1 v1.0.1 h1:9u0iUltnhFbJTHaSIH0EP+cuTU5rafIgmcsEsg2JQFw=
gopkg.in/karlseguin/expect.v1 v1.0.1/go.mod h1:uB7QIJBcclvYbwlUDkSCsGjAOMis3fP280LyhuDEf2I=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@ -771,6 +674,3 @@ pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 h1:VO9oZbbkvTwqLimlQt15QNdOOBArT2dw/bvzsMZBiqQ=
sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k=

23
k8s/README.md

@ -0,0 +1,23 @@
## SEAWEEDFS - helm chart (2.x)
### info:
* master/filer/volume are stateful sets with anti-affinity on the hostname, so your deployment is spread across hosts and highly available.
* the chart uses memsql (MySQL-compatible) as the filer backend, both to enable HA (multiple filer instances) and to leverage the backup/HA features memsql provides.
* the MySQL user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected into the filer via environment variables (see the sketch after this list).
* a cert config exists and can be enabled, but it has not been tested yet.
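A rough illustration of the credential flow described above: once the release is up, the chart-managed secret and the resulting filer environment can be inspected as sketched below. The namespace and pod name assume the default chart name `seaweedfs`; they are placeholders, not values defined by this chart.

```shell
# The chart creates secret-seaweedfs-db (see secret-seaweedfs-db.yaml); its
# "user" and "password" keys are injected into the filer as WEED_MYSQL_USERNAME
# and WEED_MYSQL_PASSWORD.
kubectl -n seaweedfs get secret secret-seaweedfs-db \
  -o jsonpath='{.data.user}' | base64 --decode

# Confirm the filer container actually received the variables:
kubectl -n seaweedfs exec seaweedfs-filer-0 -- env | grep WEED_MYSQL
```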
### current instances config (AIO):
1 instance of each type (master/filer/volume/s3)
the instances require the following node labels:
* sw-volume: true (for the volume instance; a dedicated tag)
* sw-backend: true (for all other components, since they are less resource demanding)
you can update the replica count for each component type in values.yaml,
but you will also need to add more nodes carrying the corresponding label.
most of the configuration is available through values.yaml (see the install sketch below).
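For orientation, a minimal install sketch using these labels and replica counts; the node names, namespace, and release name are placeholders rather than values taken from this chart:

```shell
# Label the nodes that should run each component (node names are assumptions).
kubectl label node node-1 sw-volume=true
kubectl label node node-2 sw-backend=true

# Helm 2-style install (with Helm 3 the release name is positional).
helm install --name seaweedfs --namespace seaweedfs ./k8s/seaweedfs \
  --set filer.replicas=1 \
  --set master.replicas=1
# The untested cert config mentioned above could be switched on with
# --set global.enableSecurity=true once it has been verified.
```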

22
k8s/seaweedfs/.helmignore

@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

4
k8s/seaweedfs/Chart.yaml

@ -0,0 +1,4 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
version: 1.79

114
k8s/seaweedfs/templates/_helpers.tpl

@ -0,0 +1,114 @@
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to
this (by the DNS naming spec). If release name contains chart name it will
be used as a full name.
*/}}
{{- define "seaweedfs.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "seaweedfs.chart" -}}
{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
{{- define "seaweedfs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Inject extra environment vars in the format key:value, if populated
*/}}
{{- define "seaweedfs.extraEnvironmentVars" -}}
{{- if .extraEnvironmentVars -}}
{{- range $key, $value := .extraEnvironmentVars }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end -}}
{{- end -}}
{{- end -}}
{{/* Return the proper filer image */}}
{{- define "filer.image" -}}
{{- if .Values.filer.imageOverride -}}
{{- $imageOverride := .Values.filer.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
{{/* Return the proper postgresqlSchema image */}}
{{- define "filer.dbSchema.image" -}}
{{- if .Values.filer.dbSchema.imageOverride -}}
{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.global.repository | toString -}}
{{- $name := .Values.filer.dbSchema.imageName | toString -}}
{{- $tag := .Values.filer.dbSchema.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
{{/* Return the proper master image */}}
{{- define "master.image" -}}
{{- if .Values.master.imageOverride -}}
{{- $imageOverride := .Values.master.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
{{/* Return the proper s3 image */}}
{{- define "s3.image" -}}
{{- if .Values.s3.imageOverride -}}
{{- $imageOverride := .Values.s3.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
{{/* Return the proper volume image */}}
{{- define "volume.image" -}}
{{- if .Values.volume.imageOverride -}}
{{- $imageOverride := .Values.volume.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
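To sanity-check what the image helpers above resolve to, the chart can be rendered locally; the `--set` values here are illustrative assumptions, not the chart defaults:

```shell
# Render the templates and show the image references the helpers produce.
helm template ./k8s/seaweedfs \
  --set global.imageName=chrislusf/seaweedfs \
  --set global.imageTag=1.79 | grep 'image:'
```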

14
k8s/seaweedfs/templates/ca-cert.yaml

@ -0,0 +1,14 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ template "seaweedfs.name" . }}-ca-cert
namespace: {{ .Release.Namespace }}
spec:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
commonName: "{{ template "seaweedfs.name" . }}-root-ca"
isCA: true
issuerRef:
name: {{ template "seaweedfs.name" . }}-clusterissuer
kind: ClusterIssuer
{{- end }}

8
k8s/seaweedfs/templates/cert-clusterissuer.yaml

@ -0,0 +1,8 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: ClusterIssuer
metadata:
name: {{ template "seaweedfs.name" . }}-clusterissuer
spec:
selfSigned: {}
{{- end }}

33
k8s/seaweedfs/templates/client-cert.yaml

@ -0,0 +1,33 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ template "seaweedfs.name" . }}-client-cert
namespace: {{ .Release.Namespace }}
spec:
secretName: {{ template "seaweedfs.name" . }}-client-cert
issuerRef:
name: {{ template "seaweedfs.name" . }}-clusterissuer
kind: ClusterIssuer
commonName: {{ .Values.certificates.commonName }}
organization:
- "SeaweedFS CA"
dnsNames:
- '*.{{ .Release.Namespace }}'
- '*.{{ .Release.Namespace }}.svc'
- '*.{{ .Release.Namespace }}.svc.cluster.local'
- '*.{{ template "seaweedfs.name" . }}-master'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
{{- if .Values.certificates.ipAddresses }}
ipAddresses:
{{- range .Values.certificates.ipAddresses }}
- {{ . }}
{{- end }}
{{- end }}
keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
keySize: {{ .Values.certificates.keySize }}
duration: {{ .Values.certificates.duration }}
renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}

33
k8s/seaweedfs/templates/filer-cert.yaml

@ -0,0 +1,33 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ template "seaweedfs.name" . }}-filer-cert
namespace: {{ .Release.Namespace }}
spec:
secretName: {{ template "seaweedfs.name" . }}-filer-cert
issuerRef:
name: {{ template "seaweedfs.name" . }}-clusterissuer
kind: ClusterIssuer
commonName: {{ .Values.certificates.commonName }}
organization:
- "SeaweedFS CA"
dnsNames:
- '*.{{ .Release.Namespace }}'
- '*.{{ .Release.Namespace }}.svc'
- '*.{{ .Release.Namespace }}.svc.cluster.local'
- '*.{{ template "seaweedfs.name" . }}-master'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
{{- if .Values.certificates.ipAddresses }}
ipAddresses:
{{- range .Values.certificates.ipAddresses }}
- {{ . }}
{{- end }}
{{- end }}
keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
keySize: {{ .Values.certificates.keySize }}
duration: {{ .Values.certificates.duration }}
renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}

22
k8s/seaweedfs/templates/filer-service.yaml

@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
component: filer
spec:
clusterIP: None
ports:
- name: "swfs-filer"
port: {{ .Values.filer.port }}
targetPort: {{ .Values.filer.port }}
protocol: TCP
- name: "swfs-filer-grpc"
port: {{ .Values.filer.grpcPort }}
targetPort: {{ .Values.filer.grpcPort }}
protocol: TCP
selector:
app: {{ template "seaweedfs.name" . }}
component: filer

207
k8s/seaweedfs/templates/filer-statefulset.yaml

@ -0,0 +1,207 @@
{{- if .Values.filer.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
serviceName: {{ template "seaweedfs.name" . }}-filer
podManagementPolicy: Parallel
replicas: {{ .Values.filer.replicas }}
{{- if (gt (int .Values.filer.updatePartition) 0) }}
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: {{ .Values.filer.updatePartition }}
{{- end }}
selector:
matchLabels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: filer
template:
metadata:
labels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: filer
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }}
{{- if .Values.filer.affinity }}
affinity:
{{ tpl .Values.filer.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.filer.tolerations }}
tolerations:
{{ tpl .Values.filer.tolerations . | nindent 8 | trim }}
{{- end }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecrets }}
{{- end }}
serviceAccountName: seaweefds-rw-sa #hack for delete pod master after migration
terminationGracePeriodSeconds: 60
{{- if .Values.filer.priorityClassName }}
priorityClassName: {{ .Values.filer.priorityClassName | quote }}
{{- end }}
enableServiceLinks: false
containers:
- name: seaweedfs
image: {{ template "filer.image" . }}
imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: WEED_MYSQL_USERNAME
valueFrom:
secretKeyRef:
name: secret-seaweedfs-db
key: user
- name: WEED_MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: secret-seaweedfs-db
key: password
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
{{- if .Values.filer.extraEnvironmentVars }}
{{- range $key, $value := .Values.filer.extraEnvironmentVars }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
- |
exec /usr/bin/weed -logdir=/logs \
{{- if .Values.filer.loggingOverrideLevel }}
-v={{ .Values.filer.loggingOverrideLevel }} \
{{- else }}
-v={{ .Values.global.loggingLevel }} \
{{- end }}
filer \
-port={{ .Values.filer.port }} \
{{- if .Values.filer.disableHttp }}
-disableHttp \
{{- end }}
{{- if .Values.filer.disableDirListing }}
-disableDirListing \
{{- end }}
-dirListLimit={{ .Values.filer.dirListLimit }} \
-ip=${POD_IP} \
-master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
{{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }}
volumeMounts:
- name: seaweedfs-filer-log-volume
mountPath: "/logs/"
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
mountPath: /etc/seaweedfs/security.toml
subPath: security.toml
- name: ca-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/ca/
- name: master-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/master/
- name: volume-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/volume/
- name: filer-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/filer/
- name: client-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/client/
{{- end }}
{{ tpl .Values.filer.extraVolumeMounts . | nindent 12 | trim }}
{{- end }}
ports:
- containerPort: {{ .Values.filer.port }}
name: swfs-filer
- containerPort: {{ .Values.filer.grpcPort }}
#name: swfs-filer-grpc
readinessProbe:
httpGet:
path: /
port: {{ .Values.filer.port }}
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 15
successThreshold: 1
failureThreshold: 100
livenessProbe:
httpGet:
path: /
port: {{ .Values.filer.port }}
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 30
successThreshold: 1
failureThreshold: 5
{{- if .Values.filer.resources }}
resources:
{{ tpl .Values.filer.resources . | nindent 12 | trim }}
{{- end }}
volumes:
- name: seaweedfs-filer-log-volume
hostPath:
path: /storage/logs/seaweedfs/filer
type: DirectoryOrCreate
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:
name: {{ template "seaweedfs.name" . }}-security-config
- name: ca-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
- name: master-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-master-cert
- name: volume-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-volume-cert
- name: filer-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-filer-cert
- name: client-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-client-cert
{{- end }}
{{ tpl .Values.filer.extraVolumes . | indent 8 | trim }}
{{- if .Values.filer.nodeSelector }}
nodeSelector:
{{ tpl .Values.filer.nodeSelector . | indent 8 | trim }}
{{- end }}
{{/* volumeClaimTemplates:*/}}
{{/* - metadata:*/}}
{{/* name: data-{{ .Release.Namespace }}*/}}
{{/* spec:*/}}
{{/* accessModes:*/}}
{{/* - ReadWriteOnce*/}}
{{/* resources:*/}}
{{/* requests:*/}}
{{/* storage: {{ .Values.filer.storage }}*/}}
{{/* {{- if .Values.filer.storageClass }}*/}}
{{/* storageClassName: {{ .Values.filer.storageClass }}*/}}
{{/* {{- end }}*/}}
{{- end }}

59
k8s/seaweedfs/templates/ingress.yaml

@ -0,0 +1,59 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-filer
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
spec:
rules:
- http:
paths:
- path: /sw-filer/?(.*)
backend:
serviceName: {{ template "seaweedfs.name" . }}-filer
servicePort: {{ .Values.filer.port }}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-master
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
spec:
rules:
- http:
paths:
- path: /sw-master/?(.*)
backend:
serviceName: {{ template "seaweedfs.name" . }}-master
servicePort: {{ .Values.master.port }}

33
k8s/seaweedfs/templates/master-cert.yaml

@ -0,0 +1,33 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ template "seaweedfs.name" . }}-master-cert
namespace: {{ .Release.Namespace }}
spec:
secretName: {{ template "seaweedfs.name" . }}-master-cert
issuerRef:
name: {{ template "seaweedfs.name" . }}-clusterissuer
kind: ClusterIssuer
commonName: {{ .Values.certificates.commonName }}
organization:
- "SeaweedFS CA"
dnsNames:
- '*.{{ .Release.Namespace }}'
- '*.{{ .Release.Namespace }}.svc'
- '*.{{ .Release.Namespace }}.svc.cluster.local'
- '*.{{ template "seaweedfs.name" . }}-master'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
{{- if .Values.certificates.ipAddresses }}
ipAddresses:
{{- range .Values.certificates.ipAddresses }}
- {{ . }}
{{- end }}
{{- end }}
keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
keySize: {{ .Values.certificates.keySize }}
duration: {{ .Values.certificates.duration }}
renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}

24
k8s/seaweedfs/templates/master-service.yaml

@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "seaweedfs.name" . }}-master
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
component: master
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
clusterIP: None
ports:
- name: "swfs-master"
port: {{ .Values.master.port }}
targetPort: {{ .Values.master.port }}
protocol: TCP
- name: "swfs-master-grpc"
port: {{ .Values.master.grpcPort }}
targetPort: {{ .Values.master.grpcPort }}
protocol: TCP
selector:
app: {{ template "seaweedfs.name" . }}
component: master

199
k8s/seaweedfs/templates/master-statefulset.yaml

@ -0,0 +1,199 @@
{{- if .Values.master.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "seaweedfs.name" . }}-master
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
serviceName: {{ template "seaweedfs.name" . }}-master
podManagementPolicy: Parallel
replicas: {{ .Values.master.replicas }}
{{- if (gt (int .Values.master.updatePartition) 0) }}
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: {{ .Values.master.updatePartition }}
{{- end }}
selector:
matchLabels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: master
template:
metadata:
labels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: master
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }}
{{- if .Values.master.affinity }}
affinity:
{{ tpl .Values.master.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.master.tolerations }}
tolerations:
{{ tpl .Values.master.tolerations . | nindent 8 | trim }}
{{- end }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecrets }}
{{- end }}
terminationGracePeriodSeconds: 60
{{- if .Values.master.priorityClassName }}
priorityClassName: {{ .Values.master.priorityClassName | quote }}
{{- end }}
enableServiceLinks: false
containers:
- name: seaweedfs
image: {{ template "master.image" . }}
imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
command:
- "/bin/sh"
- "-ec"
- |
exec /usr/bin/weed -logdir=/logs \
{{- if .Values.master.loggingOverrideLevel }}
-v={{ .Values.master.loggingOverrideLevel }} \
{{- else }}
-v={{ .Values.global.loggingLevel }} \
{{- end }}
master \
-port={{ .Values.master.port }} \
-mdir=/data \
-ip.bind={{ .Values.master.ipBind }} \
{{- if .Values.master.volumePreallocate }}
-volumePreallocate \
{{- end }}
{{- if .Values.global.monitoring.enabled }}
-metrics.address="{{ .Values.global.monitoring.gatewayHost }}:{{ .Values.global.monitoring.gatewayPort }}" \
{{- end }}
-volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
{{- if .Values.master.disableHttp }}
-disableHttp \
{{- end }}
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \
-peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
volumeMounts:
- name : data-{{ .Release.Namespace }}
mountPath: /data
- name: seaweedfs-master-log-volume
mountPath: "/logs/"
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
mountPath: /etc/seaweedfs/security.toml
subPath: security.toml
- name: ca-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/ca/
- name: master-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/master/
- name: volume-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/volume/
- name: filer-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/filer/
- name: client-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/client/
{{- end }}
{{ tpl .Values.master.extraVolumeMounts . | nindent 12 | trim }}
ports:
- containerPort: {{ .Values.master.port }}
name: swfs-master
- containerPort: {{ .Values.master.grpcPort }}
#name: swfs-master-grpc
readinessProbe:
httpGet:
path: /cluster/status
port: {{ .Values.master.port }}
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 15
successThreshold: 2
failureThreshold: 100
livenessProbe:
httpGet:
path: /cluster/status
port: {{ .Values.master.port }}
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 10
successThreshold: 1
failureThreshold: 6
{{- if .Values.master.resources }}
resources:
{{ tpl .Values.master.resources . | nindent 12 | trim }}
{{- end }}
volumes:
- name: seaweedfs-master-log-volume
hostPath:
path: /storage/logs/seaweedfs/master
type: DirectoryOrCreate
- name: data-{{ .Release.Namespace }}
hostPath:
path: /ssd/seaweed-master/
type: DirectoryOrCreate
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:
name: {{ template "seaweedfs.name" . }}-security-config
- name: ca-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
- name: master-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-master-cert
- name: volume-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-volume-cert
- name: filer-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-filer-cert
- name: client-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-client-cert
{{- end }}
{{ tpl .Values.master.extraVolumes . | indent 8 | trim }}
{{- if .Values.master.nodeSelector }}
nodeSelector:
{{ tpl .Values.master.nodeSelector . | indent 8 | trim }}
{{- end }}
{{/* volumeClaimTemplates:*/}}
{{/* - metadata:*/}}
{{/* name: data-{{ .Release.Namespace }}*/}}
{{/* spec:*/}}
{{/* accessModes:*/}}
{{/* - ReadWriteOnce*/}}
{{/* resources:*/}}
{{/* requests:*/}}
{{/* storage: {{ .Values.master.storage }}*/}}
{{/* {{- if .Values.master.storageClass }}*/}}
{{/* storageClassName: {{ .Values.master.storageClass }}*/}}
{{/* {{- end }}*/}}
{{- end }}

158
k8s/seaweedfs/templates/s3-deployment.yaml

@ -0,0 +1,158 @@
{{- if .Values.s3.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "seaweedfs.name" . }}-s3
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
serviceName: {{ template "seaweedfs.name" . }}-s3
replicas: {{ .Values.s3.replicas }}
selector:
matchLabels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: s3
template:
metadata:
labels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: s3
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }}
{{- if .Values.s3.tolerations }}
tolerations:
{{ tpl .Values.s3.tolerations . | nindent 8 | trim }}
{{- end }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecrets }}
{{- end }}
terminationGracePeriodSeconds: 10
{{- if .Values.s3.priorityClassName }}
priorityClassName: {{ .Values.s3.priorityClassName | quote }}
{{- end }}
enableServiceLinks: false
containers:
- name: seaweedfs
image: {{ template "s3.image" . }}
imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
command:
- "/bin/sh"
- "-ec"
- |
exec /usr/bin/weed \
{{- if .Values.s3.loggingOverrideLevel }}
-v={{ .Values.s3.loggingOverrideLevel }} \
{{- else }}
-v={{ .Values.global.loggingLevel }} \
{{- end }}
s3 \
-port={{ .Values.s3.port }} \
{{- if .Values.global.enableSecurity }}
-cert.file=/usr/local/share/ca-certificates/client/tls.crt \
-key.file=/usr/local/share/ca-certificates/client/tls.key \
{{- end }}
{{- if .Values.s3.domainName }}
-domainName={{ .Values.s3.domainName }} \
{{- end }}
-filer={{ template "seaweedfs.name" . }}-filer:{{ .Values.filer.port }}
{{- if or (.Values.global.enableSecurity) (.Values.s3.extraVolumeMounts) }}
volumeMounts:
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
mountPath: /etc/seaweedfs/security.toml
subPath: security.toml
- name: ca-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/ca/
- name: master-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/master/
- name: volume-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/volume/
- name: filer-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/filer/
- name: client-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/client/
{{- end }}
{{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }}
{{- end }}
ports:
- containerPort: {{ .Values.s3.port }}
name: swfs-s3
readinessProbe:
httpGet:
path: /
port: {{ .Values.s3.port }}
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 15
successThreshold: 1
failureThreshold: 100
livenessProbe:
httpGet:
path: /
port: {{ .Values.s3.port }}
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 60
successThreshold: 1
failureThreshold: 20
{{- if .Values.s3.resources }}
resources:
{{ tpl .Values.s3.resources . | nindent 12 | trim }}
{{- end }}
volumes:
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:
name: {{ template "seaweedfs.name" . }}-security-config
- name: ca-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
- name: master-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-master-cert
- name: volume-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-volume-cert
- name: filer-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-filer-cert
- name: client-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-client-cert
{{- end }}
{{ tpl .Values.s3.extraVolumes . | indent 8 | trim }}
{{- if .Values.s3.nodeSelector }}
nodeSelector:
{{ tpl .Values.s3.nodeSelector . | indent 8 | trim }}
{{- end }}
{{- end }}

17
k8s/seaweedfs/templates/s3-service.yaml

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "seaweedfs.name" . }}-s3
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
component: s3
spec:
ports:
- name: "swfs-s3"
port: {{ .Values.s3.port }}
targetPort: {{ .Values.s3.port }}
protocol: TCP
selector:
app: {{ template "seaweedfs.name" . }}
component: s3

1352
k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml
File diff suppressed because it is too large

14
k8s/seaweedfs/templates/secret-seaweedfs-db.yaml

@ -0,0 +1,14 @@
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: secret-seaweedfs-db
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/resource-policy": keep
"helm.sh/hook": "pre-install"
stringData:
user: "YourSWUser"
password: "HardCodedPassword"
# better to randomly generate the password and create it in the DB
# password: {{ randAlphaNum 10 | sha256sum | b64enc | trunc 32 }}

52
k8s/seaweedfs/templates/security-configmap.yaml

@ -0,0 +1,52 @@
{{- if .Values.global.enableSecurity }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "seaweedfs.name" . }}-security-config
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
data:
security.toml: |-
# this file is read by master, volume server, and filer
# the jwt signing key is read by master and volume server
# a jwt expires in 10 seconds
[jwt.signing]
key = "{{ randAlphaNum 10 | b64enc }}"
# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
[grpc]
ca = "/usr/local/share/ca-certificates/ca/tls.crt"
[grpc.volume]
cert = "/usr/local/share/ca-certificates/volume/tls.crt"
key = "/usr/local/share/ca-certificates/volume/tls.key"
[grpc.master]
cert = "/usr/local/share/ca-certificates/master/tls.crt"
key = "/usr/local/share/ca-certificates/master/tls.key"
[grpc.filer]
cert = "/usr/local/share/ca-certificates/filer/tls.crt"
key = "/usr/local/share/ca-certificates/filer/tls.key"
# use this for any place that needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = "/usr/local/share/ca-certificates/client/tls.crt"
key = "/usr/local/share/ca-certificates/client/tls.key"
# volume server https options
# Note: work in progress!
# this does not yet work with other clients, e.g., "weed filer|mount".
[https.client]
enabled = false
[https.volume]
cert = ""
key = ""
{{- end }}

29
k8s/seaweedfs/templates/service-account.yaml

@ -0,0 +1,29 @@
# hack to allow deleting the master pod after migration
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: seaweefds-rw-cr
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: seaweefds-rw-sa
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: system:serviceaccount:seaweefds-rw-sa:default
subjects:
- kind: ServiceAccount
name: seaweefds-rw-sa
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: seaweefds-rw-cr

33
k8s/seaweedfs/templates/volume-cert.yaml

@ -0,0 +1,33 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ template "seaweedfs.name" . }}-volume-cert
namespace: {{ .Release.Namespace }}
spec:
secretName: {{ template "seaweedfs.name" . }}-volume-cert
issuerRef:
name: {{ template "seaweedfs.name" . }}-clusterissuer
kind: ClusterIssuer
commonName: {{ .Values.certificates.commonName }}
organization:
- "SeaweedFS CA"
dnsNames:
- '*.{{ .Release.Namespace }}'
- '*.{{ .Release.Namespace }}.svc'
- '*.{{ .Release.Namespace }}.svc.cluster.local'
- '*.{{ template "seaweedfs.name" . }}-master'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
{{- if .Values.certificates.ipAddresses }}
ipAddresses:
{{- range .Values.certificates.ipAddresses }}
- {{ . }}
{{- end }}
{{- end }}
keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
keySize: {{ .Values.certificates.keySize }}
duration: {{ .Values.certificates.duration }}
renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}

22
k8s/seaweedfs/templates/volume-service.yaml

@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "seaweedfs.name" . }}-volume
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
component: volume
spec:
clusterIP: None
ports:
- name: "swfs-volume"
port: {{ .Values.volume.port }}
targetPort: {{ .Values.volume.port }}
protocol: TCP
- name: "swfs-volume-18080"
port: {{ .Values.volume.grpcPort }}
targetPort: {{ .Values.volume.grpcPort }}
protocol: TCP
selector:
app: {{ template "seaweedfs.name" . }}
component: volume

187
k8s/seaweedfs/templates/volume-statefulset.yaml

@ -0,0 +1,187 @@
{{- if .Values.volume.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "seaweedfs.name" . }}-volume
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
serviceName: {{ template "seaweedfs.name" . }}-volume
replicas: {{ .Values.volume.replicas }}
selector:
matchLabels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: volume
template:
metadata:
labels:
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: volume
spec:
{{- if .Values.volume.affinity }}
affinity:
{{ tpl .Values.volume.affinity . | nindent 8 | trim }}
{{- end }}
restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }}
{{- if .Values.volume.tolerations }}
tolerations:
{{ tpl .Values.volume.tolerations . | nindent 8 | trim }}
{{- end }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecrets }}
{{- end }}
terminationGracePeriodSeconds: 10
{{- if .Values.volume.priorityClassName }}
priorityClassName: {{ .Values.volume.priorityClassName | quote }}
{{- end }}
enableServiceLinks: false
containers:
- name: seaweedfs
image: {{ template "volume.image" . }}
imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
command:
- "/bin/sh"
- "-ec"
- |
exec /usr/bin/weed -logdir=/logs \
{{- if .Values.volume.loggingOverrideLevel }}
-v={{ .Values.volume.loggingOverrideLevel }} \
{{- else }}
-v={{ .Values.global.loggingLevel }} \
{{- end }}
volume \
-port={{ .Values.volume.port }} \
-dir={{ .Values.volume.dir }} \
-max={{ .Values.volume.maxVolumes }} \
{{- if .Values.volume.rack }}
-rack={{ .Values.volume.rack }} \
{{- end }}
{{- if .Values.volume.dataCenter }}
-dataCenter={{ .Values.volume.dataCenter }} \
{{- end }}
-ip.bind={{ .Values.volume.ipBind }} \
-read.redirect={{ .Values.volume.readRedirect }} \
{{- if .Values.volume.whiteList }}
-whiteList={{ .Values.volume.whiteList }} \
{{- end }}
{{- if .Values.volume.imagesFixOrientation }}
-images.fix.orientation \
{{- end }}
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \
-compactionMBps={{ .Values.volume.compactionMBps }} \
-mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
volumeMounts:
- name: seaweedfs-volume-storage
mountPath: "/data/"
- name: seaweedfs-volume-log-volume
mountPath: "/logs/"
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
mountPath: /etc/seaweedfs/security.toml
subPath: security.toml
- name: ca-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/ca/
- name: master-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/master/
- name: volume-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/volume/
- name: filer-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/filer/
- name: client-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/client/
{{- end }}
{{ tpl .Values.volume.extraVolumeMounts . | nindent 12 | trim }}
ports:
- containerPort: {{ .Values.volume.port }}
name: swfs-vol
- containerPort: {{ .Values.volume.grpcPort }}
#name: swfs-vol-grpc
readinessProbe:
httpGet:
path: /status
port: {{ .Values.volume.port }}
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 15
successThreshold: 1
failureThreshold: 100
livenessProbe:
httpGet:
path: /status
port: {{ .Values.volume.port }}
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
{{- if .Values.volume.resources }}
resources:
{{ tpl .Values.volume.resources . | nindent 12 | trim }}
{{- end }}
volumes:
- name: seaweedfs-volume-log-volume
hostPath:
path: /storage/logs/seaweedfs/volume
type: DirectoryOrCreate
- name: seaweedfs-volume-storage
hostPath:
path: /storage/object_store/
type: DirectoryOrCreate
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:
name: {{ template "seaweedfs.name" . }}-security-config
- name: ca-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
- name: master-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-master-cert
- name: volume-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-volume-cert
- name: filer-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-filer-cert
- name: client-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-client-cert
{{- end }}
{{- if .Values.volume.extraVolumes }}
{{ tpl .Values.volume.extraVolumes . | indent 8 | trim }}
{{- end }}
{{- if .Values.volume.nodeSelector }}
nodeSelector:
{{ tpl .Values.volume.nodeSelector . | indent 8 | trim }}
{{- end }}
{{- end }}

309
k8s/seaweedfs/values.yaml

@ -0,0 +1,309 @@
# Available parameters and their default values for the SeaweedFS chart.
global:
registry: ""
repository: ""
imageName: chrislusf/seaweedfs
imageTag: "1.79"
imagePullPolicy: IfNotPresent
imagePullSecrets: imagepullsecret
restartPolicy: Always
loggingLevel: 1
enableSecurity: false
monitoring:
enabled: false
gatewayHost: null
gatewayPort: null
image:
registry: ""
repository: ""
master:
enabled: true
repository: null
imageName: null
imageTag: null
imageOverride: null
restartPolicy: null
replicas: 1
port: 9333
grpcPort: 19333
ipBind: "0.0.0.0"
volumePreallocate: false
volumeSizeLimitMB: 30000
loggingOverrideLevel: null
# Disable HTTP requests; only gRPC operations are allowed
disableHttp: false
extraVolumes: ""
extraVolumeMounts: ""
# storage and storageClass are the settings for configuring stateful
# storage for the master pods. storage should be set to the disk size of
# the attached volume. storageClass is the class of storage which defaults
# to null (the Kube cluster will pick the default).
storage: 25Gi
storageClass: null
# Resource requests, limits, etc. for the master cluster placement. This
# should map directly to the value of the resources field for a PodSpec,
# formatted as a multi-line string. By default no direct resource request
# is made.
resources: null
# updatePartition is used to control a careful rolling update of SeaweedFS
# masters.
updatePartition: 0
# Affinity Settings
# Commenting out or leaving the affinity variable empty allows deployment
# to single-node clusters such as Minikube
affinity: |
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: {{ template "seaweedfs.name" . }}
release: "{{ .Release.Name }}"
component: master
topologyKey: kubernetes.io/hostname
# Toleration Settings for master pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: ""
# nodeSelector labels for master pod assignment, formatted as a multi-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# Example:
# nodeSelector: |
# beta.kubernetes.io/arch: amd64
nodeSelector: |
sw-backend: "true"
# used to assign priority to master pods
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
volume:
enabled: true
repository: null
imageName: null
imageTag: null
imageOverride: null
restartPolicy: null
port: 8080
grpcPort: 18080
ipBind: "0.0.0.0"
replicas: 1
loggingOverrideLevel: null
# limit background compaction or copying speed in megabytes per second
compactionMBps: "40"
# Directories to store data files. dir[,dir]... (default "/tmp")
dir: "/data"
# Maximum numbers of volumes, count[,count]...
# If set to zero on a non-Windows OS, the limit will be auto-configured. (default "7")
maxVolumes: "0"
# Volume server's rack name
rack: null
# Volume server's data center name
dataCenter: null
# Redirect moved or non-local volumes. (default true)
readRedirect: true
# Comma separated Ip addresses having write permission. No limit if empty.
whiteList: null
# Adjust jpg orientation when uploading.
imagesFixOrientation: false
extraVolumes: ""
extraVolumeMounts: ""
# Affinity Settings
# Commenting out or leaving the affinity variable empty allows deployment
# to single-node clusters such as Minikube
affinity: |
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: {{ template "seaweedfs.name" . }}
release: "{{ .Release.Name }}"
component: volume
topologyKey: kubernetes.io/hostname
# Resource requests, limits, etc. for the server cluster placement. This
# should map directly to the value of the resources field for a PodSpec,
# formatted as a multi-line string. By default no direct resource request
# is made.
resources: null
# Toleration Settings for server pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: ""
# nodeSelector labels for server pod assignment, formatted as a multi-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# Example:
# nodeSelector: |
# beta.kubernetes.io/arch: amd64
nodeSelector: |
sw-volume: "true"
# used to assign priority to server pods
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
filer:
enabled: true
repository: null
imageName: null
imageTag: null
imageOverride: null
restartPolicy: null
replicas: 1
port: 8888
grpcPort: 18888
loggingOverrideLevel: null
# Limit sub dir listing size (default 100000)
dirListLimit: 100000
# Turn off directory listing
disableDirListing: false
# Disable HTTP requests; only gRPC operations are allowed
disableHttp: false
# storage and storageClass are the settings for configuring stateful
# storage for the filer pods. storage should be set to the disk size of
# the attached volume. storageClass is the class of storage which defaults
# to null (the Kube cluster will pick the default).
storage: 25Gi
storageClass: null
extraVolumes: ""
extraVolumeMounts: ""
# Affinity Settings
# Commenting out or leaving the affinity variable empty allows deployment
# to single-node clusters such as Minikube
affinity: |
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: {{ template "seaweedfs.name" . }}
release: "{{ .Release.Name }}"
component: filer
topologyKey: kubernetes.io/hostname
# updatePartition is used to control a careful rolling update of SeaweedFS
# filers.
updatePartition: 0
# Resource requests, limits, etc. for the server cluster placement. This
# should map directly to the value of the resources field for a PodSpec,
# formatted as a multi-line string. By default no direct resource request
# is made.
resources: null
# Toleration Settings for server pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: ""
# nodeSelector labels for server pod assignment, formatted as a multi-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# Example:
# nodeSelector: |
# beta.kubernetes.io/arch: amd64
nodeSelector: |
sw-backend: "true"
# used to assign priority to server pods
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
dbSchema:
imageName: db-schema
imageTag: "development"
imageOverride: ""
# extraEnvironmentVars is a list of extra environment variables to set in the filer stateful set.
extraEnvironmentVars:
WEED_MYSQL_ENABLED: "true"
WEED_MYSQL_HOSTNAME: "mysql-db-host"
WEED_MYSQL_PORT: "3306"
WEED_MYSQL_DATABASE: "sw_database"
WEED_MYSQL_CONNECTION_MAX_IDLE: "10"
WEED_MYSQL_CONNECTION_MAX_OPEN: "150"
# enable usage of memsql as filer backend
WEED_MYSQL_INTERPOLATEPARAMS: "true"
WEED_LEVELDB2_ENABLED: "false"
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false"
# each directory under this folder will automatically become a separate bucket
WEED_FILER_BUCKETS_FOLDER: "/buckets"
# directories under this folder will store message queue data
WEED_FILER_QUEUES_FOLDER: "/queues"
s3:
enabled: true
repository: null
imageName: null
imageTag: null
restartPolicy: null
replicas: 1
port: 8333
loggingOverrideLevel: null
# Suffix of the host name, {bucket}.{domainName}
domainName: ""
extraVolumes: ""
extraVolumeMounts: ""
# Resource requests, limits, etc. for the server cluster placement. This
# should map directly to the value of the resources field for a PodSpec,
# formatted as a multi-line string. By default no direct resource request
# is made.
resources: null
# Toleration Settings for server pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: ""
# nodeSelector labels for server pod assignment, formatted as a multi-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# Example:
# nodeSelector: |
# beta.kubernetes.io/arch: amd64
nodeSelector: |
sw-backend: "true"
# used to assign priority to server pods
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
certificates:
commonName: "SeaweedFS CA"
ipAddresses: []
keyAlgorithm: rsa
keySize: 2048
duration: 2160h # 90d
renewBefore: 360h # 15d

15
other/java/client/pom.xml

@ -1,10 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.2.4</version>
<version>1.2.8</version>
<parent>
<groupId>org.sonatype.oss</groupId>
@ -88,8 +89,8 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>7</source>
<target>7</target>
<source>8</source>
<target>8</target>
</configuration>
</plugin>
<plugin>
@ -97,9 +98,11 @@
<artifactId>protobuf-maven-plugin</artifactId>
<version>0.6.1</version>
<configuration>
<protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}</protocArtifact>
<protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
</protocArtifact>
<pluginId>grpc-java</pluginId>
<pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}</pluginArtifact>
<pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
</pluginArtifact>
</configuration>
<executions>
<execution>

139
other/java/client/pom_debug.xml

@ -0,0 +1,139 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.2.8</version>
<parent>
<groupId>org.sonatype.oss</groupId>
<artifactId>oss-parent</artifactId>
<version>9</version>
</parent>
<properties>
<protobuf.version>3.9.1</protobuf.version>
<!-- follow https://github.com/grpc/grpc-java -->
<grpc.version>1.23.0</grpc.version>
<guava.version>28.0-jre</guava.version>
</properties>
<dependencies>
<dependency>
<groupId>com.moandjiezana.toml</groupId>
<artifactId>toml4j</artifactId>
<version>0.7.2</version>
</dependency>
<!-- https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java -->
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
<version>${protobuf.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-netty-shaded</artifactId>
<version>${grpc.version}</version>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-protobuf</artifactId>
<version>${grpc.version}</version>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-stub</artifactId>
<version>${grpc.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpmime</artifactId>
<version>4.5.6</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<extensions>
<extension>
<groupId>kr.motd.maven</groupId>
<artifactId>os-maven-plugin</artifactId>
<version>1.6.2</version>
</extension>
</extensions>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>8</source>
<target>8</target>
</configuration>
</plugin>
<plugin>
<groupId>org.xolstice.maven.plugins</groupId>
<artifactId>protobuf-maven-plugin</artifactId>
<version>0.6.1</version>
<configuration>
<protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
</protocArtifact>
<pluginId>grpc-java</pluginId>
<pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
</pluginArtifact>
</configuration>
<executions>
<execution>
<goals>
<goal>compile</goal>
<goal>compile-custom</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>2.2.1</version>
<executions>
<execution>
<id>attach-sources</id>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>2.9.1</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

27
other/java/client/src/main/java/seaweedfs/client/ChunkCache.java

@ -0,0 +1,27 @@
package seaweedfs.client;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.TimeUnit;
public class ChunkCache {
private final Cache<String, byte[]> cache;
public ChunkCache(int maxEntries) {
this.cache = CacheBuilder.newBuilder()
.maximumSize(maxEntries)
.expireAfterAccess(1, TimeUnit.HOURS)
.build();
}
public byte[] getChunk(String fileId) {
return this.cache.getIfPresent(fileId);
}
public void setChunk(String fileId, byte[] data) {
this.cache.put(fileId, data);
}
}
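A minimal usage sketch of the new cache (the file id below is made up; entries expire an hour after last access per the CacheBuilder settings above):

```java
package seaweedfs.client;

public class ChunkCacheExample {
    public static void main(String[] args) {
        ChunkCache cache = new ChunkCache(1000);              // keep at most 1000 chunks
        cache.setChunk("3,01637037d6", "hello".getBytes());   // cache raw chunk bytes by file id
        byte[] data = cache.getChunk("3,01637037d6");         // returns null on a cache miss
        System.out.println(data == null ? "miss" : new String(data));
    }
}
```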

9
other/java/client/src/main/java/seaweedfs/client/FilerClient.java

@ -14,7 +14,7 @@ public class FilerClient {
private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class);
private FilerGrpcClient filerGrpcClient;
private final FilerGrpcClient filerGrpcClient;
public FilerClient(String host, int grpcPort) {
filerGrpcClient = new FilerGrpcClient(host, grpcPort);
@ -181,7 +181,7 @@ public class FilerClient {
.setLimit(limit)
.build());
List<FilerProto.Entry> entries = new ArrayList<>();
while (iter.hasNext()){
while (iter.hasNext()) {
FilerProto.ListEntriesResponse resp = iter.next();
entries.add(fixEntryAfterReading(resp.getEntry()));
}
@ -195,9 +195,12 @@ public class FilerClient {
.setDirectory(directory)
.setName(entryName)
.build()).getEntry();
if (entry == null) {
return null;
}
return fixEntryAfterReading(entry);
} catch (Exception e) {
if (e.getMessage().indexOf("filer: no entry is found in filer store")>0){
if (e.getMessage().indexOf("filer: no entry is found in filer store") > 0) {
return null;
}
LOG.warn("lookupEntry {}/{}: {}", directory, entryName, e);
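With the added null checks, lookupEntry now reports a missing entry as null instead of propagating an error; a rough caller sketch (host, port, and paths are placeholders):

```java
package seaweedfs.client;

public class LookupEntryExample {
    public static void main(String[] args) {
        FilerClient filerClient = new FilerClient("localhost", 18888);   // placeholder filer gRPC address
        FilerProto.Entry entry = filerClient.lookupEntry("/buckets/test", "hello.txt");
        if (entry == null) {
            System.out.println("no such entry");   // covers both the null entry and the not-found error
        } else {
            System.out.println("entry has " + entry.getChunksCount() + " chunks");
        }
    }
}
```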

33
other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java

@ -14,12 +14,6 @@ import java.util.concurrent.TimeUnit;
public class FilerGrpcClient {
private static final Logger logger = LoggerFactory.getLogger(FilerGrpcClient.class);
private final ManagedChannel channel;
private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub;
private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub;
static SslContext sslContext;
static {
@ -30,6 +24,14 @@ public class FilerGrpcClient {
}
}
private final ManagedChannel channel;
private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub;
private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub;
private boolean cipher = false;
private String collection = "";
private String replication = "";
public FilerGrpcClient(String host, int grpcPort) {
this(host, grpcPort, sslContext);
}
@ -42,6 +44,13 @@ public class FilerGrpcClient {
.negotiationType(NegotiationType.TLS)
.sslContext(sslContext));
FilerProto.GetFilerConfigurationResponse filerConfigurationResponse =
this.getBlockingStub().getFilerConfiguration(
FilerProto.GetFilerConfigurationRequest.newBuilder().build());
cipher = filerConfigurationResponse.getCipher();
collection = filerConfigurationResponse.getCollection();
replication = filerConfigurationResponse.getReplication();
}
public FilerGrpcClient(ManagedChannelBuilder<?> channelBuilder) {
@ -51,6 +60,18 @@ public class FilerGrpcClient {
futureStub = SeaweedFilerGrpc.newFutureStub(channel);
}
public boolean isCipher() {
return cipher;
}
public String getCollection() {
return collection;
}
public String getReplication() {
return replication;
}
public void shutdown() throws InterruptedException {
channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
}
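Since the constructor now pulls the filer configuration, the cipher, collection, and replication defaults are available right after connecting; a small sketch (the address is a placeholder):

```java
package seaweedfs.client;

public class FilerConfigExample {
    public static void main(String[] args) throws InterruptedException {
        FilerGrpcClient client = new FilerGrpcClient("localhost", 18888);  // placeholder filer gRPC address
        System.out.println("cipher enabled:      " + client.isCipher());
        System.out.println("default collection:  " + client.getCollection());
        System.out.println("default replication: " + client.getReplication());
        client.shutdown();   // shutdown() awaits channel termination, hence InterruptedException
    }
}
```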

37
other/java/client/src/main/java/seaweedfs/client/Gzip.java

@ -0,0 +1,37 @@
package seaweedfs.client;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
public class Gzip {
public static byte[] compress(byte[] data) throws IOException {
ByteArrayOutputStream bos = new ByteArrayOutputStream(data.length);
GZIPOutputStream gzip = new GZIPOutputStream(bos);
gzip.write(data);
gzip.close();
byte[] compressed = bos.toByteArray();
bos.close();
return compressed;
}
public static byte[] decompress(byte[] compressed) throws IOException {
ByteArrayInputStream bis = new ByteArrayInputStream(compressed);
GZIPInputStream gis = new GZIPInputStream(bis);
return readAll(gis);
}
private static byte[] readAll(InputStream input) throws IOException {
try( ByteArrayOutputStream output = new ByteArrayOutputStream()){
byte[] buffer = new byte[4096];
int n;
while (-1 != (n = input.read(buffer))) {
output.write(buffer, 0, n);
}
return output.toByteArray();
}
}
}
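A quick round-trip sketch of the helper:

```java
package seaweedfs.client;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class GzipExample {
    public static void main(String[] args) throws IOException {
        byte[] original = "hello seaweedfs".getBytes(StandardCharsets.UTF_8);
        byte[] compressed = Gzip.compress(original);    // gzip the payload
        byte[] restored = Gzip.decompress(compressed);  // inflate it back
        System.out.println(new String(restored, StandardCharsets.UTF_8));
    }
}
```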

55
other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java

@ -0,0 +1,55 @@
package seaweedfs.client;
import javax.crypto.Cipher;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import java.security.SecureRandom;
public class SeaweedCipher {
// AES-GCM parameters
public static final int AES_KEY_SIZE = 256; // in bits
public static final int GCM_NONCE_LENGTH = 12; // in bytes
public static final int GCM_TAG_LENGTH = 16; // in bytes
private static SecureRandom random = new SecureRandom();
public static byte[] genCipherKey() throws Exception {
byte[] key = new byte[AES_KEY_SIZE / 8];
random.nextBytes(key);
return key;
}
public static byte[] encrypt(byte[] clearTextbytes, byte[] cipherKey) throws Exception {
return encrypt(clearTextbytes, 0, clearTextbytes.length, cipherKey);
}
public static byte[] encrypt(byte[] clearTextbytes, int offset, int length, byte[] cipherKey) throws Exception {
final byte[] nonce = new byte[GCM_NONCE_LENGTH];
random.nextBytes(nonce);
GCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, nonce);
SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES");
Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding");
AES_cipherInstance.init(Cipher.ENCRYPT_MODE, keySpec, spec);
byte[] encryptedText = AES_cipherInstance.doFinal(clearTextbytes, offset, length);
byte[] iv = AES_cipherInstance.getIV();
byte[] message = new byte[GCM_NONCE_LENGTH + clearTextbytes.length + GCM_TAG_LENGTH];
System.arraycopy(iv, 0, message, 0, GCM_NONCE_LENGTH);
System.arraycopy(encryptedText, 0, message, GCM_NONCE_LENGTH, encryptedText.length);
return message;
}
public static byte[] decrypt(byte[] encryptedText, byte[] cipherKey) throws Exception {
final Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding");
GCMParameterSpec params = new GCMParameterSpec(GCM_TAG_LENGTH * 8, encryptedText, 0, GCM_NONCE_LENGTH);
SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES");
AES_cipherInstance.init(Cipher.DECRYPT_MODE, keySpec, params);
byte[] decryptedText = AES_cipherInstance.doFinal(encryptedText, GCM_NONCE_LENGTH, encryptedText.length - GCM_NONCE_LENGTH);
return decryptedText;
}
}
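To make the message layout concrete, a small sketch; the length arithmetic in the comment follows directly from the constants above:

```java
package seaweedfs.client;

public class CipherLayoutExample {
    public static void main(String[] args) throws Exception {
        byte[] key = SeaweedCipher.genCipherKey();                        // 256-bit AES key
        byte[] message = SeaweedCipher.encrypt("hello".getBytes(), key);
        // Layout is [12-byte nonce | ciphertext + 16-byte GCM tag], which is why decrypt()
        // reads the nonce from the first GCM_NONCE_LENGTH bytes of the message.
        System.out.println("5 plain bytes -> " + message.length + " encrypted bytes");  // 12 + 5 + 16 = 33
        System.out.println(new String(SeaweedCipher.decrypt(message, key)));
    }
}
```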

88
other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java

@ -6,16 +6,19 @@ import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.*;
public class SeaweedRead {
// private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
static ChunkCache chunkCache = new ChunkCache(1000);
// returns bytesRead
public static long read(FilerGrpcClient filerGrpcClient, List<VisibleInterval> visibleIntervals,
@ -56,26 +59,38 @@ public class SeaweedRead {
}
private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException {
byte[] chunkData = chunkCache.getChunk(chunkView.fileId);
if (chunkData == null) {
chunkData = doFetchFullChunkData(chunkView, locations);
}
int len = (int) chunkView.size;
LOG.debug("readChunkView fid:{} chunkData.length:{} chunkView.offset:{} buffer.length:{} startOffset:{} len:{}",
chunkView.fileId, chunkData.length, chunkView.offset, buffer.length, startOffset, len);
System.arraycopy(chunkData, (int) chunkView.offset, buffer, startOffset, len);
chunkCache.setChunk(chunkView.fileId, chunkData);
return len;
}
private static byte[] doFetchFullChunkData(ChunkView chunkView, FilerProto.Locations locations) throws IOException {
HttpClient client = new DefaultHttpClient();
HttpGet request = new HttpGet(
String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
if (!chunkView.isFullChunk) {
request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
request.setHeader(HttpHeaders.RANGE,
String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size - 1));
}
byte[] data = null;
try {
HttpResponse response = client.execute(request);
HttpEntity entity = response.getEntity();
int len = (int) (chunkView.logicOffset - position + chunkView.size);
OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len));
entity.writeTo(outputStream);
// LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len);
return len;
data = EntityUtils.toByteArray(entity);
} finally {
if (client instanceof Closeable) {
@ -83,6 +98,21 @@ public class SeaweedRead {
t.close();
}
}
if (chunkView.isGzipped) {
data = Gzip.decompress(data);
}
if (chunkView.cipherKey != null && chunkView.cipherKey.length != 0) {
try {
data = SeaweedCipher.decrypt(data, chunkView.cipherKey);
} catch (Exception e) {
throw new IOException("fail to decrypt", e);
}
}
return data;
}
protected static List<ChunkView> viewFromVisibles(List<VisibleInterval> visibleIntervals, long offset, long size) {
@ -97,7 +127,9 @@ public class SeaweedRead {
offset - chunk.start,
Math.min(chunk.stop, stop) - offset,
offset,
isFullChunk
isFullChunk,
chunk.cipherKey,
chunk.isGzipped
));
offset = Math.min(chunk.stop, stop);
}
@ -131,7 +163,9 @@ public class SeaweedRead {
chunk.getOffset() + chunk.getSize(),
chunk.getFileId(),
chunk.getMtime(),
true
true,
chunk.getCipherKey().toByteArray(),
chunk.getIsGzipped()
);
// easy cases to speed up
@ -151,7 +185,9 @@ public class SeaweedRead {
chunk.getOffset(),
v.fileId,
v.modifiedTime,
false
false,
v.cipherKey,
v.isGzipped
));
}
long chunkStop = chunk.getOffset() + chunk.getSize();
@ -161,7 +197,9 @@ public class SeaweedRead {
v.stop,
v.fileId,
v.modifiedTime,
false
false,
v.cipherKey,
v.isGzipped
));
}
if (chunkStop <= v.start || v.stop <= chunk.getOffset()) {
@ -208,13 +246,17 @@ public class SeaweedRead {
public final long modifiedTime;
public final String fileId;
public final boolean isFullChunk;
public final byte[] cipherKey;
public final boolean isGzipped;
public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk) {
public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) {
this.start = start;
this.stop = stop;
this.modifiedTime = modifiedTime;
this.fileId = fileId;
this.isFullChunk = isFullChunk;
this.cipherKey = cipherKey;
this.isGzipped = isGzipped;
}
@Override
@ -225,6 +267,8 @@ public class SeaweedRead {
", modifiedTime=" + modifiedTime +
", fileId='" + fileId + '\'' +
", isFullChunk=" + isFullChunk +
", cipherKey=" + Arrays.toString(cipherKey) +
", isGzipped=" + isGzipped +
'}';
}
}
@ -235,13 +279,17 @@ public class SeaweedRead {
public final long size;
public final long logicOffset;
public final boolean isFullChunk;
public final byte[] cipherKey;
public final boolean isGzipped;
public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk) {
public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) {
this.fileId = fileId;
this.offset = offset;
this.size = size;
this.logicOffset = logicOffset;
this.isFullChunk = isFullChunk;
this.cipherKey = cipherKey;
this.isGzipped = isGzipped;
}
@Override
@ -252,6 +300,8 @@ public class SeaweedRead {
", size=" + size +
", logicOffset=" + logicOffset +
", isFullChunk=" + isFullChunk +
", cipherKey=" + Arrays.toString(cipherKey) +
", isGzipped=" + isGzipped +
'}';
}
}
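A hypothetical condensation of the new per-chunk read path (not the real class; rawHttpBody stands in for the HTTP GET done in doFetchFullChunkData): cache lookup first, then fetch, gunzip, decrypt, and finally cache the decoded bytes.

```java
package seaweedfs.client;

import java.io.IOException;

public class ReadPathSketch {
    static final ChunkCache chunkCache = new ChunkCache(1000);

    static byte[] fetchChunk(String fileId, byte[] rawHttpBody,
                             boolean isGzipped, byte[] cipherKey) throws IOException {
        byte[] data = chunkCache.getChunk(fileId);
        if (data != null) {
            return data;                          // cache hit: skip network, gunzip, and decrypt
        }
        data = rawHttpBody;                       // placeholder for the volume server response body
        if (isGzipped) {
            data = Gzip.decompress(data);
        }
        if (cipherKey != null && cipherKey.length != 0) {
            try {
                data = SeaweedCipher.decrypt(data, cipherKey);
            } catch (Exception e) {
                throw new IOException("fail to decrypt", e);
            }
        }
        chunkCache.setChunk(fileId, data);        // cache the decoded plaintext for later reads
        return data;
    }
}
```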

43
other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java

@ -1,5 +1,6 @@
package seaweedfs.client;
import com.google.protobuf.ByteString;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
@ -11,9 +12,12 @@ import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.security.SecureRandom;
public class SeaweedWrite {
private static SecureRandom random = new SecureRandom();
public static void writeData(FilerProto.Entry.Builder entry,
final String replication,
final FilerGrpcClient filerGrpcClient,
@ -22,10 +26,9 @@ public class SeaweedWrite {
final long bytesOffset, final long bytesLength) throws IOException {
FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
FilerProto.AssignVolumeRequest.newBuilder()
.setCollection("")
.setReplication(replication)
.setCollection(filerGrpcClient.getCollection())
.setReplication(replication == null ? filerGrpcClient.getReplication() : replication)
.setDataCenter("")
.setReplication("")
.setTtlSec(0)
.build());
String fileId = response.getFileId();
@ -33,7 +36,17 @@ public class SeaweedWrite {
String auth = response.getAuth();
String targetUrl = String.format("http://%s/%s", url, fileId);
String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength);
ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY;
byte[] cipherKey = null;
if (filerGrpcClient.isCipher()) {
cipherKey = genCipherKey();
cipherKeyString = ByteString.copyFrom(cipherKey);
}
String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey);
// cache fileId ~ bytes
SeaweedRead.chunkCache.setChunk(fileId, bytes);
entry.addChunks(FilerProto.FileChunk.newBuilder()
.setFileId(fileId)
@ -41,6 +54,7 @@ public class SeaweedWrite {
.setSize(bytesLength)
.setMtime(System.currentTimeMillis() / 10000L)
.setETag(etag)
.setCipherKey(cipherKeyString)
);
}
@ -58,11 +72,22 @@ public class SeaweedWrite {
private static String multipartUpload(String targetUrl,
String auth,
final byte[] bytes,
final long bytesOffset, final long bytesLength) throws IOException {
final long bytesOffset, final long bytesLength,
byte[] cipherKey) throws IOException {
HttpClient client = new DefaultHttpClient();
InputStream inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
InputStream inputStream = null;
if (cipherKey == null || cipherKey.length == 0) {
inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
} else {
try {
byte[] encryptedBytes = SeaweedCipher.encrypt(bytes, (int) bytesOffset, (int) bytesLength, cipherKey);
inputStream = new ByteArrayInputStream(encryptedBytes, 0, encryptedBytes.length);
} catch (Exception e) {
throw new IOException("fail to encrypt data", e);
}
}
HttpPost post = new HttpPost(targetUrl);
if (auth != null && auth.length() != 0) {
@ -92,4 +117,10 @@ public class SeaweedWrite {
}
}
private static byte[] genCipherKey() {
byte[] b = new byte[32];
random.nextBytes(b);
return b;
}
}
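Roughly how the new cipher-aware pieces fit together on the write side, with the volume-server upload itself omitted (this is a sketch under those assumptions, not the shipped method):

```java
package seaweedfs.client;

import com.google.protobuf.ByteString;
import java.io.IOException;
import java.security.SecureRandom;

public class WritePathSketch {
    private static final SecureRandom random = new SecureRandom();

    static FilerProto.FileChunk.Builder buildChunk(FilerGrpcClient filerGrpcClient,
                                                   String fileId, byte[] bytes) throws IOException {
        ByteString cipherKeyString = ByteString.EMPTY;
        byte[] payload = bytes;                                  // what would be uploaded over HTTP
        if (filerGrpcClient.isCipher()) {
            byte[] cipherKey = new byte[32];                     // same size as genCipherKey() above
            random.nextBytes(cipherKey);
            try {
                payload = SeaweedCipher.encrypt(bytes, cipherKey);
            } catch (Exception e) {
                throw new IOException("fail to encrypt data", e);
            }
            cipherKeyString = ByteString.copyFrom(cipherKey);
        }
        // ... multipart upload of `payload` to the assigned volume server happens here ...
        SeaweedRead.chunkCache.setChunk(fileId, bytes);          // the cache keeps the plaintext bytes
        return FilerProto.FileChunk.newBuilder()
                .setFileId(fileId)
                .setSize(bytes.length)
                .setCipherKey(cipherKeyString);
    }
}
```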

69
other/java/client/src/main/proto/filer.proto

@ -21,6 +21,9 @@ service SeaweedFiler {
rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
}
rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) {
}
rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
}
@ -42,6 +45,15 @@ service SeaweedFiler {
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
}
rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
}
rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
}
rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) {
}
}
//////////////////////////////////////////////////
@ -96,6 +108,8 @@ message FileChunk {
string source_file_id = 6; // to be deprecated
FileId fid = 7;
FileId source_fid = 8;
bytes cipher_key = 9;
bool is_gzipped = 10;
}
message FileId {
@ -118,6 +132,7 @@ message FuseAttributes {
string user_name = 11; // for hdfs
repeated string group_name = 12; // for hdfs
string symlink_target = 13;
bytes md5 = 14;
}
message CreateEntryRequest {
@ -137,6 +152,14 @@ message UpdateEntryRequest {
message UpdateEntryResponse {
}
message AppendToEntryRequest {
string directory = 1;
string entry_name = 2;
repeated FileChunk chunks = 3;
}
message AppendToEntryResponse {
}
message DeleteEntryRequest {
string directory = 1;
string name = 2;
@ -147,6 +170,7 @@ message DeleteEntryRequest {
}
message DeleteEntryResponse {
string error = 1;
}
message AtomicRenameEntryRequest {
@ -165,6 +189,7 @@ message AssignVolumeRequest {
string replication = 3;
int32 ttl_sec = 4;
string data_center = 5;
string parent_path = 6;
}
message AssignVolumeResponse {
@ -173,6 +198,9 @@ message AssignVolumeResponse {
string public_url = 3;
int32 count = 4;
string auth = 5;
string collection = 6;
string replication = 7;
string error = 8;
}
message LookupVolumeRequest {
@ -219,4 +247,45 @@ message GetFilerConfigurationResponse {
string replication = 2;
string collection = 3;
uint32 max_mb = 4;
string dir_buckets = 5;
bool cipher = 7;
}
message SubscribeMetadataRequest {
string client_name = 1;
string path_prefix = 2;
int64 since_ns = 3;
}
message SubscribeMetadataResponse {
string directory = 1;
EventNotification event_notification = 2;
int64 ts_ns = 3;
}
message LogEntry {
int64 ts_ns = 1;
int32 partition_key_hash = 2;
bytes data = 3;
}
message KeepConnectedRequest {
string name = 1;
uint32 grpc_port = 2;
repeated string resources = 3;
}
message KeepConnectedResponse {
}
message LocateBrokerRequest {
string resource = 1;
}
message LocateBrokerResponse {
bool found = 1;
// if found, send the exact address
// if not found, send the full list of existing brokers
message Resource {
string grpc_addresses = 1;
int32 resource_count = 2;
}
repeated Resource resources = 2;
}
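As an illustration only: calling the new AppendToEntry RPC from the Java client would look roughly like this. The generated method and builder names follow the usual protobuf/gRPC-java conventions but are not shown in this diff, so treat them as assumptions.

```java
package seaweedfs.client;

public class AppendToEntryExample {
    public static void main(String[] args) throws InterruptedException {
        FilerGrpcClient client = new FilerGrpcClient("localhost", 18888);   // placeholder filer gRPC address
        FilerProto.FileChunk chunk = FilerProto.FileChunk.newBuilder()      // a chunk that was already uploaded
                .setFileId("3,01637037d6")                                  // made-up file id
                .setSize(5)
                .setMtime(System.currentTimeMillis() / 10000L)
                .build();
        client.getBlockingStub().appendToEntry(FilerProto.AppendToEntryRequest.newBuilder()
                .setDirectory("/logs")                                      // hypothetical target entry
                .setEntryName("app.log")
                .addChunks(chunk)
                .build());
        client.shutdown();
    }
}
```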

42
other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java

@ -0,0 +1,42 @@
package seaweedfs.client;
import org.junit.Test;
import java.util.Base64;
import static seaweedfs.client.SeaweedCipher.decrypt;
import static seaweedfs.client.SeaweedCipher.encrypt;
public class SeaweedCipherTest {
@Test
public void testSameAsGoImplemnetation() throws Exception {
byte[] secretKey = "256-bit key for AES 256 GCM encr".getBytes();
String plainText = "Now we need to generate a 256-bit key for AES 256 GCM";
System.out.println("Original Text : " + plainText);
byte[] cipherText = encrypt(plainText.getBytes(), secretKey);
System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText));
byte[] decryptedText = decrypt(cipherText, secretKey);
System.out.println("DeCrypted Text : " + new String(decryptedText));
}
@Test
public void testEncryptDecrypt() throws Exception {
byte[] secretKey = SeaweedCipher.genCipherKey();
String plainText = "Now we need to generate a 256-bit key for AES 256 GCM";
System.out.println("Original Text : " + plainText);
byte[] cipherText = encrypt(plainText.getBytes(), secretKey);
System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText));
byte[] decryptedText = decrypt(cipherText, secretKey);
System.out.println("DeCrypted Text : " + new String(decryptedText));
}
}

2
other/java/hdfs2/dependency-reduced-pom.xml

@ -127,7 +127,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
<seaweedfs.client.version>1.2.4</seaweedfs.client.version>
<seaweedfs.client.version>1.2.8</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>
</project>

2
other/java/hdfs2/pom.xml

@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<properties>
<seaweedfs.client.version>1.2.4</seaweedfs.client.version>
<seaweedfs.client.version>1.2.8</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>

12
other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java

@ -64,6 +64,16 @@ public class SeaweedFileSystemStore {
public FileStatus[] listEntries(final Path path) {
LOG.debug("listEntries path: {}", path);
FileStatus pathStatus = getFileStatus(path);
if (pathStatus == null) {
return new FileStatus[0];
}
if (!pathStatus.isDirectory()) {
return new FileStatus[]{pathStatus};
}
List<FileStatus> fileStatuses = new ArrayList<FileStatus>();
List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());
@ -74,7 +84,9 @@ public class SeaweedFileSystemStore {
fileStatuses.add(fileStatus);
}
LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size());
return fileStatuses.toArray(new FileStatus[0]);
}
public FileStatus getFileStatus(final Path path) {

2
other/java/hdfs3/dependency-reduced-pom.xml

@ -127,7 +127,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
<seaweedfs.client.version>1.2.4</seaweedfs.client.version>
<seaweedfs.client.version>1.2.8</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>
</project>

2
other/java/hdfs3/pom.xml

@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<properties>
<seaweedfs.client.version>1.2.4</seaweedfs.client.version>
<seaweedfs.client.version>1.2.8</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>

12
other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java

@ -64,6 +64,16 @@ public class SeaweedFileSystemStore {
public FileStatus[] listEntries(final Path path) {
LOG.debug("listEntries path: {}", path);
FileStatus pathStatus = getFileStatus(path);
if (pathStatus == null) {
return new FileStatus[0];
}
if (!pathStatus.isDirectory()) {
return new FileStatus[]{pathStatus};
}
List<FileStatus> fileStatuses = new ArrayList<FileStatus>();
List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());
@ -74,7 +84,9 @@ public class SeaweedFileSystemStore {
fileStatuses.add(fileStatus);
}
LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size());
return fileStatuses.toArray(new FileStatus[0]);
}
public FileStatus getFileStatus(final Path path) {

49
snap/README.md

@ -0,0 +1,49 @@
Hi
This PR adds support for building a snap package of seaweedfs. Snaps are cross-distro Linux software packages. One snap can be installed on all supported Ubuntu LTS and non-LTS releases from 14.04 onward. Additionally, they can be installed on Debian, Manjaro, Fedora, OpenSUSE and others. Making a snap of seaweedfs enables you to provide automatic updates on your schedule to your users via the snap store.
If accepted, you can use snapcraft locally, a CI system such as Travis or Circle CI, or our free build system (build.snapcraft.io) to create snaps and upload them to the store (snapcraft.io/store).
To test this PR locally, I used an Ubuntu 16.04 VM, with the following steps.
```
snap install snapcraft --classic
git clone https://github.com/popey/seaweedfs
cd seaweedfs
git checkout add-snapcraft
snapcraft
```
This generated a .snap file from the tip of master (I could have checked out a stable release instead). It can be installed with:-
snap install seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --dangerous
(the --dangerous is necessary because we’re installing an app which hasn’t gone through the snap store review process)
Once installed, the (namespaced) weed command can be executed. If you accept this and land the snap in the store, we can request an ‘alias’ so users can use the ‘weed’ command rather than the namespaced ‘seaweedfs.weed’
- Run the command
- Create sample config. Snaps are securely confined so their home directory is in a different place
mkdir ~/snap/seaweedfs/current/.seaweedfs
seaweedfs.weed scaffold > ~/snap/seaweedfs/current/.seaweedfs/filer.toml
- Run a server
seaweedfs.weed server
- Run a benchmark
seaweedfs.weed benchmark
Results from my test run: https://paste.ubuntu.com/p/95Xk8zFQ7w/
If landed, you will need to:-
- Register an account in the snap store https://snapcraft.io/account
- Register the ‘seaweedfs’ name in the store
- snapcraft login
- snapcraft register seaweedfs
- Upload a built snap to the store
- snapcraft push seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --release edge
- Test installing on a clean Ubuntu 16.04 machine
- snap install seaweedfs --edge
The store supports multiple risk levels as “channels” with the edge channel typically used to host the latest build from git master. Stable is where stable releases are pushed. Optionally beta and candidate channels can also be used if needed.
Once you are happy, you can push a stable release to the stable channel, update the store page, and promote the application online (we can help there).

53
snap/snapcraft.yaml

@ -0,0 +1,53 @@
# Name of snap as registered in the store
name: seaweedfs
# Automatically derive snap version from git tags
version: git
# Short human readable name as seen in 'snap find $SNAPNAME'
summary: SeaweedFS
# Longer multi-line description found in 'snap info $SNAPNAME'
description: |
SeaweedFS is a simple and highly scalable distributed file system,
to store and serve billions of files fast!
SeaweedFS implements an object store with O(1) disk seek,
transparent cloud integration, and an optional Filer with POSIX interface,
supporting S3 API, Rack-Aware Erasure Coding for warm storage,
FUSE mount, Hadoop compatible, WebDAV.
# Grade is stable for snaps expected to land in the stable channel
grade: stable
# Uses the strict confinement model and uses interfaces to open up access to
# resources on the target host
confinement: strict
# List of parts which comprise the snap
parts:
# The main part which defines how to build the application in the snap
seaweedfs:
# This part needs a newer version of golang, so we use a separate part
# which defines how to get a newer golang during the build
after: [go]
# The go plugin knows how to build go applications into a snap
plugin: go
# Snapcraft will look in this location for the source of the application
source: .
go-importpath: github.com/chrislusf/seaweedfs
go:
# Defines the version of golang which will be bootstrapped into the snap
source-tag: go1.14
# Apps exposes the binaries inside the snap to the host system once installed
apps:
# We expose the weed command.
# This differs from the snap name, so it will be namespaced as seaweedfs.weed
# An alias can be added to expose this as 'weed' if requested in the snapcraft forum
weed:
# The path to the binary inside the snap, relative to the $SNAP home
command: bin/weed
# Plugs connect the snap to resources on the host system. We enable network connectivity
# We also add home and removable-media (latter not autoconnected by default)
# so users can access files in their home or on removable disks
plugs:
- network
- network-bind
- home
- removable-media

42
unmaintained/check_disk_size/check_disk_size.go

@ -0,0 +1,42 @@
package main
import (
"flag"
"fmt"
"runtime"
"syscall"
)
var (
dir = flag.String("dir", ".", "the directory which uses a disk")
)
func main() {
flag.Parse()
fillInDiskStatus(*dir)
fmt.Printf("OS: %v\n", runtime.GOOS)
fmt.Printf("Arch: %v\n", runtime.GOARCH)
}
func fillInDiskStatus(dir string) {
fs := syscall.Statfs_t{}
err := syscall.Statfs(dir, &fs)
if err != nil {
fmt.Printf("failed to statfs on %s: %v\n", dir, err)
return
}
fmt.Printf("statfs: %+v\n", fs)
fmt.Println()
total := fs.Blocks * uint64(fs.Bsize)
free := fs.Bfree * uint64(fs.Bsize)
fmt.Printf("Total: %d blocks x %d block size = %d bytes\n", fs.Blocks, uint64(fs.Bsize), total)
fmt.Printf("Free : %d blocks x %d block size = %d bytes\n", fs.Bfree, uint64(fs.Bsize), free)
fmt.Printf("Used : %d blocks x %d block size = %d bytes\n", fs.Blocks-fs.Bfree, uint64(fs.Bsize), total-free)
fmt.Printf("Free Percentage : %.2f%%\n", float32((float64(free)/float64(total))*100))
fmt.Printf("Used Percentage : %.2f%%\n", float32((float64(total-free)/float64(total))*100))
return
}

4
unmaintained/compact_leveldb/compact_leveldb.go

@ -5,6 +5,7 @@ import (
"log"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
@ -25,6 +26,9 @@ func main() {
}
db, err := leveldb.OpenFile(*dir, opts)
if errors.IsCorrupted(err) {
db, err = leveldb.RecoverFile(*dir, opts)
}
if err != nil {
log.Fatal(err)
}

46
unmaintained/repeated_vacuum/repeated_vacuum.go

@ -1,11 +1,13 @@
package main
import (
"bytes"
"flag"
"fmt"
"log"
"math/rand"
"time"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
@ -15,6 +17,8 @@ import (
var (
master = flag.String("master", "127.0.0.1:9333", "the master server")
repeat = flag.Int("n", 5, "repeat how many times")
garbageThreshold = flag.Float64("garbageThreshold", 0.3, "garbageThreshold")
replication = flag.String("replication", "", "replication 000, 001, 002, etc")
)
func main() {
@ -23,27 +27,47 @@ func main() {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
genFile(grpcDialOption, 0)
go func() {
for {
println("vacuum threshold", *garbageThreshold)
_, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold))
if err != nil {
log.Fatalf("vacuum: %v", err)
}
time.Sleep(time.Second)
}
}()
for i := 0; i < *repeat; i++ {
assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1})
// create 2 files, and delete one of them
assignResult, targetUrl := genFile(grpcDialOption, i)
util.Delete(targetUrl, string(assignResult.Auth))
}
}
func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{
Count: 1,
Replication: *replication,
})
if err != nil {
log.Fatalf("assign: %v", err)
}
data := make([]byte, 1024)
rand.Read(data)
reader := bytes.NewReader(data)
targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
_, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, assignResult.Auth)
_, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth)
if err != nil {
log.Fatalf("upload: %v", err)
}
util.Delete(targetUrl, string(assignResult.Auth))
util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master))
}
return assignResult, targetUrl
}

4
unmaintained/see_dat/see_dat.go

@ -2,6 +2,7 @@ package main
import (
"flag"
"github.com/chrislusf/seaweedfs/weed/util"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
@ -31,7 +32,8 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x appendedAt %v", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie, t)
glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
*volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t)
return nil
}

3
unmaintained/see_idx/see_idx.go

@ -3,6 +3,7 @@ package main
import (
"flag"
"fmt"
"github.com/chrislusf/seaweedfs/weed/util"
"os"
"path"
"strconv"
@ -36,7 +37,7 @@ func main() {
defer indexFile.Close()
idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size)
fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size)))
return nil
})

75
unmaintained/see_log_entry/see_log_entry.go

@ -0,0 +1,75 @@
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"github.com/golang/protobuf/proto"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
var (
logdataFile = flag.String("logdata", "", "log data file saved under "+ filer2.SystemLogDir)
)
func main() {
flag.Parse()
dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644)
if err != nil {
log.Fatalf("failed to open %s: %v", *logdataFile, err)
}
defer dst.Close()
err = walkLogEntryFile(dst)
if err != nil {
log.Fatalf("failed to visit %s: %v", *logdataFile, err)
}
}
func walkLogEntryFile(dst *os.File) error {
sizeBuf := make([]byte, 4)
for {
if n, err := dst.Read(sizeBuf); n != 4 {
if err == io.EOF {
return nil
}
return err
}
size := util.BytesToUint32(sizeBuf)
data := make([]byte, int(size))
if n, err := dst.Read(data); n != len(data) {
return err
}
logEntry := &filer_pb.LogEntry{}
err := proto.Unmarshal(data, logEntry)
if err != nil {
log.Printf("unexpected unmarshal filer_pb.LogEntry: %v", err)
return nil
}
event := &filer_pb.SubscribeMetadataResponse{}
err = proto.Unmarshal(logEntry.Data, event)
if err != nil {
log.Printf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
return nil
}
fmt.Printf("event: %+v\n", event)
}
}

6
unmaintained/see_meta/see_meta.go

@ -7,10 +7,10 @@ import (
"log"
"os"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/golang/protobuf/proto"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
)
var (
@ -58,7 +58,7 @@ func walkMetaFile(dst *os.File) error {
return err
}
fmt.Fprintf(os.Stdout, "file %s %v\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
for i, chunk := range fullEntry.Entry.Chunks {
fmt.Fprintf(os.Stdout, " chunk %d %v\n", i+1, chunk.String())
}

3
unmaintained/volume_tailer/volume_tailer.go

@ -9,7 +9,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
util2 "github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper"
"golang.org/x/tools/godoc/util"
)
@ -25,7 +24,7 @@ func main() {
flag.Parse()
util2.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
grpcDialOption := security.LoadClientTLS(util2.GetViper(), "grpc.client")
vid := needle.VolumeId(*volumeId)

2
weed/command/backup.go

@ -119,7 +119,7 @@ func runBackup(cmd *Command, args []string) bool {
}
if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
if err = v.Compact2(30 * 1024 * 1024 * 1024); err != nil {
if err = v.Compact2(30*1024*1024*1024, 0); err != nil {
fmt.Printf("Compact Volume before synchronizing %v\n", err)
return true
}

60
weed/command/benchmark.go

@ -19,6 +19,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
@ -40,6 +41,8 @@ type BenchmarkOptions struct {
maxCpu *int
grpcDialOption grpc.DialOption
masterClient *wdclient.MasterClient
grpcRead *bool
fsync *bool
}
var (
@ -64,6 +67,8 @@ func init() {
b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type")
b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
b.grpcRead = cmdBenchmark.Flag.Bool("grpcRead", false, "use grpc API to read")
b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write")
sharedBytes = make([]byte, 1024)
}
@ -110,7 +115,7 @@ func runBenchmark(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if *b.maxCpu < 1 {
*b.maxCpu = runtime.NumCPU()
}
@ -124,7 +129,7 @@ func runBenchmark(cmd *Command, args []string) bool {
defer pprof.StopCPUProfile()
}
b.masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ","))
b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, strings.Split(*b.masters, ","))
go b.masterClient.KeepConnectedToMaster()
b.masterClient.WaitUntilConnected()
@ -224,9 +229,10 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
start := time.Now()
fileSize := int64(*b.fileSize + random.Intn(64))
fp := &operation.FilePart{
Reader: &FakeReader{id: uint64(id), size: fileSize},
Reader: &FakeReader{id: uint64(id), size: fileSize, random: random},
FileSize: fileSize,
MimeType: "image/bench", // prevent gzip benchmark content
Fsync: *b.fsync,
}
ar := &operation.VolumeAssignRequest{
Count: 1,
@ -238,7 +244,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
if !isSecure && assignResult.Auth != "" {
isSecure = true
}
if _, err := fp.Upload(0, b.masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil {
if _, err := fp.Upload(0, b.masterClient.GetMaster(), false, assignResult.Auth, b.grpcDialOption); err == nil {
if random.Intn(100) < *b.deletePercentage {
s.total++
delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
@ -278,21 +284,59 @@ func readFiles(fileIdLineChan chan string, s *stat) {
fmt.Printf("reading file %s\n", fid)
}
start := time.Now()
var bytesRead int
var err error
if *b.grpcRead {
volumeServer, err := b.masterClient.LookupVolumeServer(fid)
if err != nil {
s.failed++
println("!!!! ", fid, " location not found!!!!!")
continue
}
bytesRead, err = grpcFileGet(volumeServer, fid, b.grpcDialOption)
} else {
url, err := b.masterClient.LookupFileId(fid)
if err != nil {
s.failed++
println("!!!! ", fid, " location not found!!!!!")
continue
}
if bytesRead, err := util.Get(url); err == nil {
var bytes []byte
bytes, err = util.Get(url)
bytesRead = len(bytes)
}
if err == nil {
s.completed++
s.transferred += int64(len(bytesRead))
s.transferred += int64(bytesRead)
readStats.addSample(time.Now().Sub(start))
} else {
s.failed++
fmt.Printf("Failed to read %s error:%v\n", url, err)
fmt.Printf("Failed to read %s error:%v\n", fid, err)
}
}
}
func grpcFileGet(volumeServer, fid string, grpcDialOption grpc.DialOption) (bytesRead int, err error) {
err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
fileGetClient, err := client.FileGet(context.Background(), &volume_server_pb.FileGetRequest{FileId: fid})
if err != nil {
return err
}
for {
resp, respErr := fileGetClient.Recv()
if resp != nil {
bytesRead += len(resp.Data)
}
if respErr != nil {
if respErr == io.EOF {
return nil
}
return respErr
}
}
})
return
}
func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan bool) {
@ -511,6 +555,7 @@ func (s *stats) printStats() {
type FakeReader struct {
id uint64 // an id number
size int64 // max bytes
random *rand.Rand
}
func (l *FakeReader) Read(p []byte) (n int, err error) {
@ -526,6 +571,7 @@ func (l *FakeReader) Read(p []byte) (n int, err error) {
for i := 0; i < 8; i++ {
p[i] = byte(l.id >> uint(i*8))
}
l.random.Read(p[8:])
}
l.size -= int64(n)
return

16
weed/command/command.go

@ -12,20 +12,22 @@ var Commands = []*Command{
cmdBackup,
cmdCompact,
cmdCopy,
cmdFix,
cmdDownload,
cmdExport,
cmdFiler,
cmdFilerReplicate,
cmdServer,
cmdFix,
cmdMaster,
cmdFiler,
cmdMount,
cmdS3,
cmdUpload,
cmdDownload,
cmdMsgBroker,
cmdScaffold,
cmdServer,
cmdShell,
cmdWatch,
cmdUpload,
cmdVersion,
cmdVolume,
cmdExport,
cmdMount,
cmdWebDav,
}

2
weed/command/compact.go

@ -50,7 +50,7 @@ func runCompact(cmd *Command, args []string) bool {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
}
} else {
if err = v.Compact2(preallocate); err != nil {
if err = v.Compact2(preallocate, 0); err != nil {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
}
}

1
weed/command/download.go

@ -71,6 +71,7 @@ func downloadToFile(server, fileId, saveDir string) error {
}
f, err := os.OpenFile(path.Join(saveDir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
if err != nil {
io.Copy(ioutil.Discard, rc)
return err
}
defer f.Close()

2
weed/command/export.go

@ -195,6 +195,8 @@ func runExport(cmd *Command, args []string) bool {
vid := needle.VolumeId(*export.volumeId)
needleMap := needle_map.NewMemDb()
defer needleMap.Close()
if err := needleMap.LoadFromIdx(path.Join(*export.dir, fileName+".idx")); err != nil {
glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
}

24
weed/command/filer.go

@ -9,6 +9,7 @@ import (
"google.golang.org/grpc/reflection"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
@ -22,17 +23,18 @@ var (
type FilerOptions struct {
masters *string
ip *string
bindIp *string
port *int
publicPort *int
collection *string
defaultReplicaPlacement *string
redirectOnRead *bool
disableDirListing *bool
maxMB *int
dirListingLimit *int
dataCenter *string
enableNotification *bool
disableHttp *bool
cipher *bool
// default leveldb directory, used in "weed server" mode
defaultLevelDbDirectory *string
@ -42,16 +44,17 @@ func init() {
cmdFiler.Run = runFiler // break init cycle
f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address")
f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
f.bindIp = cmdFiler.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public")
f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified")
f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size")
f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center")
f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
}
var cmdFiler = &Command{
@ -102,22 +105,23 @@ func (fo *FilerOptions) startFiler() {
Masters: strings.Split(*fo.masters, ","),
Collection: *fo.collection,
DefaultReplication: *fo.defaultReplicaPlacement,
RedirectOnRead: *fo.redirectOnRead,
DisableDirListing: *fo.disableDirListing,
MaxMB: *fo.maxMB,
DirListingLimit: *fo.dirListingLimit,
DataCenter: *fo.dataCenter,
DefaultLevelDbDir: defaultLevelDbDirectory,
DisableHttp: *fo.disableHttp,
Port: *fo.port,
Host: *fo.ip,
Port: uint32(*fo.port),
Cipher: *fo.cipher,
})
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)
}
if *fo.publicPort != 0 {
publicListeningAddress := *fo.ip + ":" + strconv.Itoa(*fo.publicPort)
glog.V(0).Infoln("Start Seaweed filer server", util.VERSION, "public at", publicListeningAddress)
publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort)
glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, 0)
if e != nil {
glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
@ -129,9 +133,9 @@ func (fo *FilerOptions) startFiler() {
}()
}
glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.VERSION, *fo.ip, *fo.port)
glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
filerListener, e := util.NewListener(
*fo.ip+":"+strconv.Itoa(*fo.port),
*fo.bindIp+":"+strconv.Itoa(*fo.port),
time.Duration(10)*time.Second,
)
if e != nil {
@ -144,7 +148,7 @@ func (fo *FilerOptions) startFiler() {
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
reflection.Register(grpcS)
go grpcS.Serve(grpcL)

160
weed/command/filer_copy.go

@ -16,9 +16,13 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
@ -37,9 +41,10 @@ type CopyOptions struct {
masterClient *wdclient.MasterClient
concurrenctFiles *int
concurrenctChunks *int
compressionLevel *int
grpcDialOption grpc.DialOption
masters []string
cipher bool
ttlSec int32
}
func init() {
@ -52,7 +57,6 @@ func init() {
copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
copy.compressionLevel = cmdCopy.Flag.Int("compressionLevel", 9, "local file compression level 1 ~ 9")
}
var cmdCopy = &Command{
@ -107,9 +111,7 @@ func runCopy(cmd *Command, args []string) bool {
filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
ctx := context.Background()
masters, collection, replication, maxMB, err := readFilerConfiguration(ctx, copy.grpcDialOption, filerGrpcAddress)
masters, collection, replication, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress)
if err != nil {
fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err)
return false
@ -124,13 +126,17 @@ func runCopy(cmd *Command, args []string) bool {
*copy.maxMB = int(maxMB)
}
copy.masters = masters
copy.cipher = cipher
copy.masterClient = wdclient.NewMasterClient(ctx, copy.grpcDialOption, "client", copy.masters)
go copy.masterClient.KeepConnectedToMaster()
copy.masterClient.WaitUntilConnected()
ttl, err := needle.ReadTTL(*copy.ttl)
if err != nil {
fmt.Printf("parsing ttl %s: %v\n", *copy.ttl, err)
return false
}
copy.ttlSec = int32(ttl.Minutes()) * 60
if *cmdCopy.IsDebug {
util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
}
fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrenctFiles)
@ -153,7 +159,7 @@ func runCopy(cmd *Command, args []string) bool {
filerHost: filerUrl.Host,
filerGrpcAddress: filerGrpcAddress,
}
if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil {
if err := worker.copyFiles(fileCopyTaskChan); err != nil {
fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
return
}
@ -164,13 +170,14 @@ func runCopy(cmd *Command, args []string) bool {
return true
}
func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) {
err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, cipher bool, err error) {
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
}
masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb
cipher = resp.Cipher
return nil
})
return
@ -215,9 +222,9 @@ type FileCopyWorker struct {
filerGrpcAddress string
}
func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error {
func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
for task := range fileCopyTaskChan {
if err := worker.doEachCopy(ctx, task); err != nil {
if err := worker.doEachCopy(task); err != nil {
return err
}
}
@ -233,7 +240,7 @@ type FileCopyTask struct {
gid uint32
}
func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error {
func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error {
f, err := os.Open(task.sourceLocation)
if err != nil {
@ -261,36 +268,55 @@ func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask)
}
if chunkCount == 1 {
return worker.uploadFileAsOne(ctx, task, f)
return worker.uploadFileAsOne(task, f)
}
return worker.uploadFileInChunks(ctx, task, f, chunkCount, chunkSize)
return worker.uploadFileInChunks(task, f, chunkCount, chunkSize)
}
func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopyTask, f *os.File) error {
func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error {
// upload the file content
fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f)
data, err := ioutil.ReadAll(f)
if err != nil {
return err
}
var chunks []*filer_pb.FileChunk
var assignResult *filer_pb.AssignVolumeResponse
var assignError error
if task.fileSize > 0 {
// assign a volume
assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
Ttl: *worker.options.ttl,
TtlSec: worker.options.ttlSec,
ParentPath: task.destinationUrlPath,
}
assignResult, assignError = client.AssignVolume(context.Background(), request)
if assignError != nil {
return fmt.Errorf("assign volume failure %v: %v", request, assignError)
}
if assignResult.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
}
return nil
})
if err != nil {
fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err)
}
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth, *worker.options.compressionLevel)
uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth))
if err != nil {
return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
}
@ -299,18 +325,12 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
}
fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
chunks = append(chunks, &filer_pb.FileChunk{
FileId: assignResult.Fid,
Offset: 0,
Size: uint64(uploadResult.Size),
Mtime: time.Now().UnixNano(),
ETag: uploadResult.ETag,
})
chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0))
fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName)
}
if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{
@ -325,13 +345,13 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
Mime: mimeType,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
TtlSec: worker.options.ttlSec,
},
Chunks: chunks,
},
}
if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
if err := filer_pb.CreateEntry(client, request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
@ -342,7 +362,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
return nil
}
func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f)
@ -352,6 +372,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks)
var wg sync.WaitGroup
var uploadError error
var collection, replication string
fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount)
for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ {
@ -363,22 +384,42 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
<-concurrentChunks
}()
// assign a volume
assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
var assignResult *filer_pb.AssignVolumeResponse
var assignError error
err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
Ttl: *worker.options.ttl,
TtlSec: worker.options.ttlSec,
ParentPath: task.destinationUrlPath,
}
assignResult, assignError = client.AssignVolume(context.Background(), request)
if assignError != nil {
return fmt.Errorf("assign volume failure %v: %v", request, assignError)
}
if assignResult.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
}
return nil
})
if err != nil {
fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
}
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
if collection == "" {
collection = assignResult.Collection
}
if replication == "" {
replication = assignResult.Replication
}
uploadResult, err := operation.Upload(targetUrl,
fileName+"-"+strconv.FormatInt(i+1, 10),
io.NewSectionReader(f, i*chunkSize, chunkSize),
false, "", nil, assignResult.Auth)
uploadResult, err, _ := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth))
if err != nil {
uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
return
@ -387,13 +428,8 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
return
}
chunksChan <- &filer_pb.FileChunk{
FileId: assignResult.Fid,
Offset: i * chunkSize,
Size: uint64(uploadResult.Size),
Mtime: time.Now().UnixNano(),
ETag: uploadResult.ETag,
}
chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i*chunkSize)
fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
}(i)
}
@ -410,11 +446,11 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
for _, chunk := range chunks {
fileIds = append(fileIds, chunk.FileId)
}
operation.DeleteFiles(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, fileIds)
operation.DeleteFiles(copy.masters[0], false, worker.options.grpcDialOption, fileIds)
return uploadError
}
if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{
@ -427,15 +463,15 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
FileSize: uint64(task.fileSize),
FileMode: uint32(task.fileMode),
Mime: mimeType,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
Replication: replication,
Collection: collection,
TtlSec: worker.options.ttlSec,
},
Chunks: chunks,
},
}
if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
if err := filer_pb.CreateEntry(client, request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
@ -457,18 +493,12 @@ func detectMimeType(f *os.File) string {
}
if err != nil {
fmt.Printf("read head of %v: %v\n", f.Name(), err)
return "application/octet-stream"
return ""
}
f.Seek(0, io.SeekStart)
mimeType := http.DetectContentType(head[:n])
if mimeType == "application/octet-stream" {
return ""
}
return mimeType
}
func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
return util.WithCachedGrpcClient(ctx, func(ctx context.Context, clientConn *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(clientConn)
return fn(client)
}, filerAddress, grpcDialOption)
}

1
weed/command/filer_replication.go

@ -121,7 +121,6 @@ func runFilerReplicate(cmd *Command, args []string) bool {
}
}
return true
}
func validateOneEnabledInput(config *viper.Viper) {

1
weed/command/fix.go

@ -70,6 +70,7 @@ func runFix(cmd *Command, args []string) bool {
indexFileName := path.Join(*fixVolumePath, baseFileName+".idx")
nm := needle_map.NewMemDb()
defer nm.Close()
vid := needle.VolumeId(*fixVolumeId)
scanner := &VolumeFileScanner4Fix{

16
weed/command/master.go

@ -8,10 +8,12 @@ import (
"strings"
"github.com/chrislusf/raft/protobuf"
"github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/gorilla/mux"
"google.golang.org/grpc/reflection"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
@ -43,10 +45,10 @@ type MasterOptions struct {
func init() {
cmdMaster.Run = runMaster // break init cycle
m.port = cmdMaster.Flag.Int("port", 9333, "http listen port")
m.ip = cmdMaster.Flag.String("ip", "localhost", "master <ip>|<server> address")
m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master <ip>|<server> address")
m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094")
m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095")
m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.")
m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
@ -81,7 +83,7 @@ func runMaster(cmd *Command, args []string) bool {
util.LoadConfiguration("master", false)
runtime.GOMAXPROCS(runtime.NumCPU())
util.SetupProfiling(*masterCpuProfile, *masterMemProfile)
grace.SetupProfiling(*masterCpuProfile, *masterMemProfile)
if err := util.TestFolderWritable(*m.metaFolder); err != nil {
glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
@ -109,7 +111,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers)
listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port)
glog.V(0).Infof("Start Seaweed Master %s at %s", util.VERSION, listeningAddress)
glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
masterListener, e := util.NewListener(listeningAddress, 0)
if e != nil {
glog.Fatalf("Master startup error: %v", e)
@ -129,11 +131,11 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
}
// Create your protocol servers.
grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
master_pb.RegisterSeaweedServer(grpcS, ms)
protobuf.RegisterRaftServer(grpcS, raftServer)
reflection.Register(grpcS)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *masterOption.ipBind, grpcPort)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
go grpcS.Serve(grpcL)
go ms.MasterClient.KeepConnectedToMaster()
@ -146,6 +148,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
}
func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
masterAddress = masterIp + ":" + strconv.Itoa(masterPort)
if peers != "" {
cleanedPeers = strings.Split(peers, ",")
@ -170,6 +173,7 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption {
return &weed_server.MasterOption{
Host: *m.ip,
Port: *m.port,
MetaFolder: *m.metaFolder,
VolumeSizeLimitMB: *m.volumeSizeLimitMB,

38
weed/command/mount.go

@ -1,9 +1,7 @@
package command
import (
"fmt"
"strconv"
"strings"
"os"
)
type MountOptions struct {
@ -15,9 +13,14 @@ type MountOptions struct {
replication *string
ttlSec *int
chunkSizeLimitMB *int
cacheDir *string
cacheSizeMB *int64
dataCenter *string
allowOthers *bool
umaskString *string
nonempty *bool
outsideContainerClusterMode *bool
asyncMetaDataCaching *bool
}
var (
@ -35,12 +38,17 @@ func init() {
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 16, "local write buffer size, also chunk large files")
mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB (0 will disable cache)")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory")
mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file")
mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file")
mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "run the mount outside the SeaweedFS container cluster, reaching volume servers through the filer address")
mountOptions.asyncMetaDataCaching = cmdMount.Flag.Bool("asyncMetaDataCaching", true, "async meta data caching. this feature will be permanent and this option will be removed.")
}
var cmdMount = &Command{
@ -58,21 +66,11 @@ var cmdMount = &Command{
On OS X, it requires OSXFUSE (http://osxfuse.github.com/).
`,
}
func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) {
hostnameAndPort := strings.Split(filer, ":")
if len(hostnameAndPort) != 2 {
return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort)
}
If the SeaweedFS system runs in a container cluster, e.g. managed by Kubernetes or Docker Compose,
the volume servers are not accessible by their own IP addresses.
In "outsideContainerClusterMode", the mount will use the filer IP address instead, assuming:
* All volume server containers are accessible through the same hostname or IP address as the filer.
* All volume server container ports are open external to the cluster.
filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
if parseErr != nil {
return "", fmt.Errorf("filer port parse error: %v", parseErr)
}
filerGrpcPort := int(filerPort) + 10000
return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
`,
}
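To make the "outsideContainerClusterMode" note in the mount help text above concrete, here is a minimal Go sketch of the address rewrite that mode implies: keep the volume server's port but reach it through the filer's host. The function name and the addresses are made up for illustration; this is not the actual mount implementation.

```go
package main

import (
	"fmt"
	"net"
)

// rewriteToFilerHost keeps the volume server's port but swaps in the filer's
// host, matching the assumption that volume server ports are reachable on the
// same address as the filer. Illustrative sketch only.
func rewriteToFilerHost(volumeServerAddr, filerHost string) (string, error) {
	_, port, err := net.SplitHostPort(volumeServerAddr)
	if err != nil {
		return "", err
	}
	return net.JoinHostPort(filerHost, port), nil
}

func main() {
	addr, _ := rewriteToFilerHost("swfs-volume-1:8080", "filer.example.com")
	fmt.Println(addr) // filer.example.com:8080
}
```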

4
weed/command/mount_linux.go

@ -138,9 +138,7 @@ func parseInfoFile(r io.Reader) ([]*Info, error) {
}
func osSpecificMountOptions() []fuse.MountOption {
return []fuse.MountOption{
fuse.AllowNonEmptyMount(),
}
return []fuse.MountOption{}
}
func checkMountPointAvailable(dir string) bool {

127
weed/command/mount_std.go

@ -3,6 +3,7 @@
package command
import (
"context"
"fmt"
"os"
"os/user"
@ -12,19 +13,20 @@ import (
"strings"
"time"
"github.com/jacobsa/daemonize"
"github.com/chrislusf/seaweedfs/weed/filesys"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
func runMount(cmd *Command, args []string) bool {
util.SetupProfiling(*mountCpuProfile, *mountMemProfile)
grace.SetupProfiling(*mountCpuProfile, *mountMemProfile)
umask, umaskErr := strconv.ParseUint(*mountOptions.umaskString, 8, 64)
if umaskErr != nil {
@ -32,27 +34,46 @@ func runMount(cmd *Command, args []string) bool {
return false
}
return RunMount(
*mountOptions.filer,
*mountOptions.filerMountRootPath,
*mountOptions.dir,
*mountOptions.collection,
*mountOptions.replication,
*mountOptions.dataCenter,
*mountOptions.chunkSizeLimitMB,
*mountOptions.allowOthers,
*mountOptions.ttlSec,
*mountOptions.dirListCacheLimit,
os.FileMode(umask),
)
if len(args) > 0 {
return false
}
return RunMount(&mountOptions, os.FileMode(umask))
}
func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int,
allowOthers bool, ttlSec int, dirListCacheLimit int64, umask os.FileMode) bool {
func RunMount(option *MountOptions, umask os.FileMode) bool {
filer := *option.filer
// parse filer grpc address
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(filer)
if err != nil {
glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
return true
}
// try to connect to filer, filerBucketsPath may be useful later
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var cipher bool
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer grpc address %s configuration: %v", filerGrpcAddress, err)
}
cipher = resp.Cipher
return nil
})
if err != nil {
glog.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err)
return true
}
filerMountRootPath := *option.filerMountRootPath
dir := *option.dir
chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB
util.LoadConfiguration("security", false)
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if dir == "" {
fmt.Printf("Please specify the mount directory via \"-dir\"")
return false
@ -90,7 +111,7 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
// Ensure target mount point availability
if isValid := checkMountPointAvailable(dir); !isValid {
glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir)
return false
return true
}
mountName := path.Base(dir)
@ -99,7 +120,7 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
fuse.VolumeName(mountName),
fuse.FSName(filer + ":" + filerMountRootPath),
fuse.Subtype("seaweedfs"),
fuse.NoAppleDouble(),
// fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders
fuse.NoAppleXattr(),
fuse.NoBrowse(),
fuse.AutoXattr(),
@ -110,51 +131,34 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
fuse.MaxReadahead(1024 * 128),
fuse.AsyncRead(),
fuse.WritebackCache(),
fuse.AllowNonEmptyMount(),
}
options = append(options, osSpecificMountOptions()...)
if allowOthers {
if *option.allowOthers {
options = append(options, fuse.AllowOther())
}
c, err := fuse.Mount(dir, options...)
if err != nil {
glog.V(0).Infof("mount: %v", err)
daemonize.SignalOutcome(err)
return true
}
util.OnInterrupt(func() {
fuse.Unmount(dir)
c.Close()
})
filerGrpcAddress, err := parseFilerGrpcAddress(filer)
if err != nil {
glog.V(0).Infof("parseFilerGrpcAddress: %v", err)
daemonize.SignalOutcome(err)
return true
if *option.nonempty {
options = append(options, fuse.AllowNonEmptyMount())
}
// find mount point
mountRoot := filerMountRootPath
if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") {
mountRoot = mountRoot[0 : len(mountRoot)-1]
}
daemonize.SignalOutcome(nil)
err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{
seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
FilerGrpcAddress: filerGrpcAddress,
GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"),
GrpcDialOption: grpcDialOption,
FilerMountRootPath: mountRoot,
Collection: collection,
Replication: replication,
TtlSec: int32(ttlSec),
Collection: *option.collection,
Replication: *option.replication,
TtlSec: int32(*option.ttlSec),
ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
DataCenter: dataCenter,
DirListCacheLimit: dirListCacheLimit,
CacheDir: *option.cacheDir,
CacheSizeMB: *option.cacheSizeMB,
DataCenter: *option.dataCenter,
DirListCacheLimit: *option.dirListCacheLimit,
EntryCacheTtl: 3 * time.Second,
MountUid: uid,
MountGid: gid,
@ -162,16 +166,31 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
MountCtime: fileInfo.ModTime(),
MountMtime: time.Now(),
Umask: umask,
}))
OutsideContainerClusterMode: *mountOptions.outsideContainerClusterMode,
AsyncMetaDataCaching: *mountOptions.asyncMetaDataCaching,
Cipher: cipher,
})
// mount
c, err := fuse.Mount(dir, options...)
if err != nil {
fuse.Unmount(dir)
glog.V(0).Infof("mount: %v", err)
return true
}
defer fuse.Unmount(dir)
grace.OnInterrupt(func() {
fuse.Unmount(dir)
c.Close()
})
glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir)
err = fs.Serve(c, seaweedFileSystem)
// check if the mount process has an error to report
<-c.Ready
if err := c.MountError; err != nil {
glog.V(0).Infof("mount process: %v", err)
daemonize.SignalOutcome(err)
return true
}

114
weed/command/msg_broker.go

@ -0,0 +1,114 @@
package command
import (
"context"
"fmt"
"strconv"
"time"
"google.golang.org/grpc/reflection"
"github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/messaging/broker"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
var (
messageBrokerStandaloneOptions MessageBrokerOptions
)
type MessageBrokerOptions struct {
filer *string
ip *string
port *int
cpuprofile *string
memprofile *string
}
func init() {
cmdMsgBroker.Run = runMsgBroker // break init cycle
messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address")
messageBrokerStandaloneOptions.ip = cmdMsgBroker.Flag.String("ip", util.DetectedHostAddress(), "broker host address")
messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "broker gRPC listen port")
messageBrokerStandaloneOptions.cpuprofile = cmdMsgBroker.Flag.String("cpuprofile", "", "cpu profile output file")
messageBrokerStandaloneOptions.memprofile = cmdMsgBroker.Flag.String("memprofile", "", "memory profile output file")
}
var cmdMsgBroker = &Command{
UsageLine: "msgBroker [-port=17777] [-filer=<ip:port>]",
Short: "start a message queue broker",
Long: `start a message queue broker
The broker can accept gRPC calls to write or read messages. The messages are stored via filer.
The brokers are stateless. To scale up, just add more brokers.
`,
}
func runMsgBroker(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
return messageBrokerStandaloneOptions.startQueueServer()
}
func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
grace.SetupProfiling(*messageBrokerStandaloneOptions.cpuprofile, *messageBrokerStandaloneOptions.memprofile)
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*msgBrokerOpt.filer)
if err != nil {
glog.Fatal(err)
return false
}
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker")
cipher := false
for {
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
}
cipher = resp.Cipher
return nil
})
if err != nil {
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
time.Sleep(time.Second)
} else {
glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
break
}
}
qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{
Filers: []string{*msgBrokerOpt.filer},
DefaultReplication: "",
MaxMB: 0,
Ip: *msgBrokerOpt.ip,
Port: *msgBrokerOpt.port,
Cipher: cipher,
}, grpcDialOption)
// start grpc listener
grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker"))
messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs)
reflection.Register(grpcS)
grpcS.Serve(grpcL)
return true
}

103
weed/command/s3.go

@ -1,10 +1,13 @@
package command
import (
"context"
"fmt"
"net/http"
"time"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/gorilla/mux"
@ -20,8 +23,8 @@ var (
type S3Options struct {
filer *string
filerBucketsPath *string
port *int
config *string
domainName *string
tlsPrivateKey *string
tlsCertificate *string
@ -30,18 +33,78 @@ type S3Options struct {
func init() {
cmdS3.Run = runS3 // break init cycle
s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address")
s3StandaloneOptions.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets")
s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port")
s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}")
s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file")
s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
}
var cmdS3 = &Command{
UsageLine: "s3 -port=8333 -filer=<ip:port>",
UsageLine: "s3 [-port=8333] [-filer=<ip:port>] [-config=</path/to/config.json>]",
Short: "start a s3 API compatible server that is backed by a filer",
Long: `start a s3 API compatible server that is backed by a filer.
By default, you can use any access key and secret key to access the S3 APIs.
To enable credential based access, create a config.json file similar to this:
{
"identities": [
{
"name": "some_name",
"credentials": [
{
"accessKey": "some_access_key1",
"secretKey": "some_secret_key1"
}
],
"actions": [
"Admin",
"Read",
"Write"
]
},
{
"name": "some_read_only_user",
"credentials": [
{
"accessKey": "some_access_key2",
"secretKey": "some_secret_key2"
}
],
"actions": [
"Read"
]
},
{
"name": "some_normal_user",
"credentials": [
{
"accessKey": "some_access_key3",
"secretKey": "some_secret_key3"
}
],
"actions": [
"Read",
"Write"
]
},
{
"name": "user_limited_to_bucket1",
"credentials": [
{
"accessKey": "some_access_key4",
"secretKey": "some_secret_key4"
}
],
"actions": [
"Read:bucket1",
"Write:bucket1"
]
}
]
}
`,
}
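To illustrate how the "actions" in the config.json example above are meant to be read, here is a small Go sketch of a permission check where a plain action ("Read") applies to every bucket, a scoped entry ("Read:bucket1") applies to a single bucket, and "Admin" is treated as covering everything. The function and its exact behavior are an assumption for illustration, not the actual SeaweedFS authorization code.

```go
package main

import "fmt"

// allowed reports whether a granted action list permits an action on a bucket.
// Plain actions ("Read") are global, "Action:bucket" entries are bucket-scoped,
// and "Admin" is assumed here to cover everything. Illustrative sketch only.
func allowed(granted []string, action, bucket string) bool {
	for _, g := range granted {
		if g == "Admin" || g == action || g == action+":"+bucket {
			return true
		}
	}
	return false
}

func main() {
	grants := []string{"Read:bucket1", "Write:bucket1"}
	fmt.Println(allowed(grants, "Read", "bucket1")) // true
	fmt.Println(allowed(grants, "Read", "bucket2")) // false
}
```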
@ -55,20 +118,44 @@ func runS3(cmd *Command, args []string) bool {
func (s3opt *S3Options) startS3Server() bool {
filerGrpcAddress, err := parseFilerGrpcAddress(*s3opt.filer)
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*s3opt.filer)
if err != nil {
glog.Fatal(err)
return false
}
filerBucketsPath := "/buckets"
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
for {
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
}
filerBucketsPath = resp.DirBuckets
glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
return nil
})
if err != nil {
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
time.Sleep(time.Second)
} else {
glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
break
}
}
router := mux.NewRouter().SkipClean(true)
_, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
Filer: *s3opt.filer,
FilerGrpcAddress: filerGrpcAddress,
Config: *s3opt.config,
DomainName: *s3opt.domainName,
BucketsPath: *s3opt.filerBucketsPath,
GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"),
BucketsPath: filerBucketsPath,
GrpcDialOption: grpcDialOption,
})
if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
@ -83,12 +170,12 @@ func (s3opt *S3Options) startS3Server() bool {
}
if *s3opt.tlsPrivateKey != "" {
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3opt.port)
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
} else {
glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3opt.port)
glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
if err = httpS.Serve(s3ApiListener); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
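Both the S3 gateway above and the WebDAV server further down now block until the filer answers, reading its configuration (buckets directory, cipher flag) before serving requests. A minimal standalone sketch of that retry shape, with the gRPC call stubbed out, is shown here; it is only an illustration of the pattern, not the actual pb/filer_pb code.

package main

import (
	"errors"
	"fmt"
	"time"
)

// fetchFilerConfig stands in for pb.WithGrpcFilerClient + GetFilerConfiguration
// in the real code; here it simply fails a few times to simulate a filer that
// is still starting up.
func fetchFilerConfig(attempt int) (bucketsDir string, err error) {
	if attempt < 3 {
		return "", errors.New("connection refused")
	}
	return "/buckets", nil
}

func main() {
	var bucketsDir string
	for attempt := 1; ; attempt++ {
		dir, err := fetchFilerConfig(attempt)
		if err != nil {
			fmt.Println("wait to connect to filer:", err)
			time.Sleep(time.Second)
			continue
		}
		bucketsDir = dir
		break
	}
	fmt.Println("connected to filer, buckets dir:", bucketsDir)
}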

54
weed/command/scaffold.go

@ -18,7 +18,7 @@ var cmdScaffold = &Command{
For example, the filer.toml mysql password can be overwritten by environment variable
export WEED_MYSQL_PASSWORD=some_password
Environment variable rules:
* Prefix fix with "WEED_"
* Prefix the variable name with "WEED_"
* Uppercase the rest of the variable name.
* Replace '.' with '_'
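As a quick illustration of the three rules, a toy helper that derives the environment variable name from a TOML key; the actual override is handled by the configuration library, not by code like this.

package main

import (
	"fmt"
	"strings"
)

// envName applies the rules listed above: prefix with "WEED_",
// uppercase the rest, and replace '.' with '_'.
func envName(tomlKey string) string {
	return "WEED_" + strings.ToUpper(strings.ReplaceAll(tomlKey, ".", "_"))
}

func main() {
	fmt.Println(envName("mysql.password")) // WEED_MYSQL_PASSWORD
}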
@ -74,7 +74,12 @@ const (
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
# directories under this folder will automatically become separate buckets
buckets_folder = "/buckets"
buckets_fsync = [ # a list of buckets with all write requests fsync=true
"important_bucket",
"should_always_fsync",
]
####################################################
# The following are filer store options
@ -136,13 +141,13 @@ hosts=[
"localhost:9042",
]
[redis]
[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
[redis_cluster]
[redis_cluster2]
enabled = false
addresses = [
"localhost:30001",
@ -163,11 +168,11 @@ enabled = false
servers = "localhost:2379"
timeout = "3s"
[tikv]
[mongodb]
enabled = false
pdAddress = "192.168.199.113:2379"
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"
`
NOTIFICATION_TOML_EXAMPLE = `
@ -262,6 +267,7 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials fil
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/" # destination directory
endpoint = ""
[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
@ -323,6 +329,10 @@ key = ""
cert = ""
key = ""
[grpc.msg_broker]
cert = ""
key = ""
# use this for any place that needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
@ -352,15 +362,19 @@ key = ""
[master.maintenance]
# periodically run these scripts, the same as running them from 'weed shell'
scripts = """
lock
ec.encode -fullPercent=95 -quietFor=1h
ec.rebuild -force
ec.balance -force
volume.balance -force
volume.fix.replication
unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution
[master.filer]
default_filer_url = "http://localhost:8888/"
default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
[master.sequencer]
type = "memory" # Choose [memory|etcd] type for storing the file id sequence
@ -378,13 +392,27 @@ sequencer_etcd_urls = "http://127.0.0.1:2379"
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
endpoint = ""
# create this number of logical volumes if no more writable volumes
# copy_x means how many copies of data.
# e.g.:
# 000 has only one copy, copy_1
# 010 and 001 have two copies, copy_2
# 011 has three copies, copy_3
[master.volume_growth]
count_1 = 7 # create 1 x 7 = 7 actual volumes
count_2 = 6 # create 2 x 6 = 12 actual volumes
count_3 = 3 # create 3 x 3 = 9 actual volumes
count_other = 1 # create n x 1 = n actual volumes
copy_1 = 7 # create 1 x 7 = 7 actual volumes
copy_2 = 6 # create 2 x 6 = 12 actual volumes
copy_3 = 3 # create 3 x 3 = 9 actual volumes
copy_other = 1 # create n x 1 = n actual volumes
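The arithmetic in the comments above is simply copies-per-volume times the configured growth count; a tiny check of those numbers, for illustration only:

package main

import "fmt"

func main() {
	// copies per logical volume -> how many logical volumes to grow at once
	growth := []struct{ copies, grow int }{{1, 7}, {2, 6}, {3, 3}}
	for _, g := range growth {
		fmt.Printf("copy_%d = %d -> %d actual volumes\n", g.copies, g.grow, g.copies*g.grow)
	}
}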
# configuration flags for replication
[master.replication]
# any replication counts should be considered minimums. If you specify 010 and
# have 3 different racks, that's still considered writable. Writes will still
# try to replicate to all available volumes. You should only use this option
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false
`
)

29
weed/command/server.go

@ -22,6 +22,7 @@ var (
masterOptions MasterOptions
filerOptions FilerOptions
s3Options S3Options
msgBrokerOptions MessageBrokerOptions
)
func init() {
@ -45,7 +46,7 @@ var cmdServer = &Command{
}
var (
serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name")
serverIp = cmdServer.Flag.String("ip", util.DetectedHostAddress(), "ip or server name")
serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds")
serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name")
@ -53,10 +54,11 @@ var (
serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...")
volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]... If set to zero on non-windows OS, the limit will be auto configured.")
pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway")
isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker")
serverWhiteList []string
)
@ -78,10 +80,10 @@ func init() {
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.")
filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit")
filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
@ -92,11 +94,13 @@ func init() {
serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory")
serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets")
s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}")
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port")
}
@ -114,11 +118,10 @@ func runServer(cmd *Command, args []string) bool {
defer pprof.StopCPUProfile()
}
if *filerOptions.redirectOnRead {
if *isStartingS3 {
*isStartingFiler = true
}
if *isStartingS3 {
if *isStartingMsgBroker {
*isStartingFiler = true
}
@ -129,13 +132,15 @@ func runServer(cmd *Command, args []string) bool {
masterOptions.ip = serverIp
masterOptions.ipBind = serverBindIp
filerOptions.masters = &peers
filerOptions.ip = serverBindIp
filerOptions.ip = serverIp
filerOptions.bindIp = serverBindIp
serverOptions.v.ip = serverIp
serverOptions.v.bindIp = serverBindIp
serverOptions.v.masters = &peers
serverOptions.v.idleConnectionTimeout = serverTimeout
serverOptions.v.dataCenter = serverDataCenter
serverOptions.v.rack = serverRack
msgBrokerOptions.ip = serverIp
serverOptions.v.pulseSeconds = pulseSeconds
masterOptions.pulseSeconds = pulseSeconds
@ -148,6 +153,7 @@ func runServer(cmd *Command, args []string) bool {
filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port)
s3Options.filer = &filerAddress
msgBrokerOptions.filer = &filerAddress
if *filerOptions.defaultReplicaPlacement == "" {
*filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication
@ -191,6 +197,13 @@ func runServer(cmd *Command, args []string) bool {
}()
}
if *isStartingMsgBroker {
go func() {
time.Sleep(2 * time.Second)
msgBrokerOptions.startQueueServer()
}()
}
// start volume server
{
go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption)

13
weed/command/shell.go

@ -10,13 +10,13 @@ import (
var (
shellOptions shell.ShellOptions
shellInitialFilerUrl *string
shellInitialFiler *string
)
func init() {
cmdShell.Run = runShell // break init cycle
shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
shellInitialFilerUrl = cmdShell.Flag.String("filer.url", "http://localhost:8888/", "initial filer url")
shellInitialFiler = cmdShell.Flag.String("filer", "localhost:8888", "filer host and port")
}
var cmdShell = &Command{
@ -32,12 +32,13 @@ func runShell(command *Command, args []string) bool {
util.LoadConfiguration("security", false)
shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
var filerPwdErr error
shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = util.ParseFilerUrl(*shellInitialFilerUrl)
if filerPwdErr != nil {
fmt.Printf("failed to parse url filer.url=%s : %v\n", *shellInitialFilerUrl, filerPwdErr)
var err error
shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
if err != nil {
fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err)
return false
}
shellOptions.Directory = "/"
shell.RunShell(shellOptions)

10
weed/command/upload.go

@ -24,6 +24,7 @@ type UploadOptions struct {
dataCenter *string
ttl *string
maxMB *int
usePublicUrl *bool
}
func init() {
@ -37,6 +38,7 @@ func init() {
upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name")
upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit")
upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server")
}
var cmdUpload = &Command{
@ -79,9 +81,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
return e
}
results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts,
*upload.replication, *upload.collection, *upload.dataCenter,
*upload.ttl, *upload.maxMB)
results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
if e != nil {
@ -98,9 +98,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
fmt.Println(e.Error())
}
results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts,
*upload.replication, *upload.collection, *upload.dataCenter,
*upload.ttl, *upload.maxMB)
results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
}

2
weed/command/version.go

@ -19,6 +19,6 @@ func runVersion(cmd *Command, args []string) bool {
cmd.Usage()
}
fmt.Printf("version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
fmt.Printf("version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
return true
}

19
weed/command/volume.go

@ -10,9 +10,11 @@ import (
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/spf13/viper"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util/httpdown"
@ -56,7 +58,7 @@ func init() {
cmdVolume.Run = runVolume // break init cycle
v.port = cmdVolume.Flag.Int("port", 8080, "http listen port")
v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public")
v.ip = cmdVolume.Flag.String("ip", "", "ip or server name")
v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name")
v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address")
v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
@ -83,7 +85,7 @@ var cmdVolume = &Command{
var (
volumeFolders = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]...")
maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]... If set to zero on non-windows OS, the limit will be auto configured.")
volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
)
@ -92,7 +94,7 @@ func runVolume(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
runtime.GOMAXPROCS(runtime.NumCPU())
util.SetupProfiling(*v.cpuProfile, *v.memProfile)
grace.SetupProfiling(*v.cpuProfile, *v.memProfile)
v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption)
@ -126,7 +128,8 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
}
if *v.ip == "" {
*v.ip = "127.0.0.1"
*v.ip = util.DetectedHostAddress()
glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
}
if *v.publicPort == 0 {
@ -181,7 +184,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
clusterHttpServer := v.startClusterHttpService(volumeMux)
stopChain := make(chan struct{})
util.OnInterrupt(func() {
grace.OnInterrupt(func() {
fmt.Println("volume server has be killed")
var startTime time.Time
@ -234,7 +237,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
reflection.Register(grpcS)
go func() {
@ -247,7 +250,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress)
glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
glog.Fatalf("Volume server listener error:%v", e)
@ -274,7 +277,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
}
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress)
glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
glog.Fatalf("Volume server listener error:%v", e)

65
weed/command/watch.go

@ -0,0 +1,65 @@
package command
import (
"context"
"fmt"
"io"
"time"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
cmdWatch.Run = runWatch // break init cycle
}
var cmdWatch = &Command{
UsageLine: "watch <wip> [-filer=localhost:8888] [-target=/]",
Short: "see recent changes on a filer",
Long: `See recent changes on a filer.
`,
}
var (
watchFiler = cmdWatch.Flag.String("filer", "localhost:8888", "filer hostname:port")
watchTarget = cmdWatch.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
watchStart = cmdWatch.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
)
func runWatch(cmd *Command, args []string) bool {
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
ClientName: "watch",
PathPrefix: *watchTarget,
SinceNs: time.Now().Add(-*watchStart).UnixNano(),
})
if err != nil {
return fmt.Errorf("listen: %v", err)
}
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
fmt.Printf("events: %+v\n", resp.EventNotification)
}
})
if watchErr != nil {
fmt.Printf("watch %s: %v\n", *watchFiler, watchErr)
}
return true
}

56
weed/command/webdav.go

@ -1,13 +1,17 @@
package command
import (
"context"
"fmt"
"net/http"
"os"
"os/user"
"strconv"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/util"
@ -23,6 +27,8 @@ type WebDavOption struct {
collection *string
tlsPrivateKey *string
tlsCertificate *string
cacheDir *string
cacheSizeMB *int64
}
func init() {
@ -32,11 +38,13 @@ func init() {
webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files")
webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file")
webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB")
}
var cmdWebDav = &Command{
UsageLine: "webdav -port=7333 -filer=<ip:port>",
Short: "<unstable> start a webdav server that is backed by a filer",
Short: "start a webdav server that is backed by a filer",
Long: `start a webdav server that is backed by a filer.
`,
@ -46,7 +54,7 @@ func runWebDav(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.VERSION, *webDavStandaloneOptions.port)
glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
return webDavStandaloneOptions.startWebDav()
@ -54,12 +62,6 @@ func runWebDav(cmd *Command, args []string) bool {
func (wo *WebDavOption) startWebDav() bool {
filerGrpcAddress, err := parseFilerGrpcAddress(*wo.filer)
if err != nil {
glog.Fatal(err)
return false
}
// detect current user
uid, gid := uint32(0), uint32(0)
if u, err := user.Current(); err == nil {
@ -71,13 +73,45 @@ func (wo *WebDavOption) startWebDav() bool {
}
}
// parse filer grpc address
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer)
if err != nil {
glog.Fatal(err)
return false
}
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var cipher bool
// connect to filer
for {
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
}
cipher = resp.Cipher
return nil
})
if err != nil {
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
time.Sleep(time.Second)
} else {
glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
break
}
}
ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{
Filer: *wo.filer,
FilerGrpcAddress: filerGrpcAddress,
GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"),
GrpcDialOption: grpcDialOption,
Collection: *wo.collection,
Uid: uid,
Gid: gid,
Cipher: cipher,
CacheDir: *wo.cacheDir,
CacheSizeMB: *wo.cacheSizeMB,
})
if webdavServer_err != nil {
glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
@ -92,12 +126,12 @@ func (wo *WebDavOption) startWebDav() bool {
}
if *wo.tlsPrivateKey != "" {
glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.VERSION, *wo.port)
glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port)
if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
}
} else {
glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.VERSION, *wo.port)
glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port)
if err = httpS.Serve(webDavListener); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
}

17
weed/filer2/abstract_sql/abstract_sql_store.go

@ -7,6 +7,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -98,13 +99,13 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En
return nil
}
func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) {
func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer2.Entry, error) {
dir, name := fullpath.DirAndName()
row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir)
var data []byte
if err := row.Scan(&data); err != nil {
return nil, filer2.ErrNotFound
return nil, filer_pb.ErrNotFound
}
entry := &filer2.Entry{
@ -117,7 +118,7 @@ func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.Fu
return entry, nil
}
func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error {
func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
dir, name := fullpath.DirAndName()
@ -134,7 +135,7 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.
return nil
}
func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error {
func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath)
if err != nil {
@ -149,7 +150,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
return nil
}
func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
sqlText := store.SqlListExclusive
if inclusive {
@ -171,7 +172,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat
}
entry := &filer2.Entry{
FullPath: filer2.NewFullPath(string(fullpath), name),
FullPath: util.NewFullPath(string(fullpath), name),
}
if err = entry.DecodeAttributesAndChunks(data); err != nil {
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
@ -183,3 +184,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat
return entries, nil
}
func (store *AbstractSqlStore) Shutdown() {
store.DB.Close()
}

23
weed/filer2/cassandra/cassandra_store.go

@ -3,10 +3,13 @@ package cassandra
import (
"context"
"fmt"
"github.com/gocql/gocql"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/gocql/gocql"
)
func init() {
@ -72,7 +75,7 @@ func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entr
return store.InsertEntry(ctx, entry)
}
func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
dir, name := fullpath.DirAndName()
var data []byte
@ -80,12 +83,12 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.Full
"SELECT meta FROM filemeta WHERE directory=? AND name=?",
dir, name).Consistency(gocql.One).Scan(&data); err != nil {
if err != gocql.ErrNotFound {
return nil, filer2.ErrNotFound
return nil, filer_pb.ErrNotFound
}
}
if len(data) == 0 {
return nil, filer2.ErrNotFound
return nil, filer_pb.ErrNotFound
}
entry = &filer2.Entry{
@ -99,7 +102,7 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.Full
return entry, nil
}
func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error {
func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
dir, name := fullpath.DirAndName()
@ -112,7 +115,7 @@ func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.Fu
return nil
}
func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error {
func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
if err := store.session.Query(
"DELETE FROM filemeta WHERE directory=?",
@ -123,7 +126,7 @@ func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath
return nil
}
func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
@ -136,7 +139,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath
iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
for iter.Scan(&name, &data) {
entry := &filer2.Entry{
FullPath: filer2.NewFullPath(string(fullpath), name),
FullPath: util.NewFullPath(string(fullpath), name),
}
if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil {
err = decodeErr
@ -151,3 +154,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath
return entries, err
}
func (store *CassandraStore) Shutdown() {
store.session.Close()
}

12
weed/filer2/entry.go

@ -5,6 +5,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
type Attr struct {
@ -20,6 +21,7 @@ type Attr struct {
UserName string
GroupNames []string
SymlinkTarget string
Md5 []byte
}
func (attr Attr) IsDirectory() bool {
@ -27,7 +29,7 @@ func (attr Attr) IsDirectory() bool {
}
type Entry struct {
FullPath
util.FullPath
Attr
Extended map[string][]byte
@ -71,3 +73,11 @@ func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
Entry: entry.ToProtoEntry(),
}
}
func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
return &Entry{
FullPath: util.NewFullPath(dir, entry.Name),
Attr: PbToEntryAttribute(entry.Attributes),
Chunks: entry.Chunks,
}
}

6
weed/filer2/entry_codec.go

@ -52,6 +52,7 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes {
UserName: entry.Attr.UserName,
GroupName: entry.Attr.GroupNames,
SymlinkTarget: entry.Attr.SymlinkTarget,
Md5: entry.Attr.Md5,
}
}
@ -71,6 +72,7 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
t.UserName = attr.UserName
t.GroupNames = attr.GroupName
t.SymlinkTarget = attr.SymlinkTarget
t.Md5 = attr.Md5
return t
}
@ -93,6 +95,10 @@ func EqualEntry(a, b *Entry) bool {
return false
}
if !bytes.Equal(a.Md5, b.Md5) {
return false
}
for i := 0; i < len(a.Chunks); i++ {
if !proto.Equal(a.Chunks[i], b.Chunks[i]) {
return false

22
weed/filer2/etcd/etcd_store.go

@ -6,10 +6,12 @@ import (
"strings"
"time"
"go.etcd.io/etcd/clientv3"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
"go.etcd.io/etcd/clientv3"
)
const (
@ -90,7 +92,7 @@ func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (e
return store.InsertEntry(ctx, entry)
}
func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
key := genKey(fullpath.DirAndName())
resp, err := store.client.Get(ctx, string(key))
@ -99,7 +101,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath)
}
if len(resp.Kvs) == 0 {
return nil, filer2.ErrNotFound
return nil, filer_pb.ErrNotFound
}
entry = &filer2.Entry{
@ -113,7 +115,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath)
return entry, nil
}
func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
key := genKey(fullpath.DirAndName())
if _, err := store.client.Delete(ctx, string(key)); err != nil {
@ -123,7 +125,7 @@ func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat
return nil
}
func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil {
@ -134,7 +136,7 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer
}
func (store *EtcdStore) ListDirectoryEntries(
ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int,
ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
) (entries []*filer2.Entry, err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
@ -157,7 +159,7 @@ func (store *EtcdStore) ListDirectoryEntries(
break
}
entry := &filer2.Entry{
FullPath: filer2.NewFullPath(string(fullpath), fileName),
FullPath: weed_util.NewFullPath(string(fullpath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil {
err = decodeErr
@ -177,7 +179,7 @@ func genKey(dirPath, fileName string) (key []byte) {
return key
}
func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = []byte(string(fullpath))
keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
if len(startFileName) > 0 {
@ -194,3 +196,7 @@ func getNameFromKey(key []byte) string {
return string(key[sepIndex+1:])
}
func (store *EtcdStore) Shutdown() {
store.client.Close()
}

78
weed/filer2/filechunks.go

@ -3,6 +3,7 @@ package filer2
import (
"fmt"
"hash/fnv"
"math"
"sort"
"sync"
@ -19,7 +20,21 @@ func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
return
}
func ETag(chunks []*filer_pb.FileChunk) (etag string) {
func ETag(entry *filer_pb.Entry) (etag string) {
if entry.Attributes == nil || entry.Attributes.Md5 == nil {
return ETagChunks(entry.Chunks)
}
return fmt.Sprintf("%x", entry.Attributes.Md5)
}
func ETagEntry(entry *Entry) (etag string) {
if entry.Attr.Md5 == nil {
return ETagChunks(entry.Chunks)
}
return fmt.Sprintf("%x", entry.Attr.Md5)
}
func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
if len(chunks) == 1 {
return chunks[0].ETag
}
@ -70,10 +85,16 @@ type ChunkView struct {
Offset int64
Size uint64
LogicOffset int64
IsFullChunk bool
ChunkSize uint64
CipherKey []byte
IsGzipped bool
}
func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) {
func (cv *ChunkView) IsFullChunk() bool {
return cv.Size == cv.ChunkSize
}
func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
visibles := NonOverlappingVisibleIntervals(chunks)
@ -81,19 +102,27 @@ func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views
}
func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int) (views []*ChunkView) {
func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
stop := offset + int64(size)
stop := offset + size
if size == math.MaxInt64 {
stop = math.MaxInt64
}
if stop < offset {
stop = math.MaxInt64
}
for _, chunk := range visibles {
if chunk.start <= offset && offset < chunk.stop && offset < stop {
isFullChunk := chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop
views = append(views, &ChunkView{
FileId: chunk.fileId,
Offset: offset - chunk.start, // offset is the data starting location in this file id
Size: uint64(min(chunk.stop, stop) - offset),
LogicOffset: offset,
IsFullChunk: isFullChunk,
ChunkSize: chunk.chunkSize,
CipherKey: chunk.cipherKey,
IsGzipped: chunk.isGzipped,
})
offset = min(chunk.stop, stop)
}
@ -120,13 +149,7 @@ var bufPool = sync.Pool{
func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval {
newV := newVisibleInterval(
chunk.Offset,
chunk.Offset+int64(chunk.Size),
chunk.GetFileIdString(),
chunk.Mtime,
true,
)
newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, chunk.Size, chunk.CipherKey, chunk.IsGzipped)
length := len(visibles)
if length == 0 {
@ -140,23 +163,11 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.
logPrintf(" before", visibles)
for _, v := range visibles {
if v.start < chunk.Offset && chunk.Offset < v.stop {
newVisibles = append(newVisibles, newVisibleInterval(
v.start,
chunk.Offset,
v.fileId,
v.modifiedTime,
false,
))
newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped))
}
chunkStop := chunk.Offset + int64(chunk.Size)
if v.start < chunkStop && chunkStop < v.stop {
newVisibles = append(newVisibles, newVisibleInterval(
chunkStop,
v.stop,
v.fileId,
v.modifiedTime,
false,
))
newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped))
}
if chunkStop <= v.start || v.stop <= chunk.Offset {
newVisibles = append(newVisibles, v)
@ -187,6 +198,7 @@ func NonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []Vi
var newVisibles []VisibleInterval
for _, chunk := range chunks {
newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk)
t := visibles[:0]
visibles = newVisibles
@ -207,16 +219,20 @@ type VisibleInterval struct {
stop int64
modifiedTime int64
fileId string
isFullChunk bool
chunkSize uint64
cipherKey []byte
isGzipped bool
}
func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool) VisibleInterval {
func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
return VisibleInterval{
start: start,
stop: stop,
fileId: fileId,
modifiedTime: modifiedTime,
isFullChunk: isFullChunk,
chunkSize: chunkSize,
cipherKey: cipherKey,
isGzipped: isGzipped,
}
}
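The ChunkView changes above carry the cipher key and gzip flag through, but the core of ViewFromVisibleIntervals is still range clipping: every visible interval overlapping [offset, offset+size) yields one view trimmed to the requested range. A simplified standalone sketch of that clipping, with stand-in types rather than the real filer2 structs:

package main

import "fmt"

type interval struct {
	start, stop int64
	fileId      string
}

type view struct {
	fileId      string
	offset      int64 // data offset inside the chunk
	size        uint64
	logicOffset int64
}

// clip walks the non-overlapping visible intervals and trims each one that
// intersects the requested read range, advancing offset as it goes.
func clip(visibles []interval, offset, size int64) (views []view) {
	stop := offset + size
	for _, v := range visibles {
		if v.start <= offset && offset < v.stop && offset < stop {
			end := min64(v.stop, stop)
			views = append(views, view{
				fileId:      v.fileId,
				offset:      offset - v.start,
				size:        uint64(end - offset),
				logicOffset: offset,
			})
			offset = end
		}
	}
	return
}

func min64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

func main() {
	visibles := []interval{{0, 100, "1,a"}, {100, 250, "2,b"}}
	// spans the tail of "1,a" and the head of "2,b"
	fmt.Printf("%+v\n", clip(visibles, 50, 120))
}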

2
weed/filer2/filechunks_test.go

@ -218,7 +218,7 @@ func TestChunksReading(t *testing.T) {
testcases := []struct {
Chunks []*filer_pb.FileChunk
Offset int64
Size int
Size int64
Expected []*ChunkView
}{
// case 0: normal

88
weed/filer2/filer.go

@ -4,7 +4,6 @@ import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"time"
@ -13,6 +12,9 @@ import (
"github.com/karlseguin/ccache"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
@ -27,17 +29,27 @@ type Filer struct {
store *FilerStoreWrapper
directoryCache *ccache.Cache
MasterClient *wdclient.MasterClient
fileIdDeletionChan chan string
fileIdDeletionQueue *util.UnboundedQueue
GrpcDialOption grpc.DialOption
DirBucketsPath string
FsyncBuckets []string
buckets *FilerBuckets
Cipher bool
MetaLogBuffer *log_buffer.LogBuffer
metaLogCollection string
metaLogReplication string
}
func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer {
func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerHost string, filerGrpcPort uint32, collection string, replication string, notifyFn func()) *Filer {
f := &Filer{
directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters),
fileIdDeletionChan: make(chan string, PaginationSize),
MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, masters),
fileIdDeletionQueue: util.NewUnboundedQueue(),
GrpcDialOption: grpcDialOption,
}
f.MetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn)
f.metaLogCollection = collection
f.metaLogReplication = replication
go f.loopProcessingDeletion()
@ -85,7 +97,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro
var lastDirectoryEntry *Entry
for i := 1; i < len(dirParts); i++ {
dirPath := "/" + filepath.ToSlash(filepath.Join(dirParts[:i]...))
dirPath := "/" + util.Join(dirParts[:i]...)
// fmt.Printf("%d directory: %+v\n", i, dirPath)
// first check local cache
@ -94,7 +106,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro
// not found, check the store directly
if dirEntry == nil {
glog.V(4).Infof("find uncached directory: %s", dirPath)
dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath))
dirEntry, _ = f.FindEntry(ctx, util.FullPath(dirPath))
} else {
// glog.V(4).Infof("found cached directory: %s", dirPath)
}
@ -106,24 +118,29 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro
now := time.Now()
dirEntry = &Entry{
FullPath: FullPath(dirPath),
FullPath: util.FullPath(dirPath),
Attr: Attr{
Mtime: now,
Crtime: now,
Mode: os.ModeDir | 0770,
Mode: os.ModeDir | entry.Mode | 0110,
Uid: entry.Uid,
Gid: entry.Gid,
Collection: entry.Collection,
Replication: entry.Replication,
UserName: entry.UserName,
GroupNames: entry.GroupNames,
},
}
glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
mkdirErr := f.store.InsertEntry(ctx, dirEntry)
if mkdirErr != nil {
if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound {
if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
}
} else {
f.maybeAddBucket(dirEntry)
f.NotifyUpdateEvent(nil, dirEntry, false)
}
@ -174,6 +191,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro
}
}
f.maybeAddBucket(entry)
f.NotifyUpdateEvent(oldEntry, entry, true)
f.deleteChunksIfNotNew(oldEntry, entry)
@ -197,7 +215,7 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er
return f.store.UpdateEntry(ctx, entry)
}
func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err error) {
func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) {
now := time.Now()
@ -213,14 +231,51 @@ func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err er
},
}, nil
}
return f.store.FindEntry(ctx, p)
entry, err = f.store.FindEntry(ctx, p)
if entry != nil && entry.TtlSec > 0 {
if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
f.store.DeleteEntry(ctx, p.Child(entry.Name()))
return nil, filer_pb.ErrNotFound
}
}
return
}
func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
if strings.HasSuffix(string(p), "/") && len(p) > 1 {
p = p[0 : len(p)-1]
}
return f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
var makeupEntries []*Entry
entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
for expiredCount > 0 && err == nil {
makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount)
if err == nil {
entries = append(entries, makeupEntries...)
}
}
return entries, err
}
func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) {
listedEntries, listErr := f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
if listErr != nil {
return listedEntries, expiredCount, "", listErr
}
for _, entry := range listedEntries {
lastFileName = entry.Name()
if entry.TtlSec > 0 {
if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
f.store.DeleteEntry(ctx, p.Child(entry.Name()))
expiredCount++
continue
}
}
entries = append(entries, entry)
}
return
}
func (f *Filer) cacheDelDirectory(dirpath string) {
@ -261,3 +316,8 @@ func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) {
f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute)
}
func (f *Filer) Shutdown() {
f.MetaLogBuffer.Shutdown()
f.store.Shutdown()
}
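FindEntry and doListDirectoryEntries above now treat an entry with TtlSec > 0 as expired, deleting it on access, once Crtime plus the TTL has passed. A minimal sketch of that check, using stand-in types:

package main

import (
	"fmt"
	"time"
)

type entry struct {
	crtime time.Time
	ttlSec int32
}

// expired mirrors the condition used above: a positive TTL whose
// creation time plus TTL is already in the past.
func expired(e entry, now time.Time) bool {
	return e.ttlSec > 0 && e.crtime.Add(time.Duration(e.ttlSec)*time.Second).Before(now)
}

func main() {
	e := entry{crtime: time.Now().Add(-2 * time.Hour), ttlSec: 3600}
	fmt.Println(expired(e, time.Now())) // true: created 2h ago with a 1h TTL
}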

121
weed/filer2/filer_buckets.go

@ -0,0 +1,121 @@
package filer2
import (
"context"
"math"
"sync"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
)
type BucketName string
type BucketOption struct {
Name BucketName
Replication string
fsync bool
}
type FilerBuckets struct {
dirBucketsPath string
buckets map[BucketName]*BucketOption
sync.RWMutex
}
func (f *Filer) LoadBuckets() {
f.buckets = &FilerBuckets{
buckets: make(map[BucketName]*BucketOption),
}
limit := math.MaxInt32
entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit)
if err != nil {
glog.V(1).Infof("no buckets found: %v", err)
return
}
shouldFsyncMap := make(map[string]bool)
for _, bucket := range f.FsyncBuckets {
shouldFsyncMap[bucket] = true
}
glog.V(1).Infof("buckets found: %d", len(entries))
f.buckets.Lock()
for _, entry := range entries {
_, shouldFsnyc := shouldFsyncMap[entry.Name()]
f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{
Name: BucketName(entry.Name()),
Replication: entry.Replication,
fsync: shouldFsnyc,
}
}
f.buckets.Unlock()
}
func (f *Filer) ReadBucketOption(buketName string) (replication string, fsync bool) {
f.buckets.RLock()
defer f.buckets.RUnlock()
option, found := f.buckets.buckets[BucketName(buketName)]
if !found {
return "", false
}
return option.Replication, option.fsync
}
func (f *Filer) isBucket(entry *Entry) bool {
if !entry.IsDirectory() {
return false
}
parent, dirName := entry.FullPath.DirAndName()
if parent != f.DirBucketsPath {
return false
}
f.buckets.RLock()
defer f.buckets.RUnlock()
_, found := f.buckets.buckets[BucketName(dirName)]
return found
}
func (f *Filer) maybeAddBucket(entry *Entry) {
if !entry.IsDirectory() {
return
}
parent, dirName := entry.FullPath.DirAndName()
if parent != f.DirBucketsPath {
return
}
f.addBucket(dirName, &BucketOption{
Name: BucketName(dirName),
Replication: entry.Replication,
})
}
func (f *Filer) addBucket(buketName string, bucketOption *BucketOption) {
f.buckets.Lock()
defer f.buckets.Unlock()
f.buckets.buckets[BucketName(buketName)] = bucketOption
}
func (f *Filer) deleteBucket(buketName string) {
f.buckets.Lock()
defer f.buckets.Unlock()
delete(f.buckets.buckets, BucketName(buketName))
}

Some files were not shown because too many files changed in this diff
