
Merge branch 'master' into ftp

Chris Lu committed 4 years ago (branch: ftp)
parent commit a9e6db1a8e
Changed files:

   22  .github/workflows/cleanup.yml
   16  .github/workflows/release.yml
   26  README.md
    3  docker/Dockerfile.go_build
    3  docker/Dockerfile.go_build_large
    1  docker/Dockerfile.local
   13  docker/local-dev-compose.yml
   39  docker/local-k8s-compose.yml
   30  docker/nginx/proxy.conf
   13  docker/prometheus/prometheus.yml
   18  docker/seaweedfs-compose.yml
   15  go.mod
   67  go.sum
    3  k8s/seaweedfs/Chart.yaml
   12  k8s/seaweedfs/templates/_helpers.tpl
    1  k8s/seaweedfs/templates/cronjob.yaml
   31  k8s/seaweedfs/templates/filer-service-client.yaml
    9  k8s/seaweedfs/templates/filer-service.yaml
   12  k8s/seaweedfs/templates/filer-statefulset.yaml
    1  k8s/seaweedfs/templates/master-service.yaml
   10  k8s/seaweedfs/templates/master-statefulset.yaml
   27  k8s/seaweedfs/templates/s3-deployment.yaml
   13  k8s/seaweedfs/templates/volume-statefulset.yaml
   21  k8s/seaweedfs/values.yaml
    3  weed/command/command.go
    7  weed/command/filer.go
  118  weed/command/filer_cat.go
    2  weed/command/filer_copy.go
  201  weed/command/filer_meta_tail.go
    3  weed/command/filer_replication.go
    2  weed/command/mount.go
    5  weed/command/mount_std.go
    3  weed/command/s3.go
   84  weed/command/scaffold.go
   10  weed/command/server.go
   25  weed/command/shell.go
   25  weed/command/upload.go
  113  weed/command/watch.go
  192  weed/filer/abstract_sql/abstract_sql_store.go
   23  weed/filer/abstract_sql/abstract_sql_store_kv.go
   22  weed/filer/cassandra/cassandra_store.go
    6  weed/filer/configuration.go
   58  weed/filer/elastic/v7/elastic_store.go
   23  weed/filer/etcd/etcd_store.go
    7  weed/filer/filechunk_manifest.go
    9  weed/filer/filechunks.go
  119  weed/filer/filer.go
    4  weed/filer/filer_buckets.go
   11  weed/filer/filer_delete_entry.go
    6  weed/filer/filer_notify.go
   70  weed/filer/filer_search.go
    6  weed/filer/filerstore.go
   24  weed/filer/filerstore_translate_path.go
   58  weed/filer/filerstore_wrapper.go
   19  weed/filer/hbase/hbase_store.go
    1  weed/filer/hbase/hbase_store_kv.go
   28  weed/filer/leveldb/leveldb_store.go
   33  weed/filer/leveldb/leveldb_store_test.go
   27  weed/filer/leveldb2/leveldb2_store.go
    6  weed/filer/leveldb2/leveldb2_store_test.go
  375  weed/filer/leveldb3/leveldb3_store.go
   46  weed/filer/leveldb3/leveldb3_store_kv.go
   88  weed/filer/leveldb3/leveldb3_store_test.go
   22  weed/filer/mongodb/mongodb_store.go
   52  weed/filer/mysql/mysql_sql_gen.go
   23  weed/filer/mysql/mysql_store.go
   82  weed/filer/mysql2/mysql2_store.go
   53  weed/filer/postgres/postgres_sql_gen.go
   18  weed/filer/postgres/postgres_store.go
   87  weed/filer/postgres2/postgres2_store.go
    8  weed/filer/reader_at.go
    6  weed/filer/redis/redis_cluster_store.go
    2  weed/filer/redis/redis_store.go
   49  weed/filer/redis/universal_redis_store.go
    8  weed/filer/redis/universal_redis_store_kv.go
    6  weed/filer/redis2/redis_cluster_store.go
    2  weed/filer/redis2/redis_store.go
   51  weed/filer/redis2/universal_redis_store.go
    8  weed/filer/redis2/universal_redis_store_kv.go
   41  weed/filer/rocksdb/README.md
  302  weed/filer/rocksdb/rocksdb_store.go
   47  weed/filer/rocksdb/rocksdb_store_kv.go
  117  weed/filer/rocksdb/rocksdb_store_test.go
   40  weed/filer/rocksdb/rocksdb_ttl.go
    8  weed/filer/stream.go
  110  weed/filesys/dir.go
    2  weed/filesys/filehandle.go
   16  weed/filesys/meta_cache/meta_cache.go
    7  weed/filesys/meta_cache/meta_cache_init.go
   10  weed/messaging/broker/broker_grpc_server_subscribe.go
    5  weed/notification/configuration.go
    3  weed/operation/submit.go
    3  weed/replication/sink/filersink/filer_sink.go
   31  weed/s3api/auth_credentials.go
    6  weed/s3api/s3api_objects_list_handlers.go
    1  weed/s3api/s3api_server.go
    7  weed/security/tls.go
    9  weed/server/common.go
   44  weed/server/filer_grpc_server.go
    4  weed/server/filer_grpc_server_rename.go

22  .github/workflows/cleanup.yml

@@ -0,0 +1,22 @@
+name: Cleanup
+
+on:
+  push:
+    branches: [ master ]
+
+jobs:
+
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+
+    steps:
+
+    - name: Delete old release assets
+      uses: mknejp/delete-release-assets@v1
+      with:
+        token: ${{ github.token }}
+        tag: dev
+        fail-if-no-assets: false
+        assets: |
+          weed-*

16  .github/workflows/release.yml

@@ -11,7 +11,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        goos: [linux, windows, darwin ]
+        goos: [linux, windows, darwin, freebsd ]
         goarch: [amd64, arm]
         exclude:
           - goarch: arm

@@ -24,20 +24,16 @@ jobs:
       - name: Check out code into the Go module directory
        uses: actions/checkout@v2

-      - name: Delete old release assets
-        uses: mknejp/delete-release-assets@v1
+      - name: Wait for the deletion
+        uses: jakejarvis/wait-action@master
         with:
-          token: ${{ github.token }}
-          tag: dev
-          fail-if-no-assets: false
-          assets: |
-            weed-*
+          time: '30s'

       - name: Set BUILD_TIME env
         run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}

       - name: Go Release Binaries
-        uses: wangyoucao577/go-release-action@feature/asset-name
+        uses: wangyoucao577/go-release-action@v1.14
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}

@@ -52,7 +48,7 @@ jobs:
         asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
       - name: Go Release Binaries
-        uses: wangyoucao577/go-release-action@feature/asset-name
+        uses: wangyoucao577/go-release-action@v1.14
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}

26  README.md

@@ -67,7 +67,7 @@ Table of Contents
 * Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`
 * Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.

-Also, to increase capacity, just add more volume servers by `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally or a different machine. That is it!
+Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally, on a different machine, or on thousands of machines. That is it!

 ## Introduction ##

@@ -76,15 +76,17 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two
 1. to store billions of files!
 2. to serve the files fast!

-SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (O(1), usually just one disk read operation).
+SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages volumes on volume servers, and these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (O(1), usually just one disk read operation).

-SeaweedFS can transparently integrate with the cloud. With hot data on local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity, without any client side changes.
+SeaweedFS can transparently integrate with the cloud. With hot data on the local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity. What's more, the cloud storage access API cost is minimized. Faster and cheaper than direct cloud storage!

 There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.

 SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf)

-On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Mongodb, Redis, Cassandra, Elastic Search, LevelDB, MemSql, TiDB, Etcd, CockroachDB, etc.
+On top of the object store, the optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, MemSql, TiDB, Etcd, CockroachDB, etc.
+
+For any distributed key-value store, large values can be offloaded to SeaweedFS. With its fast access speed and linearly scalable capacity, SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore].

 [Back to TOC](#table-of-contents)

@@ -117,7 +119,10 @@ On top of the object store, optional [Filer] can support directories and POSIX a
 * [WebDAV] accesses as a mapped drive on Mac and Windows, or from mobile devices.
 * [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
 * [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB.
+
+## Kubernetes ##
 * [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/)
+* [SeaweedFS Operator](https://github.com/seaweedfs/seaweedfs-operator)

 [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files
 [SuperLargeFiles]: https://github.com/chrislusf/seaweedfs/wiki/Data-Structure-for-Large-Files

@@ -134,6 +139,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a
 [SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver
 [ActiveActiveAsyncReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization
 [FilerStoreReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Store-Replication
+[KeyLargeValueStore]: https://github.com/chrislusf/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store

 [Back to TOC](#table-of-contents)

@@ -335,7 +341,7 @@ Usually hot data are fresh and warm data are old. SeaweedFS puts the newly creat
 With the O(1) access time, the network latency cost is kept at minimum.

-If the hot~warm data is split as 20~80, with 20 servers, you can achieve storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data also, and get 5X storage throughput.
+If the hot/warm data is split as 20/80, with 20 servers, you can achieve the storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data also, and get 5X storage throughput.

 [Back to TOC](#table-of-contents)

@@ -365,7 +371,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
 * SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
 * SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
-* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Mongodb, Redis, Elastic Search, MySql, Postgres, MemSql, TiDB, CockroachDB, Etcd etc, and is easy to customized.
+* SeaweedFS Filer metadata store can be any well-known and proven data store, e.g., Redis, Cassandra, HBase, Mongodb, Elastic Search, MySql, Postgres, MemSql, TiDB, CockroachDB, Etcd, etc., and is easy to customize.
 * SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.

 | System | File Metadata | File Content Read | POSIX | REST API | Optimized for large number of small files |

@@ -403,11 +409,11 @@ SeaweedFS has a centralized master group to look up free volumes, while Ceph use
 Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews.

-Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS places data by assigned volumes.
+Ceph uses CRUSH hashing to automatically manage data placement, which is efficient for locating the data. But the data has to be placed according to the CRUSH algorithm, and any wrong configuration can cause data loss. SeaweedFS places data by assigning it to any writable volume; if a write to one volume fails, just pick another volume to write to. Adding more volumes is also as simple as it can be.

 SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.

-SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Elastic Search, Cassandra, MemSql, TiDB, CockroachCB, Etcd, to manage file directories. These stores are proven, scalable, and easier to manage.
+SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Elastic Search, Cassandra, HBase, MemSql, TiDB, CockroachDB, Etcd, to manage file directories. These stores are proven, scalable, and easier to manage.

 | SeaweedFS | comparable to Ceph | advantage |
 | ------------- | ------------- | ---------------- |

@@ -436,9 +442,7 @@ MinIO has specific requirements on storage layout. It is not flexible to adjust
 ## Dev Plan ##

-* More tools and documentation, on how to manage and scale the system. For example, how to move volumes, automatically balancing data, how to grow volumes, how to check system status, etc.
-* Integrate with Kubernetes. build [SeaweedFS Operator](https://github.com/seaweedfs/seaweedfs-operator).
-* Add ftp server.
+* More tools and documentation, on how to manage and scale the system.
 * Read and write stream data.
 * Support structured data.
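
The new Key-Large-Value paragraph above maps directly onto the filer's plain HTTP interface: the key is a path, the large value is the request body. A minimal Go sketch of that usage, assuming a filer reachable on localhost:8888; the path and sizes are illustrative, not part of this commit:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Key = a filer path (illustrative), value = the request body.
	key := "http://localhost:8888/kv/user-42/blob" // assumes a filer on :8888
	value := bytes.Repeat([]byte("x"), 1<<20)      // a 1 MiB "large value"

	// PUT stores the value; the filer chunks it onto volume servers and
	// keeps only a small metadata entry for the path itself.
	req, err := http.NewRequest(http.MethodPut, key, bytes.NewReader(value))
	if err != nil {
		panic(err)
	}
	if resp, err := http.DefaultClient.Do(req); err != nil {
		panic(err)
	} else {
		resp.Body.Close()
	}

	// GET streams the value back; locating each chunk is a cached
	// volume-id lookup followed by an O(1) disk read.
	resp, err := http.Get(key)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	n, _ := io.Copy(io.Discard, resp.Body)
	fmt.Println("read bytes:", n)
}
```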

3  docker/Dockerfile.go_build

@@ -1,5 +1,5 @@
 FROM frolvlad/alpine-glibc as builder
-RUN apk add git go g++
+RUN apk add git go g++ fuse
 RUN mkdir -p /go/src/github.com/chrislusf/
 RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
 ARG BRANCH=${BRANCH:-master}

@@ -14,6 +14,7 @@ COPY --from=builder /root/go/bin/weed /usr/bin/
 RUN mkdir -p /etc/seaweedfs
 COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
 COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
+RUN apk add fuse # for weed mount

 # volume server grpc port
 EXPOSE 18080

3  docker/Dockerfile.go_build_large

@@ -1,5 +1,5 @@
 FROM frolvlad/alpine-glibc as builder
-RUN apk add git go g++
+RUN apk add git go g++ fuse
 RUN mkdir -p /go/src/github.com/chrislusf/
 RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
 ARG BRANCH=${BRANCH:-master}

@@ -14,6 +14,7 @@ COPY --from=builder /root/go/bin/weed /usr/bin/
 RUN mkdir -p /etc/seaweedfs
 COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
 COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
+RUN apk add fuse # for weed mount

 # volume server grpc port
 EXPOSE 18080

1  docker/Dockerfile.local

@@ -4,6 +4,7 @@ COPY ./weed /usr/bin/
 RUN mkdir -p /etc/seaweedfs
 COPY ./filer.toml /etc/seaweedfs/filer.toml
 COPY ./entrypoint.sh /entrypoint.sh
+RUN apk add fuse # for weed mount

 # volume server grpc port
 EXPOSE 18080

13  docker/local-dev-compose.yml

@@ -12,7 +12,7 @@ services:
     ports:
       - 8080:8080
       - 18080:18080
-    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
+    command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
     depends_on:
       - master
   filer:

@@ -33,3 +33,14 @@ services:
       - master
       - volume
       - filer
+  mount:
+    image: chrislusf/seaweedfs:local
+    privileged: true
+    cap_add:
+      - SYS_ADMIN
+    mem_limit: 4096m
+    command: '-v=4 mount -filer="filer:8888" -dirAutoCreate -dir=/mnt/seaweedfs -cacheCapacityMB=100 -concurrentWriters=128'
+    depends_on:
+      - master
+      - volume
+      - filer

39  docker/local-k8s-compose.yml

@@ -38,28 +38,57 @@ services:
       - WEED_MYSQL_USERNAME=seaweedfs
       - WEED_MYSQL_PASSWORD=secret
       - WEED_MYSQL_ENABLED=true
+      - WEED_MYSQL_CONNECTION_MAX_IDLE=5
+      - WEED_MYSQL_CONNECTION_MAX_OPEN=75
+      # "refresh" connection every 10 minutes, eliminating mysql closing "old" connections
+      - WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS=600
+      # enable usage of memsql as filer backend
+      - WEED_MYSQL_INTERPOLATEPARAMS=true
       - WEED_LEVELDB2_ENABLED=false
-    command: 'filer -master="master:9333"'
+    command: '-v 9 filer -master="master:9333"'
     depends_on:
       - master
       - volume
       - mysql
   ingress:
-    image: jwilder/nginx-proxy
+    image: jwilder/nginx-proxy:alpine
     ports:
       - "80:80"
     volumes:
       - /var/run/docker.sock:/tmp/docker.sock:ro
+      - /tmp/nginx:/etc/nginx/conf.d
+      - ./nginx/proxy.conf:/etc/nginx/proxy.conf
   s3:
     image: chrislusf/seaweedfs:local
     ports:
       - 8333:8333
-    command: 's3 -filer="filer:8888"'
+    command: '-v 9 s3 -filer="filer:8888"'
     depends_on:
       - master
       - volume
       - filer
     environment:
-      - VIRTUAL_HOST=s3
+      - VIRTUAL_HOST=ingress
       - VIRTUAL_PORT=8333
+  registry:
+    image: registry:2
+    environment:
+      REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
+      REGISTRY_LOG_LEVEL: "debug"
+      REGISTRY_STORAGE: "s3"
+      REGISTRY_STORAGE_S3_REGION: "us-east-1"
+      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://ingress"
+      REGISTRY_STORAGE_S3_BUCKET: "registry"
+      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
+      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
+      REGISTRY_STORAGE_S3_V4AUTH: "true"
+      REGISTRY_STORAGE_S3_SECURE: "false"
+      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
+      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
+      REGISTRY_STORAGE_DELETE_ENABLED: "true"
+      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
+      REGISTRY_VALIDATION_DISABLED: "true"
+    ports:
+      - 5001:5001
+    depends_on:
+      - s3
+      - ingress
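
The registry service above is simply an S3 client pointed at the gateway through the nginx ingress; any client that accepts a custom endpoint works the same way. A minimal sketch with aws-sdk-go (the version this commit pins in go.mod), reusing the compose file's example credentials and assuming the ingress is published on localhost:80:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:   aws.String("us-east-1"),
		Endpoint: aws.String("http://localhost:80"), // the nginx ingress (assumed published locally)
		Credentials: credentials.NewStaticCredentials(
			"some_access_key1", "some_secret_key1", ""), // the compose file's example keys
		S3ForcePathStyle: aws.Bool(true), // bucket goes in the path, not the hostname
		DisableSSL:       aws.Bool(true),
	}))

	// Write one object into the same bucket the registry uses.
	_, err := s3.New(sess).PutObject(&s3.PutObjectInput{
		Bucket: aws.String("registry"),
		Key:    aws.String("hello.txt"),
		Body:   bytes.NewReader([]byte("hello seaweedfs")),
	})
	fmt.Println("put err:", err)
}
```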

30  docker/nginx/proxy.conf

@@ -0,0 +1,30 @@
+# HTTP 1.1 support
+proxy_http_version 1.1;
+#proxy_buffering off;
+proxy_set_header Host $http_host;
+proxy_set_header Upgrade $http_upgrade;
+proxy_set_header Connection $proxy_connection;
+proxy_set_header X-Real-IP $remote_addr;
+proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
+proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
+proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
+
+# Mitigate httpoxy attack (see README for details)
+proxy_set_header Proxy "";
+
+# aws default max_concurrent_requests 10
+# aws default multipart_threshold 8MB
+proxy_buffering on;           # GET buffering; "X-Accel-Buffering" enables or disables buffering of a response
+proxy_buffers 64 1m;          # buffers used for reading a response from the proxied server, for a single connection
+proxy_buffer_size 8k;         # maximum size of the data that nginx can receive from the server at a time
+proxy_busy_buffers_size 2m;
+
+proxy_request_buffering on;   # PUT buffering
+client_body_buffer_size 64m;  # buffer size for reading client request body
+client_max_body_size 64m;
+
+proxy_next_upstream error timeout non_idempotent http_500; # a failed PUT request is passed to the next server
+proxy_connect_timeout 200ms;
+proxy_read_timeout 3s;        # timeout only between two successive read operations
+proxy_send_timeout 3s;        # timeout only between two successive write operations

13  docker/prometheus/prometheus.yml

@@ -0,0 +1,13 @@
+global:
+  scrape_interval: 30s
+  scrape_timeout: 10s
+
+scrape_configs:
+  - job_name: services
+    metrics_path: /metrics
+    static_configs:
+      - targets:
+          - 'prometheus:9090'
+          - 'volume:9325'
+          - 'filer:9326'
+          - 's3:9327'
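
Each target above is just an HTTP /metrics endpoint, exposed by the new -metricsPort flags in the seaweedfs-compose.yml diff that follows. For reference, a minimal sketch of such an endpoint with prometheus/client_golang (a library go.mod already pins); the metric name is illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A sample counter so the endpoint has something to report.
	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_requests_total", // illustrative metric name
		Help: "Total requests handled.",
	})
	prometheus.MustRegister(requests)
	requests.Inc()

	// Prometheus scrapes this path, matching metrics_path: /metrics above.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9325", nil)) // same port as the volume target
}
```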

18  docker/seaweedfs-compose.yml

@@ -12,7 +12,8 @@ services:
     ports:
       - 8080:8080
       - 18080:18080
-    command: 'volume -mserver="master:9333" -port=8080'
+      - 9325:9325
+    command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325'
     depends_on:
       - master
   filer:

@@ -20,7 +21,8 @@ services:
     ports:
       - 8888:8888
       - 18888:18888
-    command: 'filer -master="master:9333"'
+      - 9326:9326
+    command: 'filer -master="master:9333" -metricsPort=9326'
     tty: true
     stdin_open: true
     depends_on:

@@ -40,8 +42,18 @@ services:
     image: chrislusf/seaweedfs # use a remote image
     ports:
       - 8333:8333
-    command: 's3 -filer="filer:8888"'
+      - 9327:9327
+    command: 's3 -filer="filer:8888" -metricsPort=9327'
     depends_on:
       - master
       - volume
       - filer
+  prometheus:
+    image: prom/prometheus:v2.21.0
+    ports:
+      - 9000:9090
+    volumes:
+      - ./prometheus:/etc/prometheus
+    command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml
+    depends_on:
+      - s3

15  go.mod

@@ -8,10 +8,10 @@ require (
     github.com/Azure/azure-storage-blob-go v0.8.0
     github.com/OneOfOne/xxhash v1.2.2
     github.com/Shopify/sarama v1.23.1
-    github.com/aws/aws-sdk-go v1.33.5
+    github.com/aws/aws-sdk-go v1.34.30
     github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
     github.com/cespare/xxhash v1.1.0
-    github.com/chrislusf/raft v1.0.3
+    github.com/chrislusf/raft v1.0.4
     github.com/coreos/go-semver v0.3.0 // indirect
     github.com/dgrijalva/jwt-go v3.2.0+incompatible
     github.com/disintegration/imaging v1.6.2

@@ -25,7 +25,7 @@ require (
     github.com/fclairamb/ftpserverlib v0.8.0
     github.com/frankban/quicktest v1.7.2 // indirect
     github.com/go-errors/errors v1.1.1 // indirect
-    github.com/go-redis/redis v6.15.7+incompatible
+    github.com/go-redis/redis/v8 v8.4.4
     github.com/go-sql-driver/mysql v1.5.0
     github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
     github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect

@@ -39,7 +39,7 @@ require (
     github.com/jcmturner/gofork v1.0.0 // indirect
     github.com/json-iterator/go v1.1.10
     github.com/karlseguin/ccache v2.0.3+incompatible
-    github.com/karlseguin/expect v1.0.1 // indirect
+    github.com/karlseguin/ccache/v2 v2.0.7
     github.com/klauspost/compress v1.10.9 // indirect
     github.com/klauspost/cpuid v1.2.1 // indirect
     github.com/klauspost/crc32 v1.2.0

@@ -51,14 +51,12 @@ require (
     github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
     github.com/mattn/go-runewidth v0.0.4 // indirect
     github.com/olivere/elastic/v7 v7.0.19
-    github.com/onsi/ginkgo v1.10.1 // indirect
-    github.com/onsi/gomega v1.7.0 // indirect
     github.com/peterh/liner v1.1.0
     github.com/pierrec/lz4 v2.2.7+incompatible // indirect
     github.com/prometheus/client_golang v1.3.0
     github.com/rakyll/statik v0.1.7
     github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
-    github.com/seaweedfs/fuse v1.0.8
+    github.com/seaweedfs/fuse v1.0.9
     github.com/seaweedfs/goexif v1.0.2
     github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
     github.com/spaolacci/murmur3 v1.1.0 // indirect

@@ -66,6 +64,7 @@ require (
     github.com/spf13/viper v1.4.0
     github.com/stretchr/testify v1.6.1
     github.com/syndtr/goleveldb v1.0.0
+    github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c
     github.com/tidwall/gjson v1.3.2
     github.com/tidwall/match v1.0.1
     github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365

@@ -82,7 +81,7 @@ require (
     gocloud.dev/pubsub/natspubsub v0.16.0
     gocloud.dev/pubsub/rabbitpubsub v0.16.0
     golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
-    golang.org/x/net v0.0.0-20200202094626-16171245cfb2
+    golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
     golang.org/x/sync v0.0.0-20200930132711-30421366ff76 // indirect
     golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd
     golang.org/x/tools v0.0.0-20200103221440-774c71fcf114
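
One bump above is not a drop-in upgrade: go-redis moves from v6 to v8, where every command gains a context argument, which is why the redis and redis2 filer stores change in this commit. A minimal sketch of the v8 call shape, with an illustrative address and key:

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // illustrative address

	// v8 threads a context through every command; v6 had no ctx argument.
	if err := client.Set(ctx, "example-key", "value", 0).Err(); err != nil {
		panic(err)
	}
	val, err := client.Get(ctx, "example-key").Result()
	fmt.Println(val, err)
}
```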

67  go.sum

@@ -61,6 +61,8 @@ github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U=
 github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.34.30 h1:izATc/E0+HcT5YHmaQVjn7GHCoqaBxn0PGo6Zq5UNFA=
+github.com/aws/aws-sdk-go v1.34.30/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=

@@ -84,8 +86,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chrislusf/raft v1.0.3 h1:11YrnzJtVa5z7m9lhY2p8VcPHoUlC1UswyoAo+U1m1k=
-github.com/chrislusf/raft v1.0.3/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
+github.com/chrislusf/raft v1.0.4 h1:THhbsVik2hxdE0/VXX834f64Wn9RzgVPp+E+XCWZdKM=
+github.com/chrislusf/raft v1.0.4/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=

@@ -114,6 +116,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=

@@ -157,6 +161,8 @@ github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3B
 github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-errors/errors v1.1.1 h1:ljK/pL5ltg3qoN+OtN6yCv9HWSfMwxSx90GJCZQxYNg=

@@ -170,8 +176,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
-github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-redis/redis/v8 v8.4.4 h1:fGqgxCTR1sydaKI00oQf3OmkU/DIe/I/fYXvGklCIuc=
+github.com/go-redis/redis/v8 v8.4.4/go.mod h1:nA0bQuF0i5JFx4Ta9RZxGKXFrQ8cRWntra97f0196iY=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
 github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=

@@ -225,6 +231,7 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw=
 github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=

@@ -256,6 +263,8 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic=
 github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
 github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk=

@@ -342,6 +351,10 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5i
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
 github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=

@@ -356,8 +369,11 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU=
 github.com/karlseguin/ccache v2.0.3+incompatible/go.mod h1:CM9tNPzT6EdRh14+jiW8mEF9mkNZuuE51qmgGYUB93w=
+github.com/karlseguin/ccache/v2 v2.0.7 h1:y5Pfi4eiyYCOD6LS/Kj+o6Nb4M5Ngpw9qFQs+v44ZYM=
+github.com/karlseguin/ccache/v2 v2.0.7/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ=
 github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8hY=
 github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
+github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
 github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
 github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=

@@ -456,6 +472,8 @@ github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
 github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=

@@ -465,12 +483,15 @@ github.com/olivere/elastic/v7 v7.0.19/go.mod h1:4Jqt5xvjqpjCqgnTcHwl3j8TLs8mvoOK
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
+github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
 github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U=
+github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
 github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=

@@ -542,6 +563,7 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhD
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
 github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=

@@ -551,10 +573,10 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/seaweedfs/fuse v1.0.7 h1:tESMXhI3gXzN+dlWsCUrkIZDiWA4dZX18rQMoqmvazw=
-github.com/seaweedfs/fuse v1.0.7/go.mod h1:W7ubwr1l7KQsMeUpxFFOFOSxUL/ucTRMAlVYs4xdfQ8=
 github.com/seaweedfs/fuse v1.0.8 h1:HBPJTC77OlxwSd2JiTwvLPn8bWTElqQp3xs9vf3C15s=
 github.com/seaweedfs/fuse v1.0.8/go.mod h1:W7ubwr1l7KQsMeUpxFFOFOSxUL/ucTRMAlVYs4xdfQ8=
+github.com/seaweedfs/fuse v1.0.9 h1:3JZoGsW7cmmrd/U5KYcIGR2+EqyBvCYCoCpEdZAz/Dc=
+github.com/seaweedfs/fuse v1.0.9/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
 github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
 github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
 github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a h1:C6IhVTxNkhlb0tlCB6JfHOUv1f0xHPK7V8X4HlJZEJw=

@@ -618,6 +640,8 @@ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
 github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
+github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
 github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
 github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
 github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=

@@ -672,6 +696,8 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opentelemetry.io/otel v0.15.0 h1:CZFy2lPhxd4HlhZnYK8gRyDotksO3Ip9rBweY1vVYJw=
+go.opentelemetry.io/otel v0.15.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA=
 go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=

@@ -711,6 +737,8 @@ golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5
 golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0=

@@ -750,6 +778,9 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -788,9 +819,16 @@ golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
 golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd h1:WgqgiQvkiZWz7XLhphjt2GI2GcGCTIZs9jqXMWmH+oc=
golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -798,6 +836,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
@ -930,6 +970,11 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@ -941,7 +986,9 @@ honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXe
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
modernc.org/b v1.0.0 h1:vpvqeyp17ddcQWF29Czawql4lDdABCDRbXRAS4+aF2o= modernc.org/b v1.0.0 h1:vpvqeyp17ddcQWF29Czawql4lDdABCDRbXRAS4+aF2o=
modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=
modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE=
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc=
modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=

3
k8s/seaweedfs/Chart.yaml

@@ -1,4 +1,5 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
version: 2.14
appVersion: "2.21"
version: 2.21

12
k8s/seaweedfs/templates/_helpers.tpl

@@ -52,12 +52,12 @@ Inject extra environment vars in the format key:value, if populated
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
{{/* Return the proper postgresqlSchema image */}}
{{/* Return the proper dbSchema image */}}
{{- define "filer.dbSchema.image" -}}
{{- if .Values.filer.dbSchema.imageOverride -}}
{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}}
@@ -80,7 +80,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
@@ -94,7 +94,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
@@ -108,7 +108,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
@@ -122,7 +122,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}

1
k8s/seaweedfs/templates/cronjob.yaml

@@ -6,6 +6,7 @@ metadata:
name: {{ include "seaweedfs.fullname" . }}-cronjob
spec:
schedule: "{{ .Values.cronjob.schedule }}"
startingDeadlineSeconds: 200
concurrencyPolicy: Forbid
failedJobsHistoryLimit: 2
successfulJobsHistoryLimit: 2

31
k8s/seaweedfs/templates/filer-service-client.yaml

@@ -0,0 +1,31 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "seaweedfs.name" . }}-filer-client
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
component: filer
{{- if .Values.filer.metricsPort }}
monitoring: "true"
{{- end }}
spec:
clusterIP: None
ports:
- name: "swfs-filer"
port: {{ .Values.filer.port }}
targetPort: {{ .Values.filer.port }}
protocol: TCP
- name: "swfs-filer-grpc"
port: {{ .Values.filer.grpcPort }}
targetPort: {{ .Values.filer.grpcPort }}
protocol: TCP
{{- if .Values.filer.metricsPort }}
- name: "metrics"
port: {{ .Values.filer.metricsPort }}
targetPort: {{ .Values.filer.metricsPort }}
protocol: TCP
{{- end }}
selector:
app: {{ template "seaweedfs.name" . }}
component: filer
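The apparent intent of this second headless Service, inferred from the two manifests rather than stated in the commit, is separation of audiences: the peer-facing -filer Service below tolerates unready endpoints so filers can discover each other during cluster formation, while this -filer-client Service (now the target of the S3 gateway's -filer flag) only resolves filer pods that are actually ready, and also carries the metrics port that was removed from the peer service.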

9
k8s/seaweedfs/templates/filer-service.yaml

@@ -1,6 +1,8 @@
apiVersion: v1
kind: Service
metadata:
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
name: {{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
labels:
@@ -8,6 +10,7 @@ metadata:
component: filer
spec:
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: "swfs-filer"
port: {{ .Values.filer.port }}
@@ -17,12 +20,6 @@ spec:
port: {{ .Values.filer.grpcPort }}
targetPort: {{ .Values.filer.grpcPort }}
protocol: TCP
{{- if .Values.filer.metricsPort }}
- name: "swfs-filer-metrics"
port: {{ .Values.filer.metricsPort }}
targetPort: {{ .Values.filer.metricsPort }}
protocol: TCP
{{- end }}
selector:
app: {{ template "seaweedfs.name" . }}
component: filer

12
k8s/seaweedfs/templates/filer-statefulset.yaml

@@ -87,6 +87,12 @@ spec:
value: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
@@ -128,7 +134,7 @@
{{- end }}
-ip=${POD_IP} \
{{- if gt (.Values.filer.replicas | int) 1 }}
-peers={{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}
-peers=$(echo -n "{{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}" | sed "s/$HOSTNAME.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}//" | sed 's/,$//; s/^,//; s/,,/,/;' ) \
{{- end }}
-master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
{{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }}
@@ -172,7 +178,7 @@
periodSeconds: 15
successThreshold: 1
failureThreshold: 100
timeoutSeconds: 3
timeoutSeconds: 10
livenessProbe:
httpGet:
path: /
@@ -182,7 +188,7 @@
periodSeconds: 30
successThreshold: 1
failureThreshold: 5
timeoutSeconds: 3
timeoutSeconds: 10
{{- if .Values.filer.resources }}
resources:
{{ tpl .Values.filer.resources . | nindent 12 | trim }}
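The new -peers value above enumerates all filer replicas from the StatefulSet ordinals and then strips the pod's own address out of the list with sed. For readers who find the shell pipeline opaque, here is a rough Go sketch of the same idea (hypothetical helper names, not part of the chart):

package main

import (
	"fmt"
	"strings"
)

// buildPeers mimics the template's sed pipeline: enumerate every filer
// replica's address, then drop this pod's own address from the list.
func buildPeers(fullname, hostname string, replicas, port int) string {
	var peers []string
	for i := 0; i < replicas; i++ {
		addr := fmt.Sprintf("%s-filer-%d.%s-filer:%d", fullname, i, fullname, port)
		if strings.HasPrefix(addr, hostname+".") {
			continue // skip self, like the sed substitution on $HOSTNAME
		}
		peers = append(peers, addr)
	}
	return strings.Join(peers, ",") // no stray commas left to clean up
}

func main() {
	fmt.Println(buildPeers("seaweedfs", "seaweedfs-filer-1", 3, 8888))
	// seaweedfs-filer-0.seaweedfs-filer:8888,seaweedfs-filer-2.seaweedfs-filer:8888
}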

1
k8s/seaweedfs/templates/master-service.yaml

@@ -10,6 +10,7 @@ metadata:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: "swfs-master"
port: {{ .Values.master.port }}

10
k8s/seaweedfs/templates/master-statefulset.yaml

@@ -76,6 +76,12 @@ spec:
value: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
@@ -157,7 +163,7 @@
periodSeconds: 45
successThreshold: 2
failureThreshold: 100
timeoutSeconds: 5
timeoutSeconds: 10
livenessProbe:
httpGet:
path: /cluster/status
@@ -167,7 +173,7 @@
periodSeconds: 30
successThreshold: 1
failureThreshold: 4
timeoutSeconds: 5
timeoutSeconds: 10
{{- if .Values.master.resources }}
resources:
{{ tpl .Values.master.resources . | nindent 12 | trim }}

27
k8s/seaweedfs/templates/s3-deployment.yaml

@@ -59,11 +59,17 @@ spec:
fieldPath: metadata.namespace
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
- |
exec /usr/bin/weed \
exec /usr/bin/weed -logdir=/logs \
{{- if .Values.s3.loggingOverrideLevel }}
-v={{ .Values.s3.loggingOverrideLevel }} \
{{- else }}
@@ -81,9 +87,13 @@
{{- if .Values.s3.domainName }}
-domainName={{ .Values.s3.domainName }} \
{{- end }}
-filer={{ template "seaweedfs.name" . }}-filer:{{ .Values.filer.port }}
{{- if or (.Values.global.enableSecurity) (.Values.s3.extraVolumeMounts) }}
{{- if .Values.s3.allowEmptyFolder }}
-allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \
{{- end }}
-filer={{ template "seaweedfs.name" . }}-filer-client:{{ .Values.filer.port }}
volumeMounts:
- name: logs
mountPath: "/logs/"
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
@@ -106,7 +116,6 @@
mountPath: /usr/local/share/ca-certificates/client/
{{- end }}
{{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }}
{{- end }}
ports:
- containerPort: {{ .Values.s3.port }}
name: swfs-s3
@@ -119,7 +128,7 @@
periodSeconds: 15
successThreshold: 1
failureThreshold: 100
timeoutSeconds: 3
timeoutSeconds: 10
livenessProbe:
httpGet:
path: /
@@ -129,12 +138,18 @@
periodSeconds: 60
successThreshold: 1
failureThreshold: 20
timeoutSeconds: 3
timeoutSeconds: 10
{{- if .Values.s3.resources }}
resources:
{{ tpl .Values.s3.resources . | nindent 12 | trim }}
{{- end }}
volumes:
{{- if eq .Values.s3.logs.type "hostPath" }}
- name: logs
hostPath:
path: /storage/logs/seaweedfs/s3
type: DirectoryOrCreate
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:

13
k8s/seaweedfs/templates/volume-statefulset.yaml

@@ -64,6 +64,12 @@ spec:
fieldPath: status.hostIP
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
@@ -80,6 +86,9 @@ spec:
-metricsPort {{ .Values.volume.metricsPort }} \
{{- end }}
-dir={{ .Values.volume.dir }} \
{{- if .Values.volume.dir_idx }}
-dir.idx={{ .Values.volume.dir_idx }} \
{{- end }}
-max={{ .Values.volume.maxVolumes }} \
{{- if .Values.volume.rack }}
-rack={{ .Values.volume.rack }} \
@@ -149,7 +158,7 @@
periodSeconds: 90
successThreshold: 1
failureThreshold: 100
timeoutSeconds: 5
timeoutSeconds: 30
livenessProbe:
httpGet:
path: /status
@@ -159,7 +168,7 @@
periodSeconds: 90
successThreshold: 1
failureThreshold: 4
timeoutSeconds: 5
timeoutSeconds: 30
{{- if .Values.volume.resources }}
resources:
{{ tpl .Values.volume.resources . | nindent 12 | trim }}

21
k8s/seaweedfs/values.yaml

@@ -4,7 +4,7 @@ global:
registry: ""
repository: ""
imageName: chrislusf/seaweedfs
imageTag: "2.17"
# imageTag: "2.21" - started using {.Chart.appVersion}
imagePullPolicy: IfNotPresent
imagePullSecrets: imagepullsecret
restartPolicy: Always
@@ -21,6 +21,10 @@ global:
# Y number of replica in other racks in the same data center
# Z number of replica in other servers in the same rack
replicationPlacment: "001"
extraEnvironmentVars:
WEED_CLUSTER_DEFAULT: "sw"
WEED_CLUSTER_SW_MASTER: "seaweedfs-master:9333"
WEED_CLUSTER_SW_FILER: "seaweedfs-filer-client:8888"
image:
registry: ""
@@ -140,6 +144,8 @@ volume:
# Directories to store data files. dir[,dir]... (default "/tmp")
dir: "/data"
# Directories to store index files. dir[,dir]... (default "/tmp")
dir_idx: null
# Maximum numbers of volumes, count[,count]...
# If set to zero on non-windows OS, the limit will be auto configured. (default "7")
@@ -292,8 +298,10 @@ filer:
WEED_MYSQL_HOSTNAME: "mysql-db-host"
WEED_MYSQL_PORT: "3306"
WEED_MYSQL_DATABASE: "sw_database"
WEED_MYSQL_CONNECTION_MAX_IDLE: "10"
WEED_MYSQL_CONNECTION_MAX_OPEN: "150"
WEED_MYSQL_CONNECTION_MAX_IDLE: "5"
WEED_MYSQL_CONNECTION_MAX_OPEN: "75"
# "refresh" connection every 10 minutes, eliminating mysql closing "old" connections
WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS: "600"
# enable usage of memsql as filer backend
WEED_MYSQL_INTERPOLATEPARAMS: "true"
WEED_LEVELDB2_ENABLED: "false"
@@ -313,6 +321,8 @@ s3:
port: 8333
metricsPort: 9327
loggingOverrideLevel: null
# allow empty folders
allowEmptyFolder: true
# Suffix of the host name, {bucket}.{domainName}
domainName: ""
@@ -343,6 +353,11 @@ s3:
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
logs:
type: "hostPath"
size: ""
storageClass: ""
cronjob:
enabled: false
schedule: "*/7 * * * *"
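The three WEED_MYSQL_CONNECTION_* values map directly onto Go's standard database/sql pool knobs, and capping the connection lifetime below MySQL's wait_timeout is what stops the server from silently closing "old" idle connections. A minimal sketch of how such values are typically applied (standard-library API; the function name and wiring here are illustrative, not SeaweedFS's exact code):

package main

import (
	"database/sql"
	"time"

	_ "github.com/go-sql-driver/mysql" // driver registration only
)

func openFilerDB(dsn string) (*sql.DB, error) {
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		return nil, err
	}
	// Mirror the chart's environment variables:
	db.SetMaxIdleConns(5)                    // WEED_MYSQL_CONNECTION_MAX_IDLE
	db.SetMaxOpenConns(75)                   // WEED_MYSQL_CONNECTION_MAX_OPEN
	db.SetConnMaxLifetime(600 * time.Second) // WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS
	return db, nil
}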

3
weed/command/command.go

@@ -15,6 +15,8 @@ var Commands = []*Command{
cmdDownload,
cmdExport,
cmdFiler,
cmdFilerCat,
cmdFilerMetaTail,
cmdFilerReplicate,
cmdFilerSynchronize,
cmdFix,
@@ -25,7 +27,6 @@ var Commands = []*Command{
cmdScaffold,
cmdServer,
cmdShell,
cmdWatch,
cmdUpload,
cmdVersion,
cmdVolume,

7
weed/command/filer.go

@@ -42,7 +42,7 @@ type FilerOptions struct {
cipher *bool
peers *string
metricsHttpPort *int
cacheToFilerLimit *int
saveToFilerLimit *int
defaultLevelDbDirectory *string
}
@@ -64,7 +64,7 @@ func init() {
f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
f.cacheToFilerLimit = cmdFiler.Flag.Int("cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
// start s3 on filer
@@ -74,6 +74,7 @@ func init() {
filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file")
filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file")
filerS3Options.allowEmptyFolder = cmdFiler.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
}
var cmdFiler = &Command{
@@ -148,7 +149,7 @@ func (fo *FilerOptions) startFiler() {
Host: *fo.ip,
Port: uint32(*fo.port),
Cipher: *fo.cipher,
CacheToFilerLimit: int64(*fo.cacheToFilerLimit),
SaveToFilerLimit: *fo.saveToFilerLimit,
Filers: peers,
})
if nfs_err != nil {
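The renamed -saveToFilerLimit flag means sufficiently small writes bypass volume servers entirely and are persisted inline in the filer's metadata store. The decision is essentially a size check along these lines (an illustrative sketch, not the actual upload path):

// saveInline reports whether a write of the given size should be stored
// directly in the filer store instead of being chunked and uploaded to
// volume servers. A limit of 0 (the default) disables inline saving.
func saveInline(size, saveToFilerLimit int64) bool {
	return saveToFilerLimit > 0 && size < saveToFilerLimit
}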

118
weed/command/filer_cat.go

@@ -0,0 +1,118 @@
package command
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"google.golang.org/grpc"
"math"
"net/url"
"os"
"strings"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
var (
filerCat FilerCatOptions
)
type FilerCatOptions struct {
grpcDialOption grpc.DialOption
filerAddress string
filerClient filer_pb.SeaweedFilerClient
output *string
}
func (fco *FilerCatOptions) GetLookupFileIdFunction() wdclient.LookupFileIdFunctionType {
return func(fileId string) (targetUrls []string, err error) {
vid := filer.VolumeId(fileId)
resp, err := fco.filerClient.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
if err != nil {
return nil, err
}
locations := resp.LocationsMap[vid]
for _, loc := range locations.Locations {
targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId))
}
return
}
}
func init() {
cmdFilerCat.Run = runFilerCat // break init cycle
filerCat.output = cmdFilerCat.Flag.String("o", "", "write to file instead of stdout")
}
var cmdFilerCat = &Command{
UsageLine: "filer.cat [-o <file>] http://localhost:8888/path/to/file",
Short: "copy one file to local",
Long: `read one file to stdout or write to a file
`,
}
func runFilerCat(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
if len(args) == 0 {
return false
}
filerSource := args[len(args)-1]
filerUrl, err := url.Parse(filerSource)
if err != nil {
fmt.Printf("The last argument should be a URL on filer: %v\n", err)
return false
}
urlPath := filerUrl.Path
if strings.HasSuffix(urlPath, "/") {
fmt.Printf("The last argument should be a file: %v\n", err)
return false
}
filerCat.filerAddress = filerUrl.Host
filerCat.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
dir, name := util.FullPath(urlPath).DirAndName()
writer := os.Stdout
if *filerCat.output != "" {
fmt.Printf("saving %s to %s\n", filerSource, *filerCat.output)
f, err := os.OpenFile(*filerCat.output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
fmt.Printf("open file %s: %v\n", *filerCat.output, err)
return false
}
defer f.Close()
writer = f
}
pb.WithFilerClient(filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Name: name,
Directory: dir,
}
respLookupEntry, err := filer_pb.LookupEntry(client, request)
if err != nil {
return err
}
filerCat.filerClient = client
return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
})
return true
}
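Judging from the flags above, an invocation would look like "weed filer.cat -o /tmp/out.bin http://localhost:8888/path/to/file": the command looks the entry up over the filer's gRPC port, resolves each chunk's volume locations through the filer's LookupVolume call, and streams the chunk bytes straight to the local writer instead of proxying the download through the filer's HTTP port.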

2
weed/command/filer_copy.go

@@ -92,7 +92,7 @@ func runCopy(cmd *Command, args []string) bool {
}
urlPath := filerUrl.Path
if !strings.HasSuffix(urlPath, "/") {
fmt.Printf("The last argument should be a folder and end with \"/\": %v\n", err)
fmt.Printf("The last argument should be a folder and end with \"/\"\n")
return false
}

201
weed/command/filer_meta_tail.go

@@ -0,0 +1,201 @@
package command
import (
"context"
"fmt"
jsoniter "github.com/json-iterator/go"
"github.com/olivere/elastic/v7"
"io"
"path/filepath"
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
cmdFilerMetaTail.Run = runFilerMetaTail // break init cycle
}
var cmdFilerMetaTail = &Command{
UsageLine: "filer.meta.tail [-filer=localhost:8888] [-target=/]",
Short: "see recent changes on a filer",
Long: `See recent changes on a filer.
`,
}
var (
tailFiler = cmdFilerMetaTail.Flag.String("filer", "localhost:8888", "filer hostname:port")
tailTarget = cmdFilerMetaTail.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
tailStart = cmdFilerMetaTail.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
tailPattern = cmdFilerMetaTail.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
esServers = cmdFilerMetaTail.Flag.String("es", "", "comma-separated elastic servers http://<host:port>")
esIndex = cmdFilerMetaTail.Flag.String("es.index", "seaweedfs", "ES index name")
)
func runFilerMetaTail(cmd *Command, args []string) bool {
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var filterFunc func(dir, fname string) bool
if *tailPattern != "" {
if strings.Contains(*tailPattern, "/") {
println("watch path pattern", *tailPattern)
filterFunc = func(dir, fname string) bool {
matched, err := filepath.Match(*tailPattern, dir+"/"+fname)
if err != nil {
fmt.Printf("error: %v", err)
}
return matched
}
} else {
println("watch file pattern", *tailPattern)
filterFunc = func(dir, fname string) bool {
matched, err := filepath.Match(*tailPattern, fname)
if err != nil {
fmt.Printf("error: %v", err)
}
return matched
}
}
}
shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
if filterFunc == nil {
return true
}
if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
return false
}
if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
return true
}
if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
return true
}
return false
}
eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
fmt.Printf("dir:%s %+v\n", resp.Directory, resp.EventNotification)
return nil
}
if *esServers != "" {
var err error
eachEntryFunc, err = sendToElasticSearchFunc(*esServers, *esIndex)
if err != nil {
fmt.Printf("create elastic search client to %s: %+v\n", *esServers, err)
return false
}
}
tailErr := pb.WithFilerClient(*tailFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "tail",
PathPrefix: *tailTarget,
SinceNs: time.Now().Add(-*tailStart).UnixNano(),
})
if err != nil {
return fmt.Errorf("listen: %v", err)
}
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if !shouldPrint(resp) {
continue
}
if err = eachEntryFunc(resp); err != nil {
return err
}
}
})
if tailErr != nil {
fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
}
return true
}
type EsDocument struct {
Dir string `json:"dir,omitempty"`
Name string `json:"name,omitempty"`
IsDirectory bool `json:"isDir,omitempty"`
Size uint64 `json:"size,omitempty"`
Uid uint32 `json:"uid,omitempty"`
Gid uint32 `json:"gid,omitempty"`
UserName string `json:"userName,omitempty"`
Collection string `json:"collection,omitempty"`
Crtime int64 `json:"crtime,omitempty"`
Mtime int64 `json:"mtime,omitempty"`
Mime string `json:"mime,omitempty"`
}
func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) {
entry := event.NewEntry
dir, name := event.NewParentPath, entry.Name
id := util.Md5String([]byte(util.NewFullPath(dir, name)))
esEntry := &EsDocument{
Dir: dir,
Name: name,
IsDirectory: entry.IsDirectory,
Size: entry.Attributes.FileSize,
Uid: entry.Attributes.Uid,
Gid: entry.Attributes.Gid,
UserName: entry.Attributes.UserName,
Collection: entry.Attributes.Collection,
Crtime: entry.Attributes.Crtime,
Mtime: entry.Attributes.Mtime,
Mime: entry.Attributes.Mime,
}
return esEntry, id
}
func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
options := []elastic.ClientOptionFunc{}
options = append(options, elastic.SetURL(strings.Split(servers, ",")...))
options = append(options, elastic.SetSniff(false))
options = append(options, elastic.SetHealthcheck(false))
client, err := elastic.NewClient(options...)
if err != nil {
return nil, err
}
return func(resp *filer_pb.SubscribeMetadataResponse) error {
event := resp.EventNotification
if event.OldEntry != nil &&
(event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) {
// delete or not update the same file
dir, name := resp.Directory, event.OldEntry.Name
id := util.Md5String([]byte(util.NewFullPath(dir, name)))
println("delete", id)
_, err := client.Delete().Index(esIndex).Id(id).Do(context.Background())
return err
}
if event.NewEntry != nil {
// add a new file or update the same file
esEntry, id := toEsEntry(event)
value, err := jsoniter.Marshal(esEntry)
if err != nil {
return err
}
println(string(value))
_, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background())
return err
}
return nil
}, nil
}
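The -pattern flag above relies on Go's path/filepath.Match semantics: a pattern containing a slash is matched against dir/name, otherwise only against the bare file name. For reference, this is plain standard-library behavior:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Pattern with a slash: matched against the full "dir/name".
	m1, _ := filepath.Match("/home/?opher", "/home/gopher")
	// Pattern without a slash: matched against the file name only.
	m2, _ := filepath.Match("*.pdf", "report.pdf")
	fmt.Println(m1, m2) // true true
}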

3
weed/command/filer_replication.go

@@ -14,7 +14,6 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
"github.com/chrislusf/seaweedfs/weed/replication/sub"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper"
)
func init() {
@@ -123,7 +122,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
}
func validateOneEnabledInput(config *viper.Viper) {
func validateOneEnabledInput(config *util.ViperProxy) {
enabledInput := ""
for _, input := range sub.NotificationInputs {
if config.GetBool("notification." + input.GetName() + ".enabled") {

2
weed/command/mount.go

@@ -43,7 +43,7 @@ func init() {
mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 0, "limit concurrent goroutine writers if not 0")
mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 128, "limit concurrent goroutine writers if not 0")
mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local file chunk cache capacity in MB (0 will disable cache)")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")

5
weed/command/mount_std.go

@@ -58,6 +58,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
return true
}
util.LoadConfiguration("security", false)
// try to connect to filer, filerBucketsPath may be useful later
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var cipher bool
@@ -78,8 +79,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
dir := util.ResolvePath(*option.dir)
chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB
util.LoadConfiguration("security", false)
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if dir == "" {
fmt.Printf("Please specify the mount directory via \"-dir\"")
@@ -151,6 +150,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
fuse.MaxReadahead(1024 * 128),
fuse.AsyncRead(),
fuse.WritebackCache(),
fuse.MaxBackground(128),
fuse.CongestionThreshold(128),
}
options = append(options, osSpecificMountOptions()...)
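The concurrentWriters default moving from 0 (unlimited) to 128, together with the matching fuse MaxBackground/CongestionThreshold values, bounds how many write goroutines the mount runs at once. Such a limiter is commonly built as a buffered-channel semaphore; a rough sketch of the idea (hypothetical types, not SeaweedFS's actual util code):

// writerPool caps the number of concurrently running write goroutines.
type writerPool struct {
	slots chan struct{}
}

func newWriterPool(limit int) *writerPool {
	return &writerPool{slots: make(chan struct{}, limit)} // e.g. limit = 128
}

// Go blocks while `limit` writers are already in flight, then runs work
// on its own goroutine and releases the slot when it finishes.
func (p *writerPool) Go(work func()) {
	p.slots <- struct{}{}
	go func() {
		defer func() { <-p.slots }()
		work()
	}()
}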

3
weed/command/s3.go

@@ -30,6 +30,7 @@ type S3Options struct {
tlsPrivateKey *string
tlsCertificate *string
metricsHttpPort *int
allowEmptyFolder *bool
}
func init() {
@@ -41,6 +42,7 @@ func init() {
s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", false, "allow empty folders")
}
var cmdS3 = &Command{
@@ -181,6 +183,7 @@ func (s3opt *S3Options) startS3Server() bool {
DomainName: *s3opt.domainName,
BucketsPath: filerBucketsPath,
GrpcDialOption: grpcDialOption,
AllowEmptyFolder: *s3opt.allowEmptyFolder,
})
if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)

84
weed/command/scaffold.go

@@ -44,6 +44,8 @@ func runScaffold(cmd *Command, args []string) bool {
content = SECURITY_TOML_EXAMPLE
case "master":
content = MASTER_TOML_EXAMPLE
case "shell":
content = SHELL_TOML_EXAMPLE
}
if content == "" {
println("need a valid -config option")
@@ -85,9 +87,21 @@ buckets_folder = "/buckets"
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "." # directory to store level db files
dir = "./filerldb2" # directory to store level db files
[mysql] # or tidb
[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files
[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files
[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) COMMENT 'directory or file name',
@@ -104,9 +118,31 @@ password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
[mysql2] # or memsql, tidb
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS %s (
dirhash BIGINT,
name VARCHAR(1000),
directory TEXT,
meta LONGBLOB,
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
[postgres] # or cockroachdb
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
# name VARCHAR(65535),
@@ -119,7 +155,29 @@ hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "" # create or use an existing database
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
[postgres2]
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS %s (
dirhash BIGINT,
name VARCHAR(65535),
directory VARCHAR(65535),
meta bytea,
PRIMARY KEY (dirhash, name)
);
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
@@ -166,9 +224,9 @@ addresses = [
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = true
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = true
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
@@ -459,5 +517,19 @@ copy_other = 1 # create n x 1 = n actual volumes
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false
`
SHELL_TOML_EXAMPLE = `
[cluster]
default = "c1"
[cluster.c1]
master = "localhost:9333" # comma-separated master servers
filer = "localhost:8888" # filer host and port
[cluster.c2]
master = ""
filer = ""
`
)

10
weed/command/server.go

@@ -61,6 +61,7 @@ var (
serverMetricsHttpPort = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
// pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
isStartingMasterServer = cmdServer.Flag.Bool("master", true, "whether to start master server")
isStartingVolumeServer = cmdServer.Flag.Bool("volume", true, "whether to start volume server")
isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway")
@@ -94,7 +95,7 @@ func init() {
filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
filerOptions.cacheToFilerLimit = cmdServer.Flag.Int("filer.cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
@@ -113,6 +114,7 @@ func init() {
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
s3Options.allowEmptyFolder = cmdServer.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port")
@@ -223,7 +225,7 @@ func runServer(cmd *Command, args []string) bool {
}
startMaster(masterOptions, serverWhiteList)
if *isStartingMasterServer {
go startMaster(masterOptions, serverWhiteList)
}
select {}
return true
}
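With the master now optional and started on its own goroutine, runServer parks the main goroutine with an empty select, which blocks forever; the trailing "return true" becomes unreachable and remains only to satisfy the bool signature. A minimal illustration of the pattern (a sketch, not the actual server code):

package main

func startComponent() {
	// stand-in for launching a master/volume/filer goroutine
}

func run() bool {
	go startComponent() // each enabled component runs on its own goroutine
	select {}           // an empty select has no cases: it blocks forever
	return true         // unreachable; kept only for the bool signature
}

func main() { run() }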

25
weed/command/shell.go

@@ -11,12 +11,14 @@ import (
var (
shellOptions shell.ShellOptions
shellInitialFiler *string
shellCluster *string
)
func init() {
cmdShell.Run = runShell // break init cycle
shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
shellInitialFiler = cmdShell.Flag.String("filer", "localhost:8888", "filer host and port")
shellOptions.Masters = cmdShell.Flag.String("master", "", "comma-separated master servers, e.g. localhost:9333")
shellInitialFiler = cmdShell.Flag.String("filer", "", "filer host and port, e.g. localhost:8888")
shellCluster = cmdShell.Flag.String("cluster", "", "cluster defined in shell.toml")
}
var cmdShell = &Command{
@@ -24,6 +26,8 @@ var cmdShell = &Command{
Short: "run interactive administrative commands",
Long: `run interactive administrative commands.
Generate shell.toml via "weed scaffold -config=shell"
`,
}
@@ -32,6 +36,23 @@ func runShell(command *Command, args []string) bool {
util.LoadConfiguration("security", false)
shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
if *shellOptions.Masters == "" && *shellInitialFiler == "" {
util.LoadConfiguration("shell", false)
v := util.GetViper()
cluster := v.GetString("cluster.default")
if *shellCluster != "" {
cluster = *shellCluster
}
if cluster == "" {
*shellOptions.Masters, *shellInitialFiler = "localhost:9333", "localhost:8888"
} else {
*shellOptions.Masters = v.GetString("cluster." + cluster + ".master")
*shellInitialFiler = v.GetString("cluster." + cluster + ".filer")
}
}
fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)
var err error
shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
if err != nil {

25
weed/command/upload.go

@@ -1,8 +1,12 @@
package command
import (
"context"
"encoding/json"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"google.golang.org/grpc"
"os"
"path/filepath"
@@ -65,6 +69,15 @@ func runUpload(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
defaultReplication, err := readMasterConfiguration(grpcDialOption, *upload.master)
if err != nil {
fmt.Printf("upload: %v", err)
return false
}
if *upload.replication == "" {
*upload.replication = defaultReplication
}
if len(args) == 0 {
if *upload.dir == "" {
return false
@@ -104,3 +117,15 @@ func runUpload(cmd *Command, args []string) bool {
}
return true
}
func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (replication string, err error) {
err = pb.WithMasterClient(masterAddress, grpcDialOption, func(client master_pb.SeaweedClient) error {
resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
if err != nil {
return fmt.Errorf("get master %s configuration: %v", masterAddress, err)
}
replication = resp.DefaultReplication
return nil
})
return
}

113
weed/command/watch.go

@@ -1,113 +0,0 @@
package command
import (
"context"
"fmt"
"io"
"path/filepath"
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
cmdWatch.Run = runWatch // break init cycle
}
var cmdWatch = &Command{
UsageLine: "watch [-filer=localhost:8888] [-target=/]",
Short: "see recent changes on a filer",
Long: `See recent changes on a filer.
`,
}
var (
watchFiler = cmdWatch.Flag.String("filer", "localhost:8888", "filer hostname:port")
watchTarget = cmdWatch.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
watchStart = cmdWatch.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
watchPattern = cmdWatch.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
)
func runWatch(cmd *Command, args []string) bool {
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var filterFunc func(dir, fname string) bool
if *watchPattern != "" {
if strings.Contains(*watchPattern, "/") {
println("watch path pattern", *watchPattern)
filterFunc = func(dir, fname string) bool {
matched, err := filepath.Match(*watchPattern, dir+"/"+fname)
if err != nil {
fmt.Printf("error: %v", err)
}
return matched
}
} else {
println("watch file pattern", *watchPattern)
filterFunc = func(dir, fname string) bool {
matched, err := filepath.Match(*watchPattern, fname)
if err != nil {
fmt.Printf("error: %v", err)
}
return matched
}
}
}
shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
if filterFunc == nil {
return true
}
if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
return false
}
if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
return true
}
if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
return true
}
return false
}
watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "watch",
PathPrefix: *watchTarget,
SinceNs: time.Now().Add(-*watchStart).UnixNano(),
})
if err != nil {
return fmt.Errorf("listen: %v", err)
}
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if !shouldPrint(resp) {
continue
}
fmt.Printf("dir:%s %+v\n", resp.Directory, resp.EventNotification)
}
})
if watchErr != nil {
fmt.Printf("watch %s: %v\n", *watchFiler, watchErr)
}
return true
}
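The deleted watch command (superseded in this commit by the new filer_meta_tail command) filtered events with filepath.Match. A tiny standalone illustration of those matching rules, using made-up file names:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// A bare pattern is matched against the file name only.
	matched, _ := filepath.Match("*.pdf", "report.pdf")
	fmt.Println(matched) // true

	// A pattern containing "/" is matched against dir+"/"+name.
	matched, _ = filepath.Match("/home/?opher", "/home/gopher")
	fmt.Println(matched) // true
}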

192
weed/filer/abstract_sql/abstract_sql_store.go

@ -9,19 +9,33 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"strings" "strings"
"sync"
) )
type SqlGenerator interface {
GetSqlInsert(bucket string) string
GetSqlUpdate(bucket string) string
GetSqlFind(bucket string) string
GetSqlDelete(bucket string) string
GetSqlDeleteFolderChildren(bucket string) string
GetSqlListExclusive(bucket string) string
GetSqlListInclusive(bucket string) string
GetSqlCreateTable(bucket string) string
GetSqlDropTable(bucket string) string
}
type AbstractSqlStore struct {
SqlGenerator
DB *sql.DB
SqlInsert string
SqlUpdate string
SqlFind string
SqlDelete string
SqlDeleteFolderChildren string
SqlListExclusive string
SqlListInclusive string
SupportBucketTable bool
dbs map[string]bool
dbsLock sync.Mutex
}
const (
DEFAULT_TABLE = "filemeta"
)
type TxOrDB interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
@ -52,16 +66,65 @@ func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error {
return nil
}
func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB {
func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.FullPath, isForChildren bool) (txOrDB TxOrDB, bucket string, shortPath util.FullPath, err error) {
shortPath = fullpath
bucket = DEFAULT_TABLE
if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
return tx
txOrDB = tx
} else {
txOrDB = store.DB
}
if !store.SupportBucketTable {
return
}
if !strings.HasPrefix(string(fullpath), "/buckets/") {
return
}
return store.DB
// detect bucket
bucketAndObjectKey := string(fullpath)[len("/buckets/"):]
t := strings.Index(bucketAndObjectKey, "/")
if t < 0 && !isForChildren {
return
}
bucket = bucketAndObjectKey
shortPath = "/"
if t > 0 {
bucket = bucketAndObjectKey[:t]
shortPath = util.FullPath(bucketAndObjectKey[t:])
}
if isValidBucket(bucket) {
store.dbsLock.Lock()
defer store.dbsLock.Unlock()
if store.dbs == nil {
store.dbs = make(map[string]bool)
}
if _, found := store.dbs[bucket]; !found {
if err = store.CreateTable(ctx, bucket); err == nil {
store.dbs[bucket] = true
}
}
}
return
}
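In effect, on a store with SupportBucketTable set, a path like /buckets/photos/2021/a.jpg is routed to a dedicated table named after the bucket ("photos") with the path shortened to /2021/a.jpg, while anything outside /buckets/ stays in the shared filemeta table; missing bucket tables are created lazily on first access.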
func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
if err != nil {
return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
}
dir, name := shortPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
@ -71,7 +134,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
meta = util.MaybeGzipData(meta)
}
res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta)
res, err := db.ExecContext(ctx, store.GetSqlInsert(bucket), util.HashStringToLong(dir), name, dir, meta)
if err == nil {
return
}
@ -84,7 +147,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
// now the insert failed possibly due to duplication constraints
glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)
res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("upsert %s: %s", entry.FullPath, err)
}
@ -99,13 +162,18 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
if err != nil {
return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
}
dir, name := shortPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
res, err := db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("update %s: %s", entry.FullPath, err)
}
@ -119,8 +187,13 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Ent
func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer.Entry, error) {
dir, name := fullpath.DirAndName()
row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir)
db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
if err != nil {
return nil, fmt.Errorf("findDB %s : %v", fullpath, err)
}
dir, name := shortPath.DirAndName()
row := db.QueryRowContext(ctx, store.GetSqlFind(bucket), util.HashStringToLong(dir), name, dir)
var data []byte
if err := row.Scan(&data); err != nil {
@ -142,9 +215,14 @@ func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.Full
func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
dir, name := fullpath.DirAndName()
db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
if err != nil {
return fmt.Errorf("findDB %s : %v", fullpath, err)
}
res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, util.HashStringToLong(dir), name, dir)
dir, name := shortPath.DirAndName()
res, err := db.ExecContext(ctx, store.GetSqlDelete(bucket), util.HashStringToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("delete %s: %s", fullpath, err)
}
@ -159,7 +237,23 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.Fu
func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath)
db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, true)
if err != nil {
return fmt.Errorf("findDB %s : %v", fullpath, err)
}
if isValidBucket(bucket) && shortPath == "/" {
if err = store.deleteTable(ctx, bucket); err == nil {
store.dbsLock.Lock()
delete(store.dbs, bucket)
store.dbsLock.Unlock()
return nil
} else {
return err
}
}
res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), fullpath)
if err != nil {
return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
}
@ -172,15 +266,21 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Contex
return nil
}
func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
sqlText := store.SqlListExclusive
if inclusive {
sqlText = store.SqlListInclusive
func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
db, bucket, shortPath, err := store.getTxOrDB(ctx, dirPath, true)
if err != nil {
return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
}
sqlText := store.GetSqlListExclusive(bucket)
if includeStartFile {
sqlText = store.GetSqlListInclusive(bucket)
}
rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), prefix+"%", limit)
rows, err := db.QueryContext(ctx, sqlText, util.HashStringToLong(string(shortPath)), startFileName, string(shortPath), prefix+"%", limit+1)
if err != nil {
return nil, fmt.Errorf("list %s : %v", fullpath, err)
return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
}
defer rows.Close()
@ -188,28 +288,52 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
var name string
var data []byte
if err = rows.Scan(&name, &data); err != nil {
glog.V(0).Infof("scan %s : %v", fullpath, err)
return nil, fmt.Errorf("scan %s: %v", fullpath, err)
glog.V(0).Infof("scan %s : %v", dirPath, err)
return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
}
lastFileName = name
entry := &filer.Entry{
FullPath: util.NewFullPath(string(fullpath), name),
FullPath: util.NewFullPath(string(dirPath), name),
}
if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
}
if !eachEntryFunc(entry) {
break
}
entries = append(entries, entry)
}
return entries, nil
return lastFileName, nil
}
func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
return store.ListDirectoryPrefixedEntries(ctx, fullpath, startFileName, inclusive, limit, "")
func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}
func (store *AbstractSqlStore) Shutdown() {
store.DB.Close()
}
func isValidBucket(bucket string) bool {
return bucket != DEFAULT_TABLE && bucket != ""
}
func (store *AbstractSqlStore) CreateTable(ctx context.Context, bucket string) error {
if !store.SupportBucketTable {
return nil
}
_, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlCreateTable(bucket))
return err
}
func (store *AbstractSqlStore) deleteTable(ctx context.Context, bucket string) error {
if !store.SupportBucketTable {
return nil
}
_, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlDropTable(bucket))
return err
}
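To make the SqlGenerator indirection concrete, here is a hypothetical implementation for a MySQL-flavored store. The column set (dirhash, name, directory, meta) and bind order are inferred from the ExecContext/QueryContext calls above; the real generators in mysql_sql_gen.go and postgres_sql_gen.go may differ in details such as the CREATE TABLE schema:

package example

import "fmt"

// ExampleSqlGen (hypothetical) gives every bucket its own table, so the
// bucket name doubles as the table name; DEFAULT_TABLE ("filemeta")
// serves paths outside /buckets/.
type ExampleSqlGen struct{}

func (g ExampleSqlGen) GetSqlInsert(bucket string) string {
	return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", bucket)
}
func (g ExampleSqlGen) GetSqlUpdate(bucket string) string {
	return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (g ExampleSqlGen) GetSqlFind(bucket string) string {
	return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (g ExampleSqlGen) GetSqlDelete(bucket string) string {
	return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (g ExampleSqlGen) GetSqlDeleteFolderChildren(bucket string) string {
	return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", bucket)
}
func (g ExampleSqlGen) GetSqlListExclusive(bucket string) string {
	return fmt.Sprintf("SELECT name, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name LIKE ? ORDER BY name ASC LIMIT ?", bucket)
}
func (g ExampleSqlGen) GetSqlListInclusive(bucket string) string {
	return fmt.Sprintf("SELECT name, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name LIKE ? ORDER BY name ASC LIMIT ?", bucket)
}
func (g ExampleSqlGen) GetSqlCreateTable(bucket string) string {
	return fmt.Sprintf("CREATE TABLE IF NOT EXISTS `%s` (dirhash BIGINT, name VARCHAR(1000), directory TEXT, meta LONGBLOB, PRIMARY KEY (dirhash, name))", bucket)
}
func (g ExampleSqlGen) GetSqlDropTable(bucket string) string {
	return fmt.Sprintf("DROP TABLE `%s`", bucket)
}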

23
weed/filer/abstract_sql/abstract_sql_store_kv.go

@ -13,9 +13,14 @@ import (
func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
db, _, _, err := store.getTxOrDB(ctx, "", false)
if err != nil {
return fmt.Errorf("findDB: %v", err)
}
dirStr, dirHash, name := genDirAndName(key)
res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, dirHash, name, dirStr, value)
res, err := db.ExecContext(ctx, store.GetSqlInsert(DEFAULT_TABLE), dirHash, name, dirStr, value)
if err == nil {
return
}
@ -28,7 +33,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
// now the insert failed possibly due to duplication constraints
glog.V(1).Infof("kv insert falls back to update: %s", err)
res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)
res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr)
if err != nil {
return fmt.Errorf("kv upsert: %s", err)
}
@ -43,8 +48,13 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
db, _, _, err := store.getTxOrDB(ctx, "", false)
if err != nil {
return nil, fmt.Errorf("findDB: %v", err)
}
dirStr, dirHash, name := genDirAndName(key)
row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, dirHash, name, dirStr)
row := db.QueryRowContext(ctx, store.GetSqlFind(DEFAULT_TABLE), dirHash, name, dirStr)
err = row.Scan(&value)
@ -61,9 +71,14 @@ func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []b
func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err error) {
db, _, _, err := store.getTxOrDB(ctx, "", false)
if err != nil {
return fmt.Errorf("findDB: %v", err)
}
dirStr, dirHash, name := genDirAndName(key)
res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, dirHash, name, dirStr)
res, err := db.ExecContext(ctx, store.GetSqlDelete(DEFAULT_TABLE), dirHash, name, dirStr)
if err != nil {
return fmt.Errorf("kv delete: %s", err)
}
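Note that the KV methods carry no file path, so they pass an empty path into getTxOrDB and always land in the shared DEFAULT_TABLE ("filemeta"), regardless of bucket support.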

22
weed/filer/cassandra/cassandra_store.go

@ -168,41 +168,43 @@ func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath
return nil
}
func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
return nil, filer.ErrUnsupportedListDirectoryPrefixed
func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}
func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer.Entry, err error) {
func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
if _, ok := store.isSuperLargeDirectory(string(fullpath)); ok {
if _, ok := store.isSuperLargeDirectory(string(dirPath)); ok {
return // nil, filer.ErrUnsupportedSuperLargeDirectoryListing
}
cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
if inclusive {
if includeStartFile {
cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?"
}
var data []byte
var name string
iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
iter := store.session.Query(cqlStr, string(dirPath), startFileName, limit+1).Iter()
for iter.Scan(&name, &data) {
entry := &filer.Entry{
FullPath: util.NewFullPath(string(fullpath), name),
FullPath: util.NewFullPath(string(dirPath), name),
}
lastFileName = name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
if !eachEntryFunc(entry) {
break
}
}
if err := iter.Close(); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
}
return entries, err
return lastFileName, err
}
func (store *CassandraStore) Shutdown() {
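The stores that support listing now fetch limit+1 rows: the callback sees at most one row beyond the requested page, which lets callers such as the filer's ListDirectoryEntries wrapper detect whether more entries remain without a second query.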

6
weed/filer/configuration.go

@ -2,7 +2,7 @@ package filer
import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/spf13/viper"
"github.com/chrislusf/seaweedfs/weed/util"
"os"
"reflect"
"strings"
@ -12,7 +12,7 @@ var (
Stores []FilerStore
)
func (f *Filer) LoadConfiguration(config *viper.Viper) {
func (f *Filer) LoadConfiguration(config *util.ViperProxy) {
validateOneEnabledStore(config)
@ -79,7 +79,7 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) {
}
func validateOneEnabledStore(config *viper.Viper) {
func validateOneEnabledStore(config *util.ViperProxy) {
enabledStore := ""
for _, store := range Stores {
if config.GetBool(store.GetName() + ".enabled") {

58
weed/filer/elastic/v7/elastic_store.go

@ -96,8 +96,8 @@ func (store *ElasticStore) RollbackTransaction(ctx context.Context) error {
return nil
}
func (store *ElasticStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
return nil, filer.ErrUnsupportedListDirectoryPrefixed
func (store *ElasticStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}
func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
@ -187,28 +187,28 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
}
func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
if entries, err := store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32); err == nil {
for _, entry := range entries {
store.DeleteEntry(ctx, entry.FullPath)
_, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool {
if err := store.DeleteEntry(ctx, entry.FullPath); err != nil {
glog.Errorf("elastic delete %s: %v.", entry.FullPath, err)
return false
}
}
return nil
return true
})
return
}
func (store *ElasticStore) ListDirectoryEntries(
ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
) (entries []*filer.Entry, err error) {
if string(fullpath) == "/" {
return store.listRootDirectoryEntries(ctx, startFileName, inclusive, limit)
func (store *ElasticStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
if string(dirPath) == "/" {
return store.listRootDirectoryEntries(ctx, startFileName, includeStartFile, limit, eachEntryFunc)
}
return store.listDirectoryEntries(ctx, fullpath, startFileName, inclusive, limit)
return store.listDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc)
}
func (store *ElasticStore) listRootDirectoryEntries(ctx context.Context, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
func (store *ElasticStore) listRootDirectoryEntries(ctx context.Context, startFileName string, inclusive bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
indexResult, err := store.client.CatIndices().Do(ctx)
if err != nil {
glog.Errorf("list indices %v.", err)
return entries, err
return
}
for _, index := range indexResult {
if index.Index == indexKV {
@ -218,6 +218,7 @@ func (store *ElasticStore) listRootDirectoryEntries(ctx context.Context, startFi
if entry, err := store.FindEntry(ctx,
weed_util.FullPath("/"+strings.Replace(index.Index, indexPrefix, "", 1))); err == nil {
fileName := getFileName(entry.FullPath)
lastFileName = fileName
if fileName == startFileName && !inclusive {
continue
}
@ -225,24 +226,25 @@ func (store *ElasticStore) listRootDirectoryEntries(ctx context.Context, startFi
if limit < 0 {
break
}
entries = append(entries, entry)
if !eachEntryFunc(entry) {
break
}
}
}
}
return entries, nil
return
}
func (store *ElasticStore) listDirectoryEntries(
ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
) (entries []*filer.Entry, err error) {
ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
first := true
index := getIndex(fullpath)
nextStart := ""
parentId := weed_util.Md5String([]byte(fullpath))
if _, err := store.client.Refresh(index).Do(ctx); err != nil {
if _, err = store.client.Refresh(index).Do(ctx); err != nil {
if elastic.IsNotFound(err) {
store.client.CreateIndex(index).Do(ctx)
return entries, nil
return
}
}
for {
@ -250,7 +252,7 @@ func (store *ElasticStore) listDirectoryEntries(
if (startFileName == "" && first) || inclusive {
if result, err = store.search(ctx, index, parentId); err != nil {
glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return entries, err
return
}
} else {
fullPath := string(fullpath) + "/" + startFileName
@ -260,7 +262,7 @@ func (store *ElasticStore) listDirectoryEntries(
after := weed_util.Md5String([]byte(fullPath))
if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return entries, err
return
}
}
first = false
@ -272,21 +274,21 @@ func (store *ElasticStore) listDirectoryEntries(
if err := jsoniter.Unmarshal(hit.Source, esEntry); err == nil {
limit--
if limit < 0 {
return entries, nil
return lastFileName, nil
}
nextStart = string(esEntry.Entry.FullPath)
fileName := getFileName(esEntry.Entry.FullPath)
lastFileName = fileName
if fileName == startFileName && !inclusive {
continue
}
entries = append(entries, esEntry.Entry)
if !eachEntryFunc(esEntry.Entry) {
break
}
}
if len(result.Hits.Hits) < store.maxPageSize {
break
}
}
return entries, nil
return
}
func (store *ElasticStore) search(ctx context.Context, index, parentId string) (result *elastic.SearchResult, err error) {

23
weed/filer/etcd/etcd_store.go

@ -101,7 +101,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPa
resp, err := store.client.Get(ctx, string(key))
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
if len(resp.Kvs) == 0 {
@ -139,17 +139,17 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_
return nil
}
func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
return nil, filer.ErrUnsupportedListDirectoryPrefixed
func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}
func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
directoryPrefix := genDirectoryKeyPrefix(dirPath, "")
resp, err := store.client.Get(ctx, string(directoryPrefix),
clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
if err != nil {
return nil, fmt.Errorf("list %s : %v", fullpath, err)
return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
}
for _, kv := range resp.Kvs {
@ -157,25 +157,28 @@ func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_
if fileName == "" {
continue
}
if fileName == startFileName && !inclusive {
if fileName == startFileName && !includeStartFile {
continue
}
lastFileName = fileName
limit--
if limit < 0 {
break
}
entry := &filer.Entry{
FullPath: weed_util.NewFullPath(string(fullpath), fileName),
FullPath: weed_util.NewFullPath(string(dirPath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
if !eachEntryFunc(entry) {
break
}
}
return entries, err
return lastFileName, err
}
func genKey(dirPath, fileName string) (key []byte) {

7
weed/filer/filechunk_manifest.go

@ -3,6 +3,7 @@ package filer
import (
"bytes"
"fmt"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"io"
"math"
"time"
@ -38,7 +39,7 @@ func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonMa
return
}
func ResolveChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
// TODO maybe parallel this
for _, chunk := range chunks {
if !chunk.IsChunkManifest {
@ -63,7 +64,7 @@ func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *fil
return
}
func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) {
func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) {
if !chunk.IsChunkManifest {
return
}
@ -84,7 +85,7 @@ func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *fil
}
// TODO fetch from cache for weed mount?
func fetchChunk(lookupFileIdFn LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
func fetchChunk(lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
urlStrings, err := lookupFileIdFn(fileId)
if err != nil {
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)

9
weed/filer/filechunks.go

@ -4,6 +4,7 @@ import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"math" "math"
"sort" "sort"
"sync" "sync"
@ -52,7 +53,7 @@ func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks)) return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
} }
func CompactFileChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks) visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
@ -71,7 +72,7 @@ func CompactFileChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_
return return
} }
func MinusChunks(lookupFileIdFn LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as) aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as)
if aErr != nil { if aErr != nil {
@ -116,7 +117,7 @@ func (cv *ChunkView) IsFullChunk() bool {
return cv.Size == cv.ChunkSize return cv.Size == cv.ChunkSize
} }
func ViewFromChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks) visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
@ -222,7 +223,7 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n
// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory // NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
// If the file chunk content is a chunk manifest // If the file chunk content is a chunk manifest
func NonOverlappingVisibleIntervals(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {
chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks) chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks)
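For context, the callback type these helpers now reference lives in the wdclient package; its shape, inferred from the calls above (and therefore an assumption, not quoted from this diff), is roughly:

// assumed signature: resolve a file id to one or more volume server URLs
type LookupFileIdFunctionType func(fileId string) (targetUrls []string, err error)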

119
weed/filer/filer.go

@ -134,14 +134,57 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
return nil
}
oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
/*
if !hasWritePermission(lastDirectoryEntry, entry) {
glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
}
*/
if oldEntry == nil {
dirParts := strings.Split(string(entry.FullPath), "/")
if err := f.ensureParentDirecotryEntry(ctx, entry, dirParts, len(dirParts)-1, isFromOtherCluster); err != nil {
return err
}
// fmt.Printf("directory parts: %+v\n", dirParts)
glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
if err := f.Store.InsertEntry(ctx, entry); err != nil {
glog.Errorf("insert entry %s: %v", entry.FullPath, err)
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
} else {
if o_excl {
glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
}
glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
glog.Errorf("update entry %s: %v", entry.FullPath, err)
return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
}
}
var lastDirectoryEntry *Entry
f.maybeAddBucket(entry)
f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures)
f.deleteChunksIfNotNew(oldEntry, entry)
for i := 1; i < len(dirParts); i++ {
dirPath := "/" + util.Join(dirParts[:i]...)
glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
return nil
}
func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, dirParts []string, level int, isFromOtherCluster bool) (err error) {
if level == 0 {
return nil
}
dirPath := "/" + util.Join(dirParts[:level]...)
// fmt.Printf("%d directory: %+v\n", i, dirPath)
// check the store directly
@ -151,6 +194,11 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
// no such existing directory
if dirEntry == nil {
// ensure parent directory
if err = f.ensureParentDirecotryEntry(ctx, entry, dirParts, level-1, isFromOtherCluster); err != nil {
return err
}
// create the directory
now := time.Now()
@ -186,53 +234,6 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
return fmt.Errorf("%s is a file", dirPath)
}
// remember the direct parent directory entry
if i == len(dirParts)-1 {
lastDirectoryEntry = dirEntry
}
}
if lastDirectoryEntry == nil {
glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
return fmt.Errorf("parent folder not found: %v", entry.FullPath)
}
/*
if !hasWritePermission(lastDirectoryEntry, entry) {
glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
}
*/
oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
if oldEntry == nil {
glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
if err := f.Store.InsertEntry(ctx, entry); err != nil {
glog.Errorf("insert entry %s: %v", entry.FullPath, err)
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
} else {
if o_excl {
glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
}
glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
glog.Errorf("update entry %s: %v", entry.FullPath, err)
return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
}
}
f.maybeAddBucket(entry)
f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures)
f.deleteChunksIfNotNew(oldEntry, entry)
glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
return nil
}
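CreateEntry now checks for an existing entry first and, only for brand-new entries, walks parent directories recursively from the leaf upward via ensureParentDirecotryEntry. A condensed, hypothetical sketch of that recursion pattern, independent of the filer types:

package main

import (
	"fmt"
	"strings"
)

// Hypothetical condensed version of the recursive ensure-parent pattern:
// make sure every ancestor of a path exists, creating from the top down.
func ensureDir(path string, exists func(string) bool, mkdir func(string) error) error {
	if path == "" || path == "/" || exists(path) {
		return nil
	}
	parent := path[:strings.LastIndex(path, "/")]
	if err := ensureDir(parent, exists, mkdir); err != nil {
		return err
	}
	return mkdir(path)
}

func main() {
	made := map[string]bool{"/": true}
	exists := func(p string) bool { return made[p] }
	mkdir := func(p string) error { made[p] = true; fmt.Println("mkdir", p); return nil }
	_ = ensureDir("/buckets/photos/2021", exists, mkdir) // mkdir /buckets, then /buckets/photos, then /buckets/photos/2021
}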
@ -280,21 +281,19 @@ func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, e
}
func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*Entry, expiredCount int, lastFileName string, err error) {
listedEntries, listErr := f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix)
if listErr != nil {
return listedEntries, expiredCount, "", listErr
}
for _, entry := range listedEntries {
lastFileName = entry.Name()
func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (expiredCount int64, lastFileName string, err error) {
lastFileName, err = f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
if entry.TtlSec > 0 {
if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
f.Store.DeleteOneEntry(ctx, entry)
expiredCount++
continue
return true
}
}
entries = append(entries, entry)
return eachEntryFunc(entry)
})
if err != nil {
return expiredCount, lastFileName, err
}
return
}

4
weed/filer/filer_buckets.go

@ -27,9 +27,9 @@ func (f *Filer) LoadBuckets() {
buckets: make(map[BucketName]*BucketOption),
}
limit := math.MaxInt32
limit := int64(math.MaxInt32)
entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "")
entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "")
if err != nil {
glog.V(1).Infof("no buckets found: %v", err)

11
weed/filer/filer_delete_entry.go

@ -30,7 +30,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
// delete the folder children, not including the folder itself
var dirChunks []*filer_pb.FileChunk
var dirHardLinkIds []HardLinkId
dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isFromOtherCluster, signatures)
dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures)
if err != nil {
glog.V(0).Infof("delete directory %s: %v", p, err)
return fmt.Errorf("delete directory %s: %v", p, err)
@ -63,12 +63,13 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
return nil
}
func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {
func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {
lastFileName := ""
includeLastFile := false
if !isDeletingBucket {
for {
entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "")
entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "")
if err != nil {
glog.Errorf("list folder %s: %v", entry.FullPath, err)
return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
@ -84,7 +85,8 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
var dirChunks []*filer_pb.FileChunk
var dirHardLinkIds []HardLinkId
if sub.IsDirectory() {
dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, false, nil)
subIsDeletingBucket := f.isBucket(sub)
dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil)
chunks = append(chunks, dirChunks...)
hardlinkIds = append(hardlinkIds, dirHardLinkIds...)
} else {
@ -105,6 +107,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
break
}
}
}
glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)

6
weed/filer/filer_notify.go

@ -113,13 +113,13 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(
sizeBuf := make([]byte, 4)
startTsNs := startTime.UnixNano()
dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "")
dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "")
if listDayErr != nil {
return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
}
for _, dayEntry := range dayEntries {
// println("checking day", dayEntry.FullPath)
hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "")
hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "")
if listHourMinuteErr != nil {
return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
}
@ -170,7 +170,7 @@ func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func
return lastTsNs, err
}
if logEntry.TsNs <= ns {
return lastTsNs, nil
continue
}
// println("each log: ", logEntry.TsNs)
if err := eachLogEntryFn(logEntry); err != nil {

70
weed/filer/filer_search.go

@ -19,58 +19,72 @@ func splitPattern(pattern string) (prefix string, restPattern string) {
return "", restPattern return "", restPattern
} }
func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, namePattern string) (entries []*Entry, err error) {
// For now, prefix and namePattern are mutually exclusive
func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string) (entries []*Entry, hasMore bool, err error) {
_, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, func(entry *Entry) bool {
entries = append(entries, entry)
return true
})
hasMore = int64(len(entries)) >= limit+1
if hasMore {
entries = entries[:limit]
}
return entries, hasMore, err
}
// For now, prefix and namePattern are mutually exclusive
func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
if strings.HasSuffix(string(p), "/") && len(p) > 1 { if strings.HasSuffix(string(p), "/") && len(p) > 1 {
p = p[0 : len(p)-1] p = p[0 : len(p)-1]
} }
prefix, restNamePattern := splitPattern(namePattern)
var missedCount int
var lastFileName string
prefixInNamePattern, restNamePattern := splitPattern(namePattern)
if prefixInNamePattern != "" {
prefix = prefixInNamePattern
}
var missedCount int64
entries, missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern)
missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, eachEntryFunc)
for missedCount > 0 && err == nil {
var makeupEntries []*Entry
makeupEntries, missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern)
for _, entry := range makeupEntries {
entries = append(entries, entry)
}
missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, eachEntryFunc)
}
return entries, err
return
}
func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix, restNamePattern string) (matchedEntries []*Entry, missedCount int, lastFileName string, err error) {
var foundEntries []*Entry
func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) {
foundEntries, lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix)
if err != nil {
return
}
if len(restNamePattern) == 0 {
return foundEntries, 0, lastFileName, nil
lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
return 0, lastFileName, err
}
for _, entry := range foundEntries {
lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
nameToTest := strings.ToLower(entry.Name())
if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && matched {
matchedEntries = append(matchedEntries, entry)
if !eachEntryFunc(entry) {
return false
}
} else {
missedCount++
}
return true
})
if err != nil {
return
}
return
}
func (f *Filer) doListValidEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*Entry, lastFileName string, err error) {
var makeupEntries []*Entry
var expiredCount int
entries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix)
func (f *Filer) doListValidEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
var expiredCount int64
expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
for expiredCount > 0 && err == nil {
makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix)
if err == nil {
entries = append(entries, makeupEntries...)
}
expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix, eachEntryFunc)
}
return
}
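A hypothetical caller of the new streaming API (the directory path, pattern, and helper name below are illustrative assumptions, not from this diff):

package example

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// printJpgs streams up to 100 "*.jpg" entries under /some/dir without
// buffering them all; returning false would stop the stream early.
func printJpgs(ctx context.Context, f *filer.Filer) error {
	_, err := f.StreamListDirectoryEntries(ctx, util.FullPath("/some/dir"), "", false, 100, "", "*.jpg",
		func(entry *filer.Entry) bool {
			fmt.Println(entry.FullPath)
			return true
		})
	return err
}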

6
weed/filer/filerstore.go

@ -13,6 +13,8 @@ var (
ErrKvNotFound = errors.New("kv: not found")
)
type ListEachEntryFunc func(entry *Entry) bool
type FilerStore interface {
// GetName gets the name to locate the configuration in filer.toml file
GetName() string
@ -24,8 +26,8 @@ type FilerStore interface {
FindEntry(context.Context, util.FullPath) (entry *Entry, err error)
DeleteEntry(context.Context, util.FullPath) (err error)
DeleteFolderChildren(context.Context, util.FullPath) (err error)
ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error)
ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error)
ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error)
BeginTransaction(ctx context.Context) (context.Context, error)
CommitTransaction(ctx context.Context) error
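Where a caller still wants a slice, the callback adapts easily; a hypothetical adapter mirroring what the filer-level wrapper does:

package example

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// listToSlice collects one page of entries from any FilerStore into a
// slice; returning false from the callback would stop the listing early.
func listToSlice(ctx context.Context, store filer.FilerStore, dirPath util.FullPath, limit int64) (entries []*filer.Entry, lastName string, err error) {
	lastName, err = store.ListDirectoryEntries(ctx, dirPath, "", false, limit, func(entry *filer.Entry) bool {
		entries = append(entries, entry)
		return true
	})
	return
}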

24
weed/filer/filerstore_translate_path.go

@ -106,32 +106,24 @@ func (t *FilerStorePathTranlator) DeleteFolderChildren(ctx context.Context, fp u
return t.actualStore.DeleteFolderChildren(ctx, newFullPath)
}
func (t *FilerStorePathTranlator) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
func (t *FilerStorePathTranlator) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) {
newFullPath := t.translatePath(dirPath)
entries, err := t.actualStore.ListDirectoryEntries(ctx, newFullPath, startFileName, includeStartFile, limit)
if err != nil {
return nil, err
}
for _, entry := range entries {
return t.actualStore.ListDirectoryEntries(ctx, newFullPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath
}
return entries, err
return eachEntryFunc(entry)
})
}
func (t *FilerStorePathTranlator) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error) {
func (t *FilerStorePathTranlator) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (string, error) {
newFullPath := t.translatePath(dirPath)
entries, err := t.actualStore.ListDirectoryPrefixedEntries(ctx, newFullPath, startFileName, includeStartFile, limit, prefix)
if err != nil {
return nil, err
}
for _, entry := range entries {
return t.actualStore.ListDirectoryPrefixedEntries(ctx, newFullPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool {
entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath
}
return entries, nil
return eachEntryFunc(entry)
})
}
func (t *FilerStorePathTranlator) BeginTransaction(ctx context.Context) (context.Context, error) {
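Note how the translator adapts to the callback style: instead of post-processing a returned slice, it decorates the caller's callback so each entry's path is rewritten on its way through. The same decorator shape in isolation (wrapWithPathFixup and the local Entry type are hypothetical, assuming a simple prefix rewrite like the one above):

package sketch

type Entry struct{ FullPath string }

// wrapWithPathFixup returns a callback that maps each entry's path from the
// store-rooted form back to the caller's view before delegating, the same
// wrapping trick FilerStorePathTranlator uses above.
func wrapWithPathFixup(callerPrefix string, inner func(*Entry) bool) func(*Entry) bool {
	return func(entry *Entry) bool {
		entry.FullPath = callerPrefix + entry.FullPath
		return inner(entry)
	}
}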

58
weed/filer/filerstore_wrapper.go

@ -194,7 +194,7 @@ func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.
return actualStore.DeleteFolderChildren(ctx, fp)
}
func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) {
actualStore := fsw.getActualStore(dirPath + "/")
stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "list").Inc()
start := time.Now()
@ -203,18 +203,14 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath
}()
glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit)
entries, err := actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
if err != nil {
return nil, err
}
for _, entry := range entries {
return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
fsw.maybeReadHardLink(ctx, entry)
filer_pb.AfterEntryDeserialization(entry.Chunks)
}
return entries, err
return eachEntryFunc(entry)
})
}
func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error) {
func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
actualStore := fsw.getActualStore(dirPath + "/")
stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "prefixList").Inc()
start := time.Now()
@ -222,48 +218,52 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context,
stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "prefixList").Observe(time.Since(start).Seconds())
}()
glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit)
entries, err := actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix)
lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, eachEntryFunc)
if err == ErrUnsupportedListDirectoryPrefixed {
entries, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix)
}
if err != nil {
return nil, err
}
for _, entry := range entries {
lastFileName, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool {
fsw.maybeReadHardLink(ctx, entry)
filer_pb.AfterEntryDeserialization(entry.Chunks)
return eachEntryFunc(entry)
})
}
return entries, nil
return lastFileName, err
}
func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) (entries []*Entry, err error) {
func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
actualStore := fsw.getActualStore(dirPath + "/")
entries, err = actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
if err != nil {
return nil, err
}
if prefix == "" {
return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc)
}
var notPrefixed []*Entry
lastFileName, err = actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
notPrefixed = append(notPrefixed, entry)
return true
})
if err != nil {
return
}
count := 0
var lastFileName string
notPrefixed := entries
entries = nil
count := int64(0)
for count < limit && len(notPrefixed) > 0 {
for _, entry := range notPrefixed {
lastFileName = entry.Name()
if strings.HasPrefix(entry.Name(), prefix) {
count++
entries = append(entries, entry)
if !eachEntryFunc(entry) {
return
}
if count >= limit {
break
}
}
}
if count < limit {
notPrefixed, err = actualStore.ListDirectoryEntries(ctx, dirPath, lastFileName, false, limit)
notPrefixed = notPrefixed[:0]
_, err = actualStore.ListDirectoryEntries(ctx, dirPath, lastFileName, false, limit, func(entry *Entry) bool {
notPrefixed = append(notPrefixed, entry)
return true
})
if err != nil {
return
}
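When a store cannot filter by prefix natively and returns ErrUnsupportedListDirectoryPrefixed, the wrapper falls back to paging through unfiltered listings and filtering client-side, as prefixFilterEntries does above. The matching step of that fallback in isolation (filterByPrefix is a hypothetical name; the paging loop is omitted):

package sketch

import "strings"

// filterByPrefix keeps only names carrying the wanted prefix, stopping once
// limit matches are collected; the wrapper runs this over successive
// unfiltered pages until the limit is satisfied or the directory ends.
func filterByPrefix(page []string, prefix string, limit int) (kept []string) {
	for _, name := range page {
		if strings.HasPrefix(name, prefix) {
			kept = append(kept, name)
			if len(kept) >= limit {
				return
			}
		}
	}
	return
}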

19
weed/filer/hbase/hbase_store.go

@ -148,19 +148,18 @@ func (store *HbaseStore) DeleteFolderChildren(ctx context.Context, path util.Ful
return
}
func (store *HbaseStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer.Entry, error) {
return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "")
func (store *HbaseStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (string, error) {
return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}
func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*filer.Entry, error) {
func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
family := map[string][]string{store.cfMetaDir: {COLUMN_NAME}}
expectedPrefix := []byte(dirPath.Child(prefix))
scan, err := hrpc.NewScanRange(ctx, store.table, expectedPrefix, nil, hrpc.Families(family))
if err != nil {
return nil, err
return lastFileName, err
}
var entries []*filer.Entry
scanner := store.Client.Scan(scan)
defer scanner.Close()
for {
@ -169,7 +168,7 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa
break
}
if err != nil {
return entries, err
return lastFileName, err
}
if len(res.Cells) == 0 {
continue
@ -186,6 +185,8 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa
continue
}
lastFileName = fileName
value := cell.Value
if fileName == startFileName && !includeStartFile {
@ -204,10 +205,12 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
if !eachEntryFunc(entry) {
break
}
}
return entries, nil
return lastFileName, nil
}
func (store *HbaseStore) BeginTransaction(ctx context.Context) (context.Context, error) {

1
weed/filer/hbase/hbase_store_kv.go

@ -10,6 +10,7 @@ import (
const (
COLUMN_NAME = "a"
)
func (store *HbaseStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
return store.doPut(ctx, store.cfKv, key, value, 0)
}

28
weed/filer/leveldb/leveldb_store.go

@ -107,7 +107,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
return nil, filer_pb.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
entry = &filer.Entry{
@ -162,16 +162,19 @@ func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath we
return nil
}
func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer.Entry, err error) {
return store.ListDirectoryPrefixedEntries(ctx, fullpath, startFileName, inclusive, limit, "")
func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}
func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, prefix)
directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix)
lastFileStart := directoryPrefix
if startFileName != "" {
lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
}
iter := store.db.NewIterator(&leveldb_util.Range{Start: genDirectoryKeyPrefix(fullpath, startFileName)}, nil)
iter := store.db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
for iter.Next() {
key := iter.Key()
if !bytes.HasPrefix(key, directoryPrefix) {
@ -181,26 +184,29 @@ func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, ful
if fileName == "" {
continue
}
if fileName == startFileName && !inclusive {
if fileName == startFileName && !includeStartFile {
continue
}
lastFileName = fileName
limit--
if limit < 0 {
break
}
entry := &filer.Entry{
FullPath: weed_util.NewFullPath(string(fullpath), fileName),
FullPath: weed_util.NewFullPath(string(dirPath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
if !eachEntryFunc(entry) {
break
}
}
iter.Release()
return entries, err
return lastFileName, err
}
func genKey(dirPath, fileName string) (key []byte) {
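The start-key fix above matters because of how these stores lay out keys: md5(directory) followed by the file name, so a directory listing is one contiguous prefix scan and resuming means seeking to hash+startFileName (an empty start now correctly falls back to the bare directory hash). A small standalone sketch of that key scheme, mirroring hashToBytes and genDirectoryKeyPrefix in the leveldb3 file below:

package main

import (
	"crypto/md5"
	"fmt"
)

// dirKeyPrefix mimics the leveldb stores' key scheme: the md5 of the
// directory path, optionally followed by a file name, so all entries of one
// directory sort together and a scan can seek straight to a cursor.
func dirKeyPrefix(dir, startFileName string) []byte {
	h := md5.Sum([]byte(dir))
	return append(h[:], []byte(startFileName)...)
}

func main() {
	fmt.Printf("seek iterator to % x\n", dirKeyPrefix("/home/chris", "file1.txt"))
}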

33
weed/filer/leveldb/leveldb_store_test.go

@ -2,9 +2,11 @@ package leveldb
import (
"context"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
@ -49,14 +51,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
entries, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "")
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
entries, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@ -75,7 +77,7 @@ func TestEmptyRoot(t *testing.T) {
ctx := context.Background()
// checking one upper directory
entries, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return
@ -86,3 +88,28 @@
}
}
func BenchmarkInsertEntry(b *testing.B) {
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench")
defer os.RemoveAll(dir)
store := &LevelDBStore{}
store.initialize(dir)
testFiler.SetStore(store)
ctx := context.Background()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
entry := &filer.Entry{
FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)),
Attr: filer.Attr{
Crtime: time.Now(),
Mtime: time.Now(),
Mode: os.FileMode(0644),
},
}
store.InsertEntry(ctx, entry)
}
}

27
weed/filer/leveldb2/leveldb2_store.go

@ -115,7 +115,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.Fu
return nil, filer_pb.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
entry = &filer.Entry{
@ -171,15 +171,17 @@ func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath w
return nil
}
func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer.Entry, err error) {
return store.ListDirectoryPrefixedEntries(ctx, fullpath, startFileName, inclusive, limit, "")
func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}
func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, prefix, store.dbCount)
lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount)
directoryPrefix, partitionId := genDirectoryKeyPrefix(dirPath, prefix, store.dbCount)
lastFileStart := directoryPrefix
if startFileName != "" {
lastFileStart, _ = genDirectoryKeyPrefix(dirPath, startFileName, store.dbCount)
}
iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
for iter.Next() {
@ -191,15 +193,16 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, fu
if fileName == "" {
continue
}
if fileName == startFileName && !inclusive {
if fileName == startFileName && !includeStartFile {
continue
}
lastFileName = fileName
limit--
if limit < 0 {
break
}
entry := &filer.Entry{
FullPath: weed_util.NewFullPath(string(fullpath), fileName),
FullPath: weed_util.NewFullPath(string(dirPath), fileName),
}
// println("list", entry.FullPath, "chunks", len(entry.Chunks))
@ -208,11 +211,13 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, fu
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
if !eachEntryFunc(entry) {
break
}
}
iter.Release()
return entries, err
return lastFileName, err
}
func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) {

6
weed/filer/leveldb2/leveldb2_store_test.go

@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
entries, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "")
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
entries, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) {
ctx := context.Background()
// checking one upper directory
entries, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return

375
weed/filer/leveldb3/leveldb3_store.go

@ -0,0 +1,375 @@
package leveldb
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"github.com/syndtr/goleveldb/leveldb"
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"io"
"os"
"strings"
"sync"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
const (
DEFAULT = "_main"
)
func init() {
filer.Stores = append(filer.Stores, &LevelDB3Store{})
}
type LevelDB3Store struct {
dir string
dbs map[string]*leveldb.DB
dbsLock sync.RWMutex
}
func (store *LevelDB3Store) GetName() string {
return "leveldb3"
}
func (store *LevelDB3Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
dir := configuration.GetString(prefix + "dir")
return store.initialize(dir)
}
func (store *LevelDB3Store) initialize(dir string) (err error) {
glog.Infof("filer store leveldb3 dir: %s", dir)
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
store.dir = dir
db, loadDbErr := store.loadDB(DEFAULT)
if loadDbErr != nil {
return loadDbErr
}
store.dbs = make(map[string]*leveldb.DB)
store.dbs[DEFAULT] = db
return
}
func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) {
opts := &opt.Options{
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 4,
}
if name != DEFAULT {
opts = &opt.Options{
BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 4,
}
}
dbFolder := fmt.Sprintf("%s/%s", store.dir, name)
os.MkdirAll(dbFolder, 0755)
db, dbErr := leveldb.OpenFile(dbFolder, opts)
if leveldb_errors.IsCorrupted(dbErr) {
db, dbErr = leveldb.RecoverFile(dbFolder, opts)
}
if dbErr != nil {
glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
return nil, dbErr
}
return db, nil
}
func (store *LevelDB3Store) findDB(fullpath weed_util.FullPath, isForChildren bool) (*leveldb.DB, string, weed_util.FullPath, error) {
store.dbsLock.RLock()
defaultDB := store.dbs[DEFAULT]
if !strings.HasPrefix(string(fullpath), "/buckets/") {
store.dbsLock.RUnlock()
return defaultDB, DEFAULT, fullpath, nil
}
// detect bucket
bucketAndObjectKey := string(fullpath)[len("/buckets/"):]
t := strings.Index(bucketAndObjectKey, "/")
if t < 0 && !isForChildren {
store.dbsLock.RUnlock()
return defaultDB, DEFAULT, fullpath, nil
}
bucket := bucketAndObjectKey
shortPath := weed_util.FullPath("/")
if t > 0 {
bucket = bucketAndObjectKey[:t]
shortPath = weed_util.FullPath(bucketAndObjectKey[t:])
}
if db, found := store.dbs[bucket]; found {
store.dbsLock.RUnlock()
return db, bucket, shortPath, nil
}
store.dbsLock.RUnlock()
// upgrade to write lock
store.dbsLock.Lock()
defer store.dbsLock.Unlock()
// double check after getting the write lock
if db, found := store.dbs[bucket]; found {
return db, bucket, shortPath, nil
}
// create db
db, err := store.loadDB(bucket)
if err != nil {
return nil, bucket, shortPath, err
}
store.dbs[bucket] = db
return db, bucket, shortPath, nil
}
func (store *LevelDB3Store) closeDB(bucket string) {
store.dbsLock.Lock()
defer store.dbsLock.Unlock()
if db, found := store.dbs[bucket]; found {
db.Close()
delete(store.dbs, bucket)
}
}
func (store *LevelDB3Store) BeginTransaction(ctx context.Context) (context.Context, error) {
return ctx, nil
}
func (store *LevelDB3Store) CommitTransaction(ctx context.Context) error {
return nil
}
func (store *LevelDB3Store) RollbackTransaction(ctx context.Context) error {
return nil
}
func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
db, _, shortPath, err := store.findDB(entry.FullPath, false)
if err != nil {
return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
}
dir, name := shortPath.DirAndName()
key := genKey(dir, name)
value, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
if len(entry.Chunks) > 50 {
value = weed_util.MaybeGzipData(value)
}
err = db.Put(key, value, nil)
if err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
}
// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
return nil
}
func (store *LevelDB3Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
return store.InsertEntry(ctx, entry)
}
func (store *LevelDB3Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
db, _, shortPath, err := store.findDB(fullpath, false)
if err != nil {
return nil, fmt.Errorf("findDB %s : %v", fullpath, err)
}
dir, name := shortPath.DirAndName()
key := genKey(dir, name)
data, err := db.Get(key, nil)
if err == leveldb.ErrNotFound {
return nil, filer_pb.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
entry = &filer.Entry{
FullPath: fullpath,
}
err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(data))
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
return entry, nil
}
func (store *LevelDB3Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
db, _, shortPath, err := store.findDB(fullpath, false)
if err != nil {
return fmt.Errorf("findDB %s : %v", fullpath, err)
}
dir, name := shortPath.DirAndName()
key := genKey(dir, name)
err = db.Delete(key, nil)
if err != nil {
return fmt.Errorf("delete %s : %v", fullpath, err)
}
return nil
}
func (store *LevelDB3Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
db, bucket, shortPath, err := store.findDB(fullpath, true)
if err != nil {
return fmt.Errorf("findDB %s : %v", fullpath, err)
}
if bucket != DEFAULT && shortPath == "/" {
store.closeDB(bucket)
if bucket != "" { // just to make sure
os.RemoveAll(store.dir + "/" + bucket)
}
return nil
}
directoryPrefix := genDirectoryKeyPrefix(shortPath, "")
batch := new(leveldb.Batch)
iter := db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil)
for iter.Next() {
key := iter.Key()
if !bytes.HasPrefix(key, directoryPrefix) {
break
}
fileName := getNameFromKey(key)
if fileName == "" {
continue
}
batch.Delete(append(directoryPrefix, []byte(fileName)...))
}
iter.Release()
err = db.Write(batch, nil)
if err != nil {
return fmt.Errorf("delete %s : %v", fullpath, err)
}
return nil
}
func (store *LevelDB3Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}
func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
db, _, shortPath, err := store.findDB(dirPath, true)
if err != nil {
return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
}
directoryPrefix := genDirectoryKeyPrefix(shortPath, prefix)
lastFileStart := directoryPrefix
if startFileName != "" {
lastFileStart = genDirectoryKeyPrefix(shortPath, startFileName)
}
iter := db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
for iter.Next() {
key := iter.Key()
if !bytes.HasPrefix(key, directoryPrefix) {
break
}
fileName := getNameFromKey(key)
if fileName == "" {
continue
}
if fileName == startFileName && !includeStartFile {
continue
}
lastFileName = fileName
limit--
if limit < 0 {
break
}
entry := &filer.Entry{
FullPath: weed_util.NewFullPath(string(dirPath), fileName),
}
// println("list", entry.FullPath, "chunks", len(entry.Chunks))
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {
break
}
}
iter.Release()
return lastFileName, err
}
func genKey(dirPath, fileName string) (key []byte) {
key = hashToBytes(dirPath)
key = append(key, []byte(fileName)...)
return key
}
func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = hashToBytes(string(fullpath))
if len(startFileName) > 0 {
keyPrefix = append(keyPrefix, []byte(startFileName)...)
}
return keyPrefix
}
func getNameFromKey(key []byte) string {
return string(key[md5.Size:])
}
// hash directory
func hashToBytes(dir string) []byte {
h := md5.New()
io.WriteString(h, dir)
b := h.Sum(nil)
return b
}
func (store *LevelDB3Store) Shutdown() {
for _, db := range store.dbs {
db.Close()
}
}
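leveldb3's novelty over leveldb2 is physical per-bucket databases: findDB routes any path under /buckets/ to its own LevelDB instance, created lazily under a double-checked write lock, so dropping a bucket is just closing one DB and removing its folder. A standalone sketch of the routing rule alone, ignoring the isForChildren subtlety and the locking (splitBucketPath is a hypothetical name):

package main

import (
	"fmt"
	"strings"
)

// splitBucketPath mirrors the mapping in findDB above: paths under /buckets/
// go to a per-bucket database plus a path relative to that bucket;
// everything else stays in the default database.
func splitBucketPath(fullpath string) (dbName, shortPath string) {
	const root = "/buckets/"
	if !strings.HasPrefix(fullpath, root) {
		return "_main", fullpath
	}
	rest := fullpath[len(root):]
	if i := strings.Index(rest, "/"); i > 0 {
		return rest[:i], rest[i:]
	}
	return "_main", fullpath // the bucket directory itself lives in the default db
}

func main() {
	db, p := splitBucketPath("/buckets/photos/2021/a.jpg")
	fmt.Println(db, p) // photos /2021/a.jpg
}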

46
weed/filer/leveldb3/leveldb3_store_kv.go

@ -0,0 +1,46 @@
package leveldb
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/syndtr/goleveldb/leveldb"
)
func (store *LevelDB3Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
err = store.dbs[DEFAULT].Put(key, value, nil)
if err != nil {
return fmt.Errorf("kv put: %v", err)
}
return nil
}
func (store *LevelDB3Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
value, err = store.dbs[DEFAULT].Get(key, nil)
if err == leveldb.ErrNotFound {
return nil, filer.ErrKvNotFound
}
if err != nil {
return nil, fmt.Errorf("kv get: %v", err)
}
return
}
func (store *LevelDB3Store) KvDelete(ctx context.Context, key []byte) (err error) {
err = store.dbs[DEFAULT].Delete(key, nil)
if err != nil {
return fmt.Errorf("kv delete: %v", err)
}
return nil
}

88
weed/filer/leveldb3/leveldb3_store_test.go

@ -0,0 +1,88 @@
package leveldb
import (
"context"
"io/ioutil"
"os"
"testing"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
)
func TestCreateAndFind(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDB3Store{}
store.initialize(dir)
testFiler.SetStore(store)
fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
ctx := context.Background()
entry1 := &filer.Entry{
FullPath: fullpath,
Attr: filer.Attr{
Mode: 0440,
Uid: 1234,
Gid: 5678,
},
}
if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
t.Errorf("create entry %v: %v", entry1.FullPath, err)
return
}
entry, err := testFiler.FindEntry(ctx, fullpath)
if err != nil {
t.Errorf("find entry: %v", err)
return
}
if entry.FullPath != entry1.FullPath {
t.Errorf("find wrong entry: %v", entry.FullPath)
return
}
// checking one upper directory
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
}
func TestEmptyRoot(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir)
store := &LevelDB3Store{}
store.initialize(dir)
testFiler.SetStore(store)
ctx := context.Background()
// checking one upper directory
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return
}
if len(entries) != 0 {
t.Errorf("list entries count: %v", len(entries))
return
}
}

22
weed/filer/mongodb/mongodb_store.go

@ -178,14 +178,14 @@ func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath ut
return nil
}
func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
return nil, filer.ErrUnsupportedListDirectoryPrefixed
func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}
func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
var where = bson.M{"directory": string(fullpath), "name": bson.M{"$gt": startFileName}}
if inclusive {
var where = bson.M{"directory": string(dirPath), "name": bson.M{"$gt": startFileName}}
if includeStartFile {
where["name"] = bson.M{
"$gte": startFileName,
}
@ -197,26 +197,30 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
var data Model
err := cur.Decode(&data)
if err != nil && err != mongo.ErrNoDocuments {
return nil, err
return lastFileName, err
}
entry := &filer.Entry{
FullPath: util.NewFullPath(string(fullpath), data.Name),
FullPath: util.NewFullPath(string(dirPath), data.Name),
}
lastFileName = data.Name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
if !eachEntryFunc(entry) {
break
}
}
if err := cur.Close(ctx); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
}
return entries, err
return lastFileName, err
}
func (store *MongodbStore) Shutdown() {

52
weed/filer/mysql/mysql_sql_gen.go

@ -0,0 +1,52 @@
package mysql
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
_ "github.com/go-sql-driver/mysql"
)
type SqlGenMysql struct {
CreateTableSqlTemplate string
DropTableSqlTemplate string
}
var (
_ = abstract_sql.SqlGenerator(&SqlGenMysql{})
)
func (gen *SqlGenMysql) GetSqlInsert(bucket string) string {
return fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES(?,?,?,?)", bucket)
}
func (gen *SqlGenMysql) GetSqlUpdate(bucket string) string {
return fmt.Sprintf("UPDATE %s SET meta=? WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlFind(bucket string) string {
return fmt.Sprintf("SELECT meta FROM %s WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlDelete(bucket string) string {
return fmt.Sprintf("DELETE FROM %s WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(bucket string) string {
return fmt.Sprintf("DELETE FROM %s WHERE dirhash=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlListExclusive(bucket string) string {
return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
}
func (gen *SqlGenMysql) GetSqlListInclusive(bucket string) string {
return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
}
func (gen *SqlGenMysql) GetSqlCreateTable(bucket string) string {
return fmt.Sprintf(gen.CreateTableSqlTemplate, bucket)
}
func (gen *SqlGenMysql) GetSqlDropTable(bucket string) string {
return fmt.Sprintf(gen.DropTableSqlTemplate, bucket)
}
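These generators exist so every statement can be rendered against an arbitrary table name: the classic mysql store keeps rendering against the fixed filemeta table, while the new mysql2/postgres2 stores render the same templates once per bucket table. A minimal illustration of the idea (the CREATE TABLE schema below is made up for the example, not the one this commit ships):

package main

import "fmt"

// A trimmed SqlGenerator-style template: statements are parameterized by
// table name, which is what lets the *2 stores give each bucket its own table.
const createTableTemplate = "CREATE TABLE IF NOT EXISTS %s (dirhash BIGINT, name VARCHAR(1000), directory TEXT, meta LONGBLOB, PRIMARY KEY (dirhash, name))" // hypothetical schema

func main() {
	fmt.Println(fmt.Sprintf(createTableTemplate, "bucket_photos"))
	fmt.Println(fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES(?,?,?,?)", "bucket_photos"))
}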

23
weed/filer/mysql/mysql_store.go

@ -3,8 +3,9 @@ package mysql
import (
"database/sql"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"time"
"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
"github.com/chrislusf/seaweedfs/weed/util"
_ "github.com/go-sql-driver/mysql"
@ -35,20 +36,19 @@ func (store *MysqlStore) Initialize(configuration util.Configuration, prefix str
configuration.GetString(prefix+"database"),
configuration.GetInt(prefix+"connection_max_idle"),
configuration.GetInt(prefix+"connection_max_open"),
configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
configuration.GetBool(prefix+"interpolateParams"),
)
}
func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int,
interpolateParams bool) (err error) {
//
store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)"
store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?"
store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?"
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?"
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?"
func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen,
maxLifetimeSeconds int, interpolateParams bool) (err error) {
store.SupportBucketTable = false
store.SqlGenerator = &SqlGenMysql{
CreateTableSqlTemplate: "",
DropTableSqlTemplate: "drop table %s",
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
if interpolateParams {
@ -65,6 +65,7 @@ func (store *MysqlStore) initialize(user, password, hostname string, port int, d
store.DB.SetMaxIdleConns(maxIdle)
store.DB.SetMaxOpenConns(maxOpen)
store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
if err = store.DB.Ping(); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)

82
weed/filer/mysql2/mysql2_store.go

@ -0,0 +1,82 @@
package mysql2
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
"github.com/chrislusf/seaweedfs/weed/filer/mysql"
"github.com/chrislusf/seaweedfs/weed/util"
_ "github.com/go-sql-driver/mysql"
)
const (
CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
)
func init() {
filer.Stores = append(filer.Stores, &MysqlStore2{})
}
type MysqlStore2 struct {
abstract_sql.AbstractSqlStore
}
func (store *MysqlStore2) GetName() string {
return "mysql2"
}
func (store *MysqlStore2) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
configuration.GetString(prefix+"createTable"),
configuration.GetString(prefix+"username"),
configuration.GetString(prefix+"password"),
configuration.GetString(prefix+"hostname"),
configuration.GetInt(prefix+"port"),
configuration.GetString(prefix+"database"),
configuration.GetInt(prefix+"connection_max_idle"),
configuration.GetInt(prefix+"connection_max_open"),
configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
configuration.GetBool(prefix+"interpolateParams"),
)
}
func (store *MysqlStore2) initialize(createTable, user, password, hostname string, port int, database string, maxIdle, maxOpen,
maxLifetimeSeconds int, interpolateParams bool) (err error) {
store.SupportBucketTable = true
store.SqlGenerator = &mysql.SqlGenMysql{
CreateTableSqlTemplate: createTable,
DropTableSqlTemplate: "drop table %s",
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
if interpolateParams {
sqlUrl += "&interpolateParams=true"
}
var dbErr error
store.DB, dbErr = sql.Open("mysql", sqlUrl)
if dbErr != nil {
store.DB.Close()
store.DB = nil
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
}
store.DB.SetMaxIdleConns(maxIdle)
store.DB.SetMaxOpenConns(maxOpen)
store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
if err = store.DB.Ping(); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
}
if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil {
return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err)
}
return nil
}

53
weed/filer/postgres/postgres_sql_gen.go

@ -0,0 +1,53 @@
package postgres
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
_ "github.com/lib/pq"
)
type SqlGenPostgres struct {
CreateTableSqlTemplate string
DropTableSqlTemplate string
}
var (
_ = abstract_sql.SqlGenerator(&SqlGenPostgres{})
)
func (gen *SqlGenPostgres) GetSqlInsert(bucket string) string {
return fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)", bucket)
}
func (gen *SqlGenPostgres) GetSqlUpdate(bucket string) string {
return fmt.Sprintf("UPDATE %s SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4", bucket)
}
func (gen *SqlGenPostgres) GetSqlFind(bucket string) string {
return fmt.Sprintf("SELECT meta FROM %s WHERE dirhash=$1 AND name=$2 AND directory=$3", bucket)
}
func (gen *SqlGenPostgres) GetSqlDelete(bucket string) string {
return fmt.Sprintf("DELETE FROM %s WHERE dirhash=$1 AND name=$2 AND directory=$3", bucket)
}
func (gen *SqlGenPostgres) GetSqlDeleteFolderChildren(bucket string) string {
return fmt.Sprintf("DELETE FROM %s WHERE dirhash=$1 AND directory=$2", bucket)
}
func (gen *SqlGenPostgres) GetSqlListExclusive(bucket string) string {
return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5", bucket)
}
func (gen *SqlGenPostgres) GetSqlListInclusive(bucket string) string {
return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5", bucket)
}
func (gen *SqlGenPostgres) GetSqlCreateTable(bucket string) string {
return fmt.Sprintf(gen.CreateTableSqlTemplate, bucket)
}
func (gen *SqlGenPostgres) GetSqlDropTable(bucket string) string {
return fmt.Sprintf(gen.DropTableSqlTemplate, bucket)
}

18
weed/filer/postgres/postgres_store.go

@ -33,21 +33,20 @@ func (store *PostgresStore) Initialize(configuration util.Configuration, prefix
configuration.GetString(prefix+"hostname"), configuration.GetString(prefix+"hostname"),
configuration.GetInt(prefix+"port"), configuration.GetInt(prefix+"port"),
configuration.GetString(prefix+"database"), configuration.GetString(prefix+"database"),
configuration.GetString(prefix+"schema"),
configuration.GetString(prefix+"sslmode"), configuration.GetString(prefix+"sslmode"),
configuration.GetInt(prefix+"connection_max_idle"), configuration.GetInt(prefix+"connection_max_idle"),
configuration.GetInt(prefix+"connection_max_open"), configuration.GetInt(prefix+"connection_max_open"),
) )
} }
func (store *PostgresStore) initialize(user, password, hostname string, port int, database, sslmode string, maxIdle, maxOpen int) (err error) {
func (store *PostgresStore) initialize(user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen int) (err error) {
store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"
store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4"
store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2"
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5"
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5"
store.SupportBucketTable = false
store.SqlGenerator = &SqlGenPostgres{
CreateTableSqlTemplate: "",
DropTableSqlTemplate: "drop table %s",
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode) sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
if user != "" { if user != "" {
@ -59,6 +58,9 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int
if database != "" { if database != "" {
sqlUrl += " dbname=" + database sqlUrl += " dbname=" + database
} }
if schema != "" {
sqlUrl += " search_path=" + schema
}
var dbErr error var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl) store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil { if dbErr != nil {

87
weed/filer/postgres2/postgres2_store.go

@ -0,0 +1,87 @@
package postgres2
import (
"context"
"database/sql"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
"github.com/chrislusf/seaweedfs/weed/filer/postgres"
"github.com/chrislusf/seaweedfs/weed/util"
_ "github.com/lib/pq"
)
const (
CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30"
)
func init() {
filer.Stores = append(filer.Stores, &PostgresStore2{})
}
type PostgresStore2 struct {
abstract_sql.AbstractSqlStore
}
func (store *PostgresStore2) GetName() string {
return "postgres2"
}
func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
configuration.GetString(prefix+"createTable"),
configuration.GetString(prefix+"username"),
configuration.GetString(prefix+"password"),
configuration.GetString(prefix+"hostname"),
configuration.GetInt(prefix+"port"),
configuration.GetString(prefix+"database"),
configuration.GetString(prefix+"schema"),
configuration.GetString(prefix+"sslmode"),
configuration.GetInt(prefix+"connection_max_idle"),
configuration.GetInt(prefix+"connection_max_open"),
)
}
func (store *PostgresStore2) initialize(createTable, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen int) (err error) {
store.SupportBucketTable = true
store.SqlGenerator = &postgres.SqlGenPostgres{
CreateTableSqlTemplate: createTable,
DropTableSqlTemplate: "drop table %s",
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
if user != "" {
sqlUrl += " user=" + user
}
if password != "" {
sqlUrl += " password=" + password
}
if database != "" {
sqlUrl += " dbname=" + database
}
if schema != "" {
sqlUrl += " search_path=" + schema
}
var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
store.DB.Close()
store.DB = nil
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
}
store.DB.SetMaxIdleConns(maxIdle)
store.DB.SetMaxOpenConns(maxOpen)
if err = store.DB.Ping(); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
}
if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil {
return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err)
}
return nil
}

8
weed/filer/reader_at.go

@ -18,7 +18,7 @@ import (
type ChunkReadAt struct {
masterClient *wdclient.MasterClient
chunkViews []*ChunkView
lookupFileId LookupFileIdFunctionType
lookupFileId wdclient.LookupFileIdFunctionType
readerLock sync.Mutex
fileSize int64
@ -31,9 +31,7 @@ type ChunkReadAt struct {
var _ = io.ReaderAt(&ChunkReadAt{})
var _ = io.Closer(&ChunkReadAt{})
type LookupFileIdFunctionType func(fileId string) (targetUrls []string, err error)
func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionType {
vidCache := make(map[string]*filer_pb.Locations)
var vicCacheLock sync.RWMutex
@ -109,7 +107,7 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
defer c.readerLock.Unlock()
glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
return c.doReadAt(p[n:], offset+int64(n))
return c.doReadAt(p, offset)
}
func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {

6
weed/filer/redis/redis_cluster_store.go

@ -3,7 +3,7 @@ package redis
import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/go-redis/redis"
"github.com/go-redis/redis/v8"
)
func init() {
@ -20,8 +20,8 @@ func (store *RedisClusterStore) GetName() string {
func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) {
configuration.SetDefault(prefix+"useReadOnly", true)
configuration.SetDefault(prefix+"routeByLatency", true)
configuration.SetDefault(prefix+"useReadOnly", false)
configuration.SetDefault(prefix+"routeByLatency", false)
return store.initialize(
configuration.GetStringSlice(prefix+"addresses"),

2
weed/filer/redis/redis_store.go

@ -3,7 +3,7 @@ package redis
import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/go-redis/redis"
"github.com/go-redis/redis/v8"
)
func init() {

49
weed/filer/redis/universal_redis_store.go

@ -7,7 +7,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/go-redis/redis"
"github.com/go-redis/redis/v8"
"github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
@ -44,7 +44,7 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.
value = util.MaybeGzipData(value) value = util.MaybeGzipData(value)
} }
_, err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result()
_, err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result()
if err != nil { if err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err) return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
@@ -52,7 +52,7 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.
     dir, name := entry.FullPath.DirAndName()
     if name != "" {
-        _, err = store.Client.SAdd(genDirectoryListKey(dir), name).Result()
+        _, err = store.Client.SAdd(ctx, genDirectoryListKey(dir), name).Result()
         if err != nil {
             return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
         }
@@ -68,7 +68,7 @@ func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer.
 func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {

-    data, err := store.Client.Get(string(fullpath)).Result()
+    data, err := store.Client.Get(ctx, string(fullpath)).Result()
     if err == redis.Nil {
         return nil, filer_pb.ErrNotFound
     }
@@ -90,7 +90,7 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.F
 func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {

-    _, err = store.Client.Del(string(fullpath)).Result()
+    _, err = store.Client.Del(ctx, string(fullpath)).Result()

     if err != nil {
         return fmt.Errorf("delete %s : %v", fullpath, err)
@@ -98,7 +98,7 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util
     dir, name := fullpath.DirAndName()
     if name != "" {
-        _, err = store.Client.SRem(genDirectoryListKey(dir), name).Result()
+        _, err = store.Client.SRem(ctx, genDirectoryListKey(dir), name).Result()
         if err != nil {
             return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
         }
@@ -109,14 +109,14 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util
 func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {

-    members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result()
+    members, err := store.Client.SMembers(ctx, genDirectoryListKey(string(fullpath))).Result()
     if err != nil {
         return fmt.Errorf("delete folder %s : %v", fullpath, err)
     }

     for _, fileName := range members {
         path := util.NewFullPath(string(fullpath), fileName)
-        _, err = store.Client.Del(string(path)).Result()
+        _, err = store.Client.Del(ctx, string(path)).Result()
         if err != nil {
             return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
         }
@@ -125,17 +125,16 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full
     return nil
 }

-func (store *UniversalRedisStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
-    return nil, filer.ErrUnsupportedListDirectoryPrefixed
+func (store *UniversalRedisStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+    return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
 }

-func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
-    limit int) (entries []*filer.Entry, err error) {
+func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

-    dirListKey := genDirectoryListKey(string(fullpath))
-    members, err := store.Client.SMembers(dirListKey).Result()
+    dirListKey := genDirectoryListKey(string(dirPath))
+    members, err := store.Client.SMembers(ctx, dirListKey).Result()
     if err != nil {
-        return nil, fmt.Errorf("list %s : %v", fullpath, err)
+        return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
     }

     // skip
@@ -144,7 +143,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
     for _, m := range members {
         if strings.Compare(m, startFileName) >= 0 {
             if m == startFileName {
-                if inclusive {
+                if includeStartFile {
                     t = append(t, m)
                 }
             } else {
@@ -161,29 +160,35 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
     })

     // limit
-    if limit < len(members) {
+    if limit < int64(len(members)) {
         members = members[:limit]
     }

     // fetch entry meta
     for _, fileName := range members {
-        path := util.NewFullPath(string(fullpath), fileName)
+        path := util.NewFullPath(string(dirPath), fileName)
         entry, err := store.FindEntry(ctx, path)
+        lastFileName = fileName
         if err != nil {
             glog.V(0).Infof("list %s : %v", path, err)
+            if err == filer_pb.ErrNotFound {
+                continue
+            }
         } else {
             if entry.TtlSec > 0 {
                 if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
-                    store.Client.Del(string(path)).Result()
-                    store.Client.SRem(dirListKey, fileName).Result()
+                    store.Client.Del(ctx, string(path)).Result()
+                    store.Client.SRem(ctx, dirListKey, fileName).Result()
                     continue
                 }
             }
-            entries = append(entries, entry)
+            if !eachEntryFunc(entry) {
+                break
+            }
         }
     }

-    return entries, err
+    return lastFileName, err
 }

 func genDirectoryListKey(dir string) (dirList string) {
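
The listing change above is the pattern repeated across every store in this diff: instead of materializing an `entries` slice, stores stream each entry into an `eachEntryFunc` callback (returning false stops the scan) and return `lastFileName` so the caller can resume. A hypothetical caller-side sketch, assuming the store interface exposes the new signature; the page size 1024 is arbitrary:

```go
package example

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// walkDir is an illustrative pagination loop over the callback-based listing.
func walkDir(ctx context.Context, store filer.FilerStore, dirPath util.FullPath) error {
	startFrom, includeStart := "", false
	for {
		count := 0
		lastName, err := store.ListDirectoryEntries(ctx, dirPath, startFrom, includeStart, 1024,
			func(entry *filer.Entry) bool {
				count++       // visit each entry as it streams in
				return true   // returning false would stop the scan early
			})
		if err != nil {
			return err
		}
		if count == 0 {
			return nil // nothing more to read
		}
		startFrom, includeStart = lastName, false // resume after the last seen name
	}
}
```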

8  weed/filer/redis/universal_redis_store_kv.go

@@ -5,12 +5,12 @@ import (
     "fmt"

     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/go-redis/redis"
+    "github.com/go-redis/redis/v8"
 )

 func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

-    _, err = store.Client.Set(string(key), value, 0).Result()
+    _, err = store.Client.Set(ctx, string(key), value, 0).Result()

     if err != nil {
         return fmt.Errorf("kv put: %v", err)
@@ -21,7 +21,7 @@ func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value [
 func (store *UniversalRedisStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

-    data, err := store.Client.Get(string(key)).Result()
+    data, err := store.Client.Get(ctx, string(key)).Result()

     if err == redis.Nil {
         return nil, filer.ErrKvNotFound
@@ -32,7 +32,7 @@ func (store *UniversalRedisStore) KvGet(ctx context.Context, key []byte) (value
 func (store *UniversalRedisStore) KvDelete(ctx context.Context, key []byte) (err error) {

-    _, err = store.Client.Del(string(key)).Result()
+    _, err = store.Client.Del(ctx, string(key)).Result()

     if err != nil {
         return fmt.Errorf("kv delete: %v", err)

6  weed/filer/redis2/redis_cluster_store.go

@@ -3,7 +3,7 @@ package redis2
 import (
     "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/util"
-    "github.com/go-redis/redis"
+    "github.com/go-redis/redis/v8"
 )

 func init() {
@@ -20,8 +20,8 @@ func (store *RedisCluster2Store) GetName() string {
 func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) {

-    configuration.SetDefault(prefix+"useReadOnly", true)
-    configuration.SetDefault(prefix+"routeByLatency", true)
+    configuration.SetDefault(prefix+"useReadOnly", false)
+    configuration.SetDefault(prefix+"routeByLatency", false)

     return store.initialize(
         configuration.GetStringSlice(prefix+"addresses"),

2  weed/filer/redis2/redis_store.go

@@ -3,7 +3,7 @@ package redis2
 import (
     "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/util"
-    "github.com/go-redis/redis"
+    "github.com/go-redis/redis/v8"
 )

 func init() {

51  weed/filer/redis2/universal_redis_store.go

@@ -5,7 +5,7 @@ import (
     "fmt"
     "time"

-    "github.com/go-redis/redis"
+    "github.com/go-redis/redis/v8"

     "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/glog"
@@ -56,7 +56,7 @@ func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer
         value = util.MaybeGzipData(value)
     }

-    if err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
+    if err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
         return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
     }

@@ -66,7 +66,7 @@ func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer
     }

     if name != "" {
-        if err = store.Client.ZAddNX(genDirectoryListKey(dir), redis.Z{Score: 0, Member: name}).Err(); err != nil {
+        if err = store.Client.ZAddNX(ctx, genDirectoryListKey(dir), &redis.Z{Score: 0, Member: name}).Err(); err != nil {
             return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
         }
     }
@@ -81,7 +81,7 @@ func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer
 func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {

-    data, err := store.Client.Get(string(fullpath)).Result()
+    data, err := store.Client.Get(ctx, string(fullpath)).Result()
     if err == redis.Nil {
         return nil, filer_pb.ErrNotFound
     }
@@ -103,12 +103,12 @@ func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.
 func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {

-    _, err = store.Client.Del(genDirectoryListKey(string(fullpath))).Result()
+    _, err = store.Client.Del(ctx, genDirectoryListKey(string(fullpath))).Result()
     if err != nil {
         return fmt.Errorf("delete dir list %s : %v", fullpath, err)
     }

-    _, err = store.Client.Del(string(fullpath)).Result()
+    _, err = store.Client.Del(ctx, string(fullpath)).Result()
     if err != nil {
         return fmt.Errorf("delete %s : %v", fullpath, err)
     }
@@ -118,7 +118,7 @@ func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath uti
         return nil
     }
     if name != "" {
-        _, err = store.Client.ZRem(genDirectoryListKey(dir), name).Result()
+        _, err = store.Client.ZRem(ctx, genDirectoryListKey(dir), name).Result()
         if err != nil {
             return fmt.Errorf("DeleteEntry %s in parent dir: %v", fullpath, err)
         }
@@ -133,14 +133,14 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, ful
         return nil
     }

-    members, err := store.Client.ZRange(genDirectoryListKey(string(fullpath)), 0, -1).Result()
+    members, err := store.Client.ZRange(ctx, genDirectoryListKey(string(fullpath)), 0, -1).Result()
     if err != nil {
         return fmt.Errorf("DeleteFolderChildren %s : %v", fullpath, err)
     }

     for _, fileName := range members {
         path := util.NewFullPath(string(fullpath), fileName)
-        _, err = store.Client.Del(string(path)).Result()
+        _, err = store.Client.Del(ctx, string(path)).Result()
         if err != nil {
             return fmt.Errorf("DeleteFolderChildren %s in parent dir: %v", fullpath, err)
         }
@@ -149,45 +149,50 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, ful
     return nil
 }

-func (store *UniversalRedis2Store) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
-    return nil, filer.ErrUnsupportedListDirectoryPrefixed
+func (store *UniversalRedis2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+    return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
 }

-func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
-    limit int) (entries []*filer.Entry, err error) {
+func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

-    dirListKey := genDirectoryListKey(string(fullpath))
+    dirListKey := genDirectoryListKey(string(dirPath))
     start := int64(0)
     if startFileName != "" {
-        start, _ = store.Client.ZRank(dirListKey, startFileName).Result()
-        if !inclusive {
+        start, _ = store.Client.ZRank(ctx, dirListKey, startFileName).Result()
+        if !includeStartFile {
             start++
         }
     }

-    members, err := store.Client.ZRange(dirListKey, start, start+int64(limit)-1).Result()
+    members, err := store.Client.ZRange(ctx, dirListKey, start, start+int64(limit)-1).Result()
     if err != nil {
-        return nil, fmt.Errorf("list %s : %v", fullpath, err)
+        return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
     }

     // fetch entry meta
     for _, fileName := range members {
-        path := util.NewFullPath(string(fullpath), fileName)
+        path := util.NewFullPath(string(dirPath), fileName)
         entry, err := store.FindEntry(ctx, path)
+        lastFileName = fileName
         if err != nil {
             glog.V(0).Infof("list %s : %v", path, err)
+            if err == filer_pb.ErrNotFound {
+                continue
+            }
         } else {
             if entry.TtlSec > 0 {
                 if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
-                    store.Client.Del(string(path)).Result()
-                    store.Client.ZRem(dirListKey, fileName).Result()
+                    store.Client.Del(ctx, string(path)).Result()
+                    store.Client.ZRem(ctx, dirListKey, fileName).Result()
                     continue
                 }
             }
-            entries = append(entries, entry)
+            if !eachEntryFunc(entry) {
+                break
+            }
         }
     }

-    return entries, err
+    return lastFileName, err
 }

 func genDirectoryListKey(dir string) (dirList string) {
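
Note the second change hiding in the redis2 InsertEntry hunk: besides the new ctx argument, v8's ZAddNX takes its members as *redis.Z pointers, hence the added `&`. A compilable fragment showing just that form (the key and member names are illustrative):

```go
package example

import (
	"context"

	"github.com/go-redis/redis/v8"
)

// addToDirListing shows the v8 ZAddNX form: ctx first, members as *redis.Z.
func addToDirListing(rdb *redis.Client, dirListKey, name string) error {
	return rdb.ZAddNX(context.Background(), dirListKey, &redis.Z{Score: 0, Member: name}).Err()
}
```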

8  weed/filer/redis2/universal_redis_store_kv.go

@@ -5,12 +5,12 @@ import (
     "fmt"

     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/go-redis/redis"
+    "github.com/go-redis/redis/v8"
 )

 func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

-    _, err = store.Client.Set(string(key), value, 0).Result()
+    _, err = store.Client.Set(ctx, string(key), value, 0).Result()

     if err != nil {
         return fmt.Errorf("kv put: %v", err)
@@ -21,7 +21,7 @@ func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value [
 func (store *UniversalRedis2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

-    data, err := store.Client.Get(string(key)).Result()
+    data, err := store.Client.Get(ctx, string(key)).Result()

     if err == redis.Nil {
         return nil, filer.ErrKvNotFound
@@ -32,7 +32,7 @@ func (store *UniversalRedis2Store) KvGet(ctx context.Context, key []byte) (value
 func (store *UniversalRedis2Store) KvDelete(ctx context.Context, key []byte) (err error) {

-    _, err = store.Client.Del(string(key)).Result()
+    _, err = store.Client.Del(ctx, string(key)).Result()

     if err != nil {
         return fmt.Errorf("kv delete: %v", err)

41  weed/filer/rocksdb/README.md

@@ -0,0 +1,41 @@
# Prepare the compilation environment on linux
- sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
- sudo apt-get update -qq
- sudo apt-get install gcc-6 g++-6 libsnappy-dev zlib1g-dev libbz2-dev -qq
- export CXX="g++-6" CC="gcc-6"
- wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags2_2.0-1.1ubuntu1_amd64.deb
- sudo dpkg -i libgflags2_2.0-1.1ubuntu1_amd64.deb
- wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags-dev_2.0-1.1ubuntu1_amd64.deb
- sudo dpkg -i libgflags-dev_2.0-1.1ubuntu1_amd64.deb
# Prepare the compilation environment on mac os
```
brew install snappy
```
# install rocksdb:
```
export ROCKSDB_HOME=/Users/chris/dev/rocksdb
git clone https://github.com/facebook/rocksdb.git $ROCKSDB_HOME
pushd $ROCKSDB_HOME
make clean
make install-static
popd
```
# install gorocksdb
```
export CGO_CFLAGS="-I$ROCKSDB_HOME/include"
export CGO_LDFLAGS="-L$ROCKSDB_HOME -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd"
go get github.com/tecbot/gorocksdb
```
# compile with rocksdb
```
cd ~/go/src/github.com/chrislusf/seaweedfs/weed
go install -tags rocksdb
```

302  weed/filer/rocksdb/rocksdb_store.go

@@ -0,0 +1,302 @@
// +build rocksdb

package rocksdb

import (
    "bytes"
    "context"
    "crypto/md5"
    "fmt"
    "io"

    "github.com/tecbot/gorocksdb"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    weed_util "github.com/chrislusf/seaweedfs/weed/util"
)

func init() {
    filer.Stores = append(filer.Stores, &RocksDBStore{})
}

type options struct {
    opt *gorocksdb.Options
    ro  *gorocksdb.ReadOptions
    wo  *gorocksdb.WriteOptions
}

func (opt *options) init() {
    opt.opt = gorocksdb.NewDefaultOptions()
    opt.ro = gorocksdb.NewDefaultReadOptions()
    opt.wo = gorocksdb.NewDefaultWriteOptions()
}

func (opt *options) close() {
    opt.opt.Destroy()
    opt.ro.Destroy()
    opt.wo.Destroy()
}

type RocksDBStore struct {
    path string
    db   *gorocksdb.DB
    options
}

func (store *RocksDBStore) GetName() string {
    return "rocksdb"
}

func (store *RocksDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
    dir := configuration.GetString(prefix + "dir")
    return store.initialize(dir)
}

func (store *RocksDBStore) initialize(dir string) (err error) {
    glog.Infof("filer store rocksdb dir: %s", dir)
    if err := weed_util.TestFolderWritable(dir); err != nil {
        return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
    }
    store.options.init()
    store.opt.SetCreateIfMissing(true)
    // reduce write amplification
    // also avoid expired data stored in highest level never get compacted
    store.opt.SetLevelCompactionDynamicLevelBytes(true)
    store.opt.SetCompactionFilter(NewTTLFilter())
    // store.opt.SetMaxBackgroundCompactions(2)

    store.db, err = gorocksdb.OpenDb(store.opt, dir)

    return
}

func (store *RocksDBStore) BeginTransaction(ctx context.Context) (context.Context, error) {
    return ctx, nil
}

func (store *RocksDBStore) CommitTransaction(ctx context.Context) error {
    return nil
}

func (store *RocksDBStore) RollbackTransaction(ctx context.Context) error {
    return nil
}

func (store *RocksDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
    dir, name := entry.DirAndName()
    key := genKey(dir, name)

    value, err := entry.EncodeAttributesAndChunks()
    if err != nil {
        return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
    }

    err = store.db.Put(store.wo, key, value)

    if err != nil {
        return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
    }

    // println("saved", entry.FullPath, "chunks", len(entry.Chunks))

    return nil
}

func (store *RocksDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
    return store.InsertEntry(ctx, entry)
}

func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
    dir, name := fullpath.DirAndName()
    key := genKey(dir, name)
    data, err := store.db.Get(store.ro, key)

    if data == nil {
        return nil, filer_pb.ErrNotFound
    }
    defer data.Free()

    if err != nil {
        return nil, fmt.Errorf("get %s : %v", fullpath, err)
    }

    entry = &filer.Entry{
        FullPath: fullpath,
    }
    err = entry.DecodeAttributesAndChunks(data.Data())
    if err != nil {
        return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
    }

    // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))

    return entry, nil
}

func (store *RocksDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
    dir, name := fullpath.DirAndName()
    key := genKey(dir, name)

    err = store.db.Delete(store.wo, key)
    if err != nil {
        return fmt.Errorf("delete %s : %v", fullpath, err)
    }

    return nil
}

func (store *RocksDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
    directoryPrefix := genDirectoryKeyPrefix(fullpath, "")

    batch := gorocksdb.NewWriteBatch()
    defer batch.Destroy()

    ro := gorocksdb.NewDefaultReadOptions()
    defer ro.Destroy()
    ro.SetFillCache(false)

    iter := store.db.NewIterator(ro)
    defer iter.Close()
    err = enumerate(iter, directoryPrefix, nil, false, -1, func(key, value []byte) bool {
        batch.Delete(key)
        return true
    })
    if err != nil {
        return fmt.Errorf("delete list %s : %v", fullpath, err)
    }

    err = store.db.Write(store.wo, batch)

    if err != nil {
        return fmt.Errorf("delete %s : %v", fullpath, err)
    }

    return nil
}

func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey bool, limit int64, fn func(key, value []byte) bool) (err error) {

    if len(lastKey) == 0 {
        iter.Seek(prefix)
    } else {
        iter.Seek(lastKey)
        if !includeLastKey {
            if iter.Valid() {
                if bytes.Equal(iter.Key().Data(), lastKey) {
                    iter.Next()
                }
            }
        }
    }

    i := int64(0)
    for ; iter.Valid(); iter.Next() {

        if limit > 0 {
            i++
            if i > limit {
                break
            }
        }

        key := iter.Key().Data()

        if !bytes.HasPrefix(key, prefix) {
            break
        }

        ret := fn(key, iter.Value().Data())

        if !ret {
            break
        }
    }

    if err := iter.Err(); err != nil {
        return fmt.Errorf("prefix scan iterator: %v", err)
    }
    return nil
}

func (store *RocksDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
    return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}

func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

    directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix)
    lastFileStart := directoryPrefix
    if startFileName != "" {
        lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
    }

    ro := gorocksdb.NewDefaultReadOptions()
    defer ro.Destroy()
    ro.SetFillCache(false)

    iter := store.db.NewIterator(ro)
    defer iter.Close()
    err = enumerate(iter, directoryPrefix, lastFileStart, includeStartFile, limit, func(key, value []byte) bool {
        fileName := getNameFromKey(key)
        if fileName == "" {
            return true
        }
        entry := &filer.Entry{
            FullPath: weed_util.NewFullPath(string(dirPath), fileName),
        }
        lastFileName = fileName

        // println("list", entry.FullPath, "chunks", len(entry.Chunks))
        if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
            err = decodeErr
            glog.V(0).Infof("list %s : %v", entry.FullPath, err)
            return false
        }
        if !eachEntryFunc(entry) {
            return false
        }
        return true
    })
    if err != nil {
        return lastFileName, fmt.Errorf("prefix list %s : %v", dirPath, err)
    }

    return lastFileName, err
}

func genKey(dirPath, fileName string) (key []byte) {
    key = hashToBytes(dirPath)
    key = append(key, []byte(fileName)...)
    return key
}

func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
    keyPrefix = hashToBytes(string(fullpath))
    if len(startFileName) > 0 {
        keyPrefix = append(keyPrefix, []byte(startFileName)...)
    }
    return keyPrefix
}

func getNameFromKey(key []byte) string {
    return string(key[md5.Size:])
}

// hash directory, and use last byte for partitioning
func hashToBytes(dir string) []byte {
    h := md5.New()
    io.WriteString(h, dir)
    b := h.Sum(nil)
    return b
}

func (store *RocksDBStore) Shutdown() {
    store.db.Close()
    store.options.close()
}
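
The key scheme above is what makes listing cheap: every entry key is md5(directory) followed by the file name, so all children of a directory share a 16-byte prefix and ListDirectoryPrefixedEntries reduces to a RocksDB prefix scan. A standalone illustration of that layout (not SeaweedFS code; the names are made up):

```go
package main

import (
	"crypto/md5"
	"fmt"
)

// dirKeyPrefix mirrors the hash-prefix idea: every child of a directory
// shares the 16-byte md5 of the directory path.
func dirKeyPrefix(dir string) []byte {
	sum := md5.Sum([]byte(dir))
	return sum[:]
}

func entryKey(dir, name string) []byte {
	return append(dirKeyPrefix(dir), []byte(name)...)
}

func main() {
	a := entryKey("/home/chris", "file1.jpg")
	b := entryKey("/home/chris", "file2.jpg")
	// Both keys start with the same md5("/home/chris") prefix, so a prefix
	// scan over dirKeyPrefix("/home/chris") visits exactly these entries.
	fmt.Printf("%x\n%x\n", a, b)
}
```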

47  weed/filer/rocksdb/rocksdb_store_kv.go

@@ -0,0 +1,47 @@
// +build rocksdb

package rocksdb

import (
    "context"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
)

func (store *RocksDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

    err = store.db.Put(store.wo, key, value)

    if err != nil {
        return fmt.Errorf("kv put: %v", err)
    }

    return nil
}

func (store *RocksDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

    value, err = store.db.GetBytes(store.ro, key)

    if value == nil {
        return nil, filer.ErrKvNotFound
    }

    if err != nil {
        return nil, fmt.Errorf("kv get: %v", err)
    }

    return
}

func (store *RocksDBStore) KvDelete(ctx context.Context, key []byte) (err error) {

    err = store.db.Delete(store.wo, key)

    if err != nil {
        return fmt.Errorf("kv delete: %v", err)
    }

    return nil
}

117  weed/filer/rocksdb/rocksdb_store_test.go

@@ -0,0 +1,117 @@
// +build rocksdb

package rocksdb

import (
    "context"
    "fmt"
    "io/ioutil"
    "os"
    "testing"
    "time"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/util"
)

func TestCreateAndFind(t *testing.T) {
    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
    dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
    defer os.RemoveAll(dir)
    store := &RocksDBStore{}
    store.initialize(dir)
    testFiler.SetStore(store)

    fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")

    ctx := context.Background()

    entry1 := &filer.Entry{
        FullPath: fullpath,
        Attr: filer.Attr{
            Mode: 0440,
            Uid:  1234,
            Gid:  5678,
        },
    }

    if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
        t.Errorf("create entry %v: %v", entry1.FullPath, err)
        return
    }

    entry, err := testFiler.FindEntry(ctx, fullpath)

    if err != nil {
        t.Errorf("find entry: %v", err)
        return
    }

    if entry.FullPath != entry1.FullPath {
        t.Errorf("find wrong entry: %v", entry.FullPath)
        return
    }

    // checking one upper directory
    entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
    if len(entries) != 1 {
        t.Errorf("list entries count: %v", len(entries))
        return
    }

    // checking one upper directory
    entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
    if len(entries) != 1 {
        t.Errorf("list entries count: %v", len(entries))
        return
    }
}

func TestEmptyRoot(t *testing.T) {
    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
    dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
    defer os.RemoveAll(dir)
    store := &RocksDBStore{}
    store.initialize(dir)
    testFiler.SetStore(store)

    ctx := context.Background()

    // checking one upper directory
    entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
    if err != nil {
        t.Errorf("list entries: %v", err)
        return
    }
    if len(entries) != 0 {
        t.Errorf("list entries count: %v", len(entries))
        return
    }
}

func BenchmarkInsertEntry(b *testing.B) {
    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
    dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench")
    defer os.RemoveAll(dir)
    store := &RocksDBStore{}
    store.initialize(dir)
    testFiler.SetStore(store)

    ctx := context.Background()

    b.ReportAllocs()

    for i := 0; i < b.N; i++ {
        entry := &filer.Entry{
            FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)),
            Attr: filer.Attr{
                Crtime: time.Now(),
                Mtime:  time.Now(),
                Mode:   os.FileMode(0644),
            },
        }
        store.InsertEntry(ctx, entry)
    }
}

40  weed/filer/rocksdb/rocksdb_ttl.go

@@ -0,0 +1,40 @@
//+build rocksdb

package rocksdb

import (
    "time"

    "github.com/tecbot/gorocksdb"

    "github.com/chrislusf/seaweedfs/weed/filer"
)

type TTLFilter struct {
    skipLevel0 bool
}

func NewTTLFilter() gorocksdb.CompactionFilter {
    return &TTLFilter{
        skipLevel0: true,
    }
}

func (t *TTLFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) {
    // decode could be slow, causing write stall
    // level >0 sst can run compaction in parallel
    if !t.skipLevel0 || level > 0 {
        entry := filer.Entry{}
        if err := entry.DecodeAttributesAndChunks(val); err == nil {
            if entry.TtlSec > 0 &&
                entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now()) {
                return true, nil
            }
        }
    }
    return false, val
}

func (t *TTLFilter) Name() string {
    return "TTLFilter"
}

8  weed/filer/stream.go

@@ -13,16 +13,16 @@ import (
     "github.com/chrislusf/seaweedfs/weed/wdclient"
 )

-func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
+func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {

     // fmt.Printf("start to stream content for chunks: %+v\n", chunks)
-    chunkViews := ViewFromChunks(masterClient.LookupFileId, chunks, offset, size)
+    chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size)

     fileId2Url := make(map[string][]string)

     for _, chunkView := range chunkViews {
-        urlStrings, err := masterClient.LookupFileId(chunkView.FileId)
+        urlStrings, err := masterClient.GetLookupFileIdFunction()(chunkView.FileId)
         if err != nil {
             glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
             return err
@@ -86,7 +86,7 @@ type ChunkStreamReader struct {
     bufferOffset int64
     bufferPos    int
     chunkIndex   int
-    lookupFileId LookupFileIdFunctionType
+    lookupFileId wdclient.LookupFileIdFunctionType
 }

 var _ = io.ReadSeeker(&ChunkStreamReader{})

110  weed/filesys/dir.go

@@ -6,6 +6,7 @@ import (
     "math"
     "os"
     "strings"
+    "syscall"
     "time"

     "github.com/seaweedfs/fuse"
@@ -127,28 +128,59 @@ func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.N
 func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
     resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {

+    request, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, req.Flags&fuse.OpenExclusive != 0)
+
+    if err != nil {
+        return nil, nil, err
+    }
+    var node fs.Node
+    if request.Entry.IsDirectory {
+        node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), request.Entry)
+        return node, nil, nil
+    }
+
+    node = dir.newFile(req.Name, request.Entry)
+    file := node.(*File)
+    fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
+    return file, fh, nil
+}
+
+func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
+
+    request, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false)
+
+    if err != nil {
+        return nil, err
+    }
+    var node fs.Node
+    node = dir.newFile(req.Name, request.Entry)
+    return node, nil
+}
+
+func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exlusive bool) (*filer_pb.CreateEntryRequest, error) {
     request := &filer_pb.CreateEntryRequest{
         Directory: dir.FullPath(),
         Entry: &filer_pb.Entry{
-            Name:        req.Name,
-            IsDirectory: req.Mode&os.ModeDir > 0,
+            Name:        name,
+            IsDirectory: mode&os.ModeDir > 0,
             Attributes: &filer_pb.FuseAttributes{
                 Mtime:       time.Now().Unix(),
                 Crtime:      time.Now().Unix(),
-                FileMode:    uint32(req.Mode &^ dir.wfs.option.Umask),
-                Uid:         req.Uid,
-                Gid:         req.Gid,
+                FileMode:    uint32(mode &^ dir.wfs.option.Umask),
+                Uid:         uid,
+                Gid:         gid,
                 Collection:  dir.wfs.option.Collection,
                 Replication: dir.wfs.option.Replication,
                 TtlSec:      dir.wfs.option.TtlSec,
             },
         },
-        OExcl:      req.Flags&fuse.OpenExclusive != 0,
+        OExcl:      exlusive,
         Signatures: []int32{dir.wfs.signature},
     }
-    glog.V(1).Infof("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
+    glog.V(1).Infof("create %s/%s", dir.FullPath(), name)

-    if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+    err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

         dir.wfs.mapPbIdFromLocalToFiler(request.Entry)
         defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
@@ -157,41 +189,15 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
             if strings.Contains(err.Error(), "EEXIST") {
                 return fuse.EEXIST
             }
-            glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
+            glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), name, err)
             return fuse.EIO
         }

         dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

         return nil
-    }); err != nil {
-        return nil, nil, err
-    }
-
-    var node fs.Node
-    if request.Entry.IsDirectory {
-        node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), request.Entry)
-        return node, nil, nil
-    }
-
-    node = dir.newFile(req.Name, request.Entry)
-    file := node.(*File)
-    fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
-    return file, fh, nil
-}
-
-func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
-    if req.Mode&os.ModeNamedPipe != 0 {
-        glog.V(1).Infof("mknod named pipe %s", req.String())
-        return nil, fuse.ENOSYS
-    }
-    if req.Mode&req.Mode&os.ModeSocket != 0 {
-        glog.V(1).Infof("mknod socket %s", req.String())
-        return nil, fuse.ENOSYS
-    }
-    // not going to support mknod for normal files either
-    glog.V(1).Infof("mknod %s", req.String())
-    return nil, fuse.ENOSYS
-}
+    })
+    return request, err
 }

 func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
@@ -308,7 +314,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
             dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir}
             ret = append(ret, dirent)
         } else {
-            dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_File}
+            dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: findFileType(uint16(entry.Attributes.FileMode))}
             ret = append(ret, dirent)
         }
         return nil
@@ -319,17 +325,37 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
         glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
         return nil, fuse.EIO
     }
-    listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(math.MaxInt32))
+    listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool {
+        processEachEntryFn(entry.ToProtoEntry(), false)
+        return true
+    })
     if listErr != nil {
         glog.Errorf("list meta cache: %v", listErr)
         return nil, fuse.EIO
     }
-    for _, cachedEntry := range listedEntries {
-        processEachEntryFn(cachedEntry.ToProtoEntry(), false)
-    }
     return
 }

+func findFileType(mode uint16) fuse.DirentType {
+    switch mode & (syscall.S_IFMT & 0xffff) {
+    case syscall.S_IFSOCK:
+        return fuse.DT_Socket
+    case syscall.S_IFLNK:
+        return fuse.DT_Link
+    case syscall.S_IFREG:
+        return fuse.DT_File
+    case syscall.S_IFBLK:
+        return fuse.DT_Block
+    case syscall.S_IFDIR:
+        return fuse.DT_Dir
+    case syscall.S_IFCHR:
+        return fuse.DT_Char
+    case syscall.S_IFIFO:
+        return fuse.DT_FIFO
+    }
+    return fuse.DT_File
+}
+
 func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {

     if !req.Dir {

2  weed/filesys/filehandle.go

@@ -72,7 +72,7 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
     }

     totalRead, err := fh.readFromChunks(buff, req.Offset)
-    if err == nil {
+    if err == nil || err == io.EOF {
         maxStop := fh.readFromDirtyPages(buff, req.Offset)
         totalRead = max(maxStop-req.Offset, totalRead)
     }
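
Treating io.EOF as success here matches the io.Reader contract: a read may return n > 0 together with io.EOF, and those bytes still need the dirty-page overlay applied. A tiny standalone reminder of that contract, using the standard library's iotest helper:

```go
package main

import (
	"fmt"
	"strings"
	"testing/iotest"
)

func main() {
	// DataErrReader delivers io.EOF together with the final bytes,
	// which the io.Reader contract explicitly allows.
	r := iotest.DataErrReader(strings.NewReader("abc"))
	buf := make([]byte, 8)
	n, err := r.Read(buf)
	// Prints "3 EOF abc": the data is valid even though err == io.EOF.
	fmt.Println(n, err, string(buf[:n]))
}
```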

16  weed/filesys/meta_cache/meta_cache.go

@@ -117,22 +117,22 @@ func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err err
     return mc.localStore.DeleteEntry(ctx, fp)
 }

-func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer.Entry, error) {
+func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) error {
     mc.RLock()
     defer mc.RUnlock()

     if !mc.visitedBoundary.HasVisited(dirPath) {
-        return nil, fmt.Errorf("unsynchronized dir: %v", dirPath)
+        return fmt.Errorf("unsynchronized dir: %v", dirPath)
     }

-    entries, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
-    if err != nil {
-        return nil, err
-    }
-    for _, entry := range entries {
+    _, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *filer.Entry) bool {
         mc.mapIdFromFilerToLocal(entry)
+        return eachEntryFunc(entry)
+    })
+    if err != nil {
+        return err
     }
-    return entries, err
+    return err
 }

 func (mc *MetaCache) Shutdown() {

7  weed/filesys/meta_cache/meta_cache_init.go

@@ -19,6 +19,9 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
     util.Retry("ReadDirAllEntries", func() error {
         err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
             entry := filer.FromPbEntry(string(dirPath), pbEntry)
+            if IsHiddenSystemEntry(string(dirPath), entry.Name()) {
+                return nil
+            }
             if err := mc.doInsertEntry(context.Background(), entry); err != nil {
                 glog.V(0).Infof("read %s: %v", entry.FullPath, err)
                 return err
@@ -38,3 +41,7 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
         return
     })
 }
+
+func IsHiddenSystemEntry(dir, name string) bool {
+    return dir == "/" && name == "topics"
+}

10  weed/messaging/broker/broker_grpc_server_subscribe.go

@@ -101,6 +101,10 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
         return nil
     }

+    // fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime)
+
+    for {
+
     if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil {
         if err != io.EOF {
             // println("stopping from persisted logs", err.Error())
@@ -112,9 +116,6 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
         lastReadTime = time.Unix(0, processedTsNs)
     }

-    // fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime)
-
-    for {
         lastReadTime, err = lock.logBuffer.LoopProcessLogData(lastReadTime, func() bool {
             lock.Mutex.Lock()
             lock.cond.Wait()
@@ -122,6 +123,9 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
             return isConnected
         }, eachLogEntryFn)
         if err != nil {
+            if err == log_buffer.ResumeFromDiskError {
+                continue
+            }
             glog.Errorf("processed to %v: %v", lastReadTime, err)
             time.Sleep(3127 * time.Millisecond)
             if err != log_buffer.ResumeError {

5  weed/notification/configuration.go

@@ -4,7 +4,6 @@ import (
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/chrislusf/seaweedfs/weed/util"
     "github.com/golang/protobuf/proto"
-    "github.com/spf13/viper"
 )

 type MessageQueue interface {
@@ -21,7 +20,7 @@ var (
     Queue MessageQueue
 )

-func LoadConfiguration(config *viper.Viper, prefix string) {
+func LoadConfiguration(config *util.ViperProxy, prefix string) {

     if config == nil {
         return
@@ -43,7 +42,7 @@ func LoadConfiguration(config *viper.Viper, prefix string) {
 }

-func validateOneEnabledQueue(config *viper.Viper) {
+func validateOneEnabledQueue(config *util.ViperProxy) {

     enabledQueue := ""
     for _, queue := range MessageQueues {
         if config.GetBool(queue.GetName() + ".enabled") {

3  weed/operation/submit.go

@@ -32,7 +32,7 @@ type FilePart struct {
 type SubmitResult struct {
     FileName string `json:"fileName,omitempty"`
-    FileUrl  string `json:"fileUrl,omitempty"`
+    FileUrl  string `json:"url,omitempty"`
     Fid      string `json:"fid,omitempty"`
     Size     uint32 `json:"size,omitempty"`
     Error    string `json:"error,omitempty"`
@@ -69,6 +69,7 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart
         file.Replication = replication
         file.Collection = collection
         file.DataCenter = dataCenter
+        file.Ttl = ttl
         results[index].Size, err = file.Upload(maxMB, master, usePublicUrl, ret.Auth, grpcDialOption)
         if err != nil {
             results[index].Error = err.Error()

3  weed/replication/sink/filersink/filer_sink.go

@@ -3,6 +3,7 @@ package filersink
 import (
     "context"
     "fmt"
+    "github.com/chrislusf/seaweedfs/weed/wdclient"

     "google.golang.org/grpc"
@@ -206,7 +207,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
     })
 }

-func compareChunks(lookupFileIdFn filer.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
+func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
     aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
     if aErr != nil {
         return nil, nil, aErr

31  weed/s3api/auth_credentials.go

@@ -156,7 +156,36 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt

 // check whether the request has valid access keys
 func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) (*Identity, s3err.ErrorCode) {
-    identity, s3Err := iam.authUser(r)
+    var identity *Identity
+    var s3Err s3err.ErrorCode
+    var found bool
+    switch getRequestAuthType(r) {
+    case authTypeStreamingSigned:
+        return identity, s3err.ErrNone
+    case authTypeUnknown:
+        glog.V(3).Infof("unknown auth type")
+        return identity, s3err.ErrAccessDenied
+    case authTypePresignedV2, authTypeSignedV2:
+        glog.V(3).Infof("v2 auth type")
+        identity, s3Err = iam.isReqAuthenticatedV2(r)
+    case authTypeSigned, authTypePresigned:
+        glog.V(3).Infof("v4 auth type")
+        identity, s3Err = iam.reqSignatureV4Verify(r)
+    case authTypePostPolicy:
+        glog.V(3).Infof("post policy auth type")
+        return identity, s3err.ErrNone
+    case authTypeJWT:
+        glog.V(3).Infof("jwt auth type")
+        return identity, s3err.ErrNotImplemented
+    case authTypeAnonymous:
+        identity, found = iam.lookupAnonymous()
+        if !found {
+            return identity, s3err.ErrAccessDenied
+        }
+    default:
+        return identity, s3err.ErrNotImplemented
+    }
     if s3Err != s3err.ErrNone {
         return identity, s3Err
     }

6  weed/s3api/s3api_objects_list_handlers.go

@@ -71,7 +71,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
         ContinuationToken:     continuationToken,
         Delimiter:             response.Delimiter,
         IsTruncated:           response.IsTruncated,
-        KeyCount:              len(response.Contents),
+        KeyCount:              len(response.Contents) + len(response.CommonPrefixes),
         MaxKeys:               response.MaxKeys,
         NextContinuationToken: response.NextMarker,
         Prefix:                response.Prefix,
@@ -238,7 +238,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
                 return
             }
         }
-        if counter >= maxKeys {
+        if counter >= maxKeys+1 {
             isTruncated = true
             return
         }
@@ -264,9 +264,11 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
             }
         } else {
             var isEmpty bool
+            if !s3a.option.AllowEmptyFolder {
                 if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
                     glog.Errorf("check empty folder %s: %v", dir, err)
                 }
+            }
             if !isEmpty {
                 eachEntryFn(dir, entry)
                 counter++
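
The `maxKeys+1` comparison is the usual over-fetch trick: request one entry beyond the page size, and the presence of that extra entry proves the listing is truncated without issuing a second query. A generic sketch of the pattern (the fetch function is hypothetical, not the S3 handler itself):

```go
package example

// listPage returns up to pageSize items plus a truncation flag by requesting
// one extra item; fetch stands in for any paged data source.
func listPage(fetch func(limit int) []string, pageSize int) (items []string, truncated bool) {
	items = fetch(pageSize + 1)
	if len(items) > pageSize {
		return items[:pageSize], true // the extra row proves there is more
	}
	return items, false
}
```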

1  weed/s3api/s3api_server.go

@@ -20,6 +20,7 @@ type S3ApiServerOption struct {
     DomainName       string
     BucketsPath      string
     GrpcDialOption   grpc.DialOption
+    AllowEmptyFolder bool
 }

 type S3ApiServer struct {

7  weed/security/tls.go

@@ -3,17 +3,16 @@ package security
 import (
     "crypto/tls"
     "crypto/x509"
+    "github.com/chrislusf/seaweedfs/weed/util"
     "io/ioutil"

-    "github.com/spf13/viper"
     "google.golang.org/grpc"
     "google.golang.org/grpc/credentials"

     "github.com/chrislusf/seaweedfs/weed/glog"
 )

-func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption {
+func LoadServerTLS(config *util.ViperProxy, component string) grpc.ServerOption {
     if config == nil {
         return nil
     }
@@ -40,7 +39,7 @@ func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption {
     return grpc.Creds(ta)
 }

-func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption {
+func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption {
     if config == nil {
         return grpc.WithInsecure()
     }

9  weed/server/common.go

@@ -233,12 +233,12 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file
     }
 }

-func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
+func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64, httpStatusCode int) error) {
     rangeReq := r.Header.Get("Range")

     if rangeReq == "" {
         w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
-        if err := writeFn(w, 0, totalSize); err != nil {
+        if err := writeFn(w, 0, totalSize, 0); err != nil {
             http.Error(w, err.Error(), http.StatusInternalServerError)
             return
         }
@@ -277,9 +277,8 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
         ra := ranges[0]
         w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
         w.Header().Set("Content-Range", ra.contentRange(totalSize))
-        w.WriteHeader(http.StatusPartialContent)

-        err = writeFn(w, ra.start, ra.length)
+        err = writeFn(w, ra.start, ra.length, http.StatusPartialContent)
         if err != nil {
             http.Error(w, err.Error(), http.StatusInternalServerError)
             return
@@ -307,7 +306,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
             pw.CloseWithError(e)
             return
         }
-        if e = writeFn(part, ra.start, ra.length); e != nil {
+        if e = writeFn(part, ra.start, ra.length, 0); e != nil {
             pw.CloseWithError(e)
             return
         }
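
Passing httpStatusCode into writeFn instead of calling WriteHeader up front lets the writer commit the status only once it knows the body can actually be produced, so a late chunk-read failure can still surface as a clean error response rather than a half-written 206. A rough sketch of that inversion under assumed names (readRange is hypothetical):

```go
package example

import "net/http"

// readRange stands in for the chunk-reading step; hypothetical.
func readRange(offset, size int64) ([]byte, error) { return make([]byte, size), nil }

// writeBody receives the status code instead of the caller pre-committing it
// with WriteHeader; if producing the body fails, headers are still unsent and
// the caller can respond with http.Error instead of a broken partial response.
func writeBody(w http.ResponseWriter, offset, size int64, httpStatusCode int) error {
	data, err := readRange(offset, size)
	if err != nil {
		return err
	}
	if httpStatusCode != 0 {
		w.WriteHeader(httpStatusCode)
	}
	_, err = w.Write(data)
	return err
}
```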

44  weed/server/filer_grpc_server.go

@@ -44,7 +44,7 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
     }, nil
 }

-func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error {
+func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) (err error) {

     glog.V(4).Infof("ListEntries %v", req)
@@ -60,23 +60,12 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
     lastFileName := req.StartFromFileName
     includeLastFile := req.InclusiveStartFrom
+    var listErr error
     for limit > 0 {
-        entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit, req.Prefix)
-        if err != nil {
-            return err
-        }
-        if len(entries) == 0 {
-            return nil
-        }
-
-        includeLastFile = false
-
-        for _, entry := range entries {
-
-            lastFileName = entry.Name()
-
-            if err := stream.Send(&filer_pb.ListEntriesResponse{
+        var hasEntries bool
+        lastFileName, listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", func(entry *filer.Entry) bool {
+            hasEntries = true
+            if err = stream.Send(&filer_pb.ListEntriesResponse{
                 Entry: &filer_pb.Entry{
                     Name:        entry.Name(),
                     IsDirectory: entry.IsDirectory(),
@@ -88,19 +77,28 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
                     Content: entry.Content,
                 },
             }); err != nil {
-                return err
+                return false
             }

             limit--
             if limit == 0 {
-                return nil
-            }
+                return false
             }
+            return true
+        })

-        if len(entries) < paginationLimit {
-            break
+        if listErr != nil {
+            return listErr
         }
+        if err != nil {
+            return err
+        }
+        if !hasEntries {
+            return nil
+        }
+
+        includeLastFile = false
     }

     return nil
@@ -326,7 +324,7 @@ func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntr
     err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures)
     resp = &filer_pb.DeleteEntryResponse{}
-    if err != nil {
+    if err != nil && err != filer_pb.ErrNotFound {
         resp.Error = err.Error()
     }
     return resp, nil

4  weed/server/filer_grpc_server_rename.go

@@ -75,7 +75,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
     includeLastFile := false
     for {

-        entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "")
+        entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "")

         if err != nil {
             return err
         }
@@ -90,7 +90,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
                 return err
             }
         }
-        if len(entries) < 1024 {
+        if !hasMore {
             break
         }
     }

Some files were not shown because too many files changed in this diff
