
Merge branch 'upstream_master' into store_s3cred

# Conflicts:
#	weed/s3api/filer_util.go
Branch: pull/1596/head
Author: Konstantin Lebedev, 4 years ago
Commit: 27e73de797
94 changed files, with the number of changed lines for each:

 1.   38  .github/workflows/release.yml
 2.   14  README.md
 3.    3  docker/Makefile
 4.   95  docker/local-registry-compose.yml
 5.    7  go.mod
 6.   14  go.sum
 7.    2  k8s/seaweedfs/Chart.yaml
 8. 1856  k8s/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json
 9.    2  k8s/seaweedfs/templates/filer-statefulset.yaml
10.    2  k8s/seaweedfs/templates/s3-deployment.yaml
11.    4  k8s/seaweedfs/templates/s3-service.yaml
12.    2  k8s/seaweedfs/templates/s3-servicemonitor.yaml
13.   20  k8s/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml
14. 1352  k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml
15.    2  k8s/seaweedfs/templates/volume-service.yaml
16.    2  k8s/seaweedfs/templates/volume-servicemonitor.yaml
17.    2  k8s/seaweedfs/templates/volume-statefulset.yaml
18.    6  k8s/seaweedfs/values.yaml
19.    7  other/java/client/pom.xml
20.    2  other/java/client/pom.xml.deploy
21.    2  other/java/client/pom_debug.xml
22.    1  other/java/client/src/main/java/seaweedfs/client/ChunkCache.java
23.    9  other/java/client/src/main/java/seaweedfs/client/FilerClient.java
24.   17  other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
25.   36  other/java/client/src/main/java/seaweedfs/client/VolumeIdCache.java
26.   20  other/java/client/src/main/proto/filer.proto
27.   32  other/java/examples/pom.xml
28.   54  other/java/examples/src/main/java/com/seaweedfs/examples/UnzipFile.java
29.   46  other/java/examples/src/main/java/com/seaweedfs/examples/WatchFiles.java
30.    2  other/java/hdfs2/dependency-reduced-pom.xml
31.    2  other/java/hdfs2/pom.xml
32.    9  other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
33.    8  other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
34.    2  other/java/hdfs3/dependency-reduced-pom.xml
35.    2  other/java/hdfs3/pom.xml
36.    9  other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
37.    8  other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
38.    3  unmaintained/s3/benchmark/hsbench.sh
39.    2  weed/command/benchmark.go
40.    2  weed/command/filer.go
41.    2  weed/command/filer_copy.go
42.    4  weed/command/scaffold.go
43.    6  weed/filer/filer.go
44.  148  weed/filer/filer_conf.go
45.   34  weed/filer/filer_conf_test.go
46.    2  weed/filer/filer_delete_entry.go
47.   44  weed/filer/filer_deletion.go
48.   12  weed/filer/filer_notify_append.go
49.   61  weed/filer/filer_on_meta_event.go
50.    4  weed/filer/leveldb/leveldb_store_test.go
51.    4  weed/filer/leveldb2/leveldb2_store_test.go
52.    3  weed/filer/meta_aggregator.go
53.    4  weed/filesys/dirty_page.go
54.   34  weed/filesys/file.go
55.    7  weed/filesys/filehandle.go
56.   55  weed/operation/assign_file_id.go
57.    8  weed/operation/upload_content.go
58.   20  weed/pb/filer.proto
59.  497  weed/pb/filer_pb/filer.pb.go
60.   26  weed/pb/filer_pb/filer_pb_helper.go
61.    6  weed/pb/master.proto
62. 1001  weed/pb/master_pb/master.pb.go
63.   44  weed/s3api/auth_credentials.go
64.   10  weed/s3api/filer_util.go
65.    6  weed/s3api/http/header.go
66.   78  weed/s3api/s3api_bucket_handlers.go
67.    3  weed/s3api/s3api_object_copy_handlers.go
68.   18  weed/s3api/s3api_object_handlers.go
69.   16  weed/s3api/s3api_object_multipart_handlers.go
70.    3  weed/server/common.go
71.   60  weed/server/filer_grpc_server.go
72.    6  weed/server/filer_server.go
73.    8  weed/server/filer_server_handlers.go
74.    7  weed/server/filer_server_handlers_read.go
75.  102  weed/server/filer_server_handlers_tagging.go
76.   96  weed/server/filer_server_handlers_write.go
77.   55  weed/server/filer_server_handlers_write_autochunk.go
78.   12  weed/server/filer_server_handlers_write_cipher.go
79.    2  weed/server/master_grpc_server.go
80.    2  weed/server/master_server.go
81.    1  weed/server/volume_grpc_client_to_master.go
82.  151  weed/shell/command_fs_configure.go
83.   22  weed/shell/command_volume_fix_replication.go
84.    2  weed/shell/commands.go
85.    3  weed/storage/disk_location.go
86.    7  weed/storage/store.go
87.    1  weed/topology/store_replicate.go
88.    2  weed/topology/volume_layout.go
89.    2  weed/util/constants.go
90.   18  weed/util/http_util.go
91.   10  weed/util/retry.go
92.    7  weed/wdclient/masterclient.go
93.   10  weed/wdclient/vid_map.go
94.    2  weed/wdclient/vid_map_test.go

38  .github/workflows/release.yml

@@ -3,8 +3,6 @@ name: Release
 on:
   push:
     branches: [ master ]
-  pull_request:
-    branches: [ master ]
 jobs:
@@ -16,14 +14,28 @@ jobs:
       - name: Check out code into the Go module directory
         uses: actions/checkout@v2
-      # - name: Go Release Binaries
-      #   uses: wangyoucao577/go-release-action@v1.8
-      #   with:
-      #     github_token: ${{ secrets.GITHUB_TOKEN }}
-      #     goos: linux # default is
-      #     goarch: amd64 # default is
-      #     build_flags: -tags 5BytesOffset # optional, default is
-      #     ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
-      #     # Where to run `go build .`
-      #     project_path: weed
-      #     binary_name: weed-large-disk
+      - name: Go Release Binaries
+        uses: wangyoucao577/go-release-action@v1.10
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          goos: linux # default is
+          goarch: amd64 # default is
+          release_tag: dev
+          overwrite: true
+          build_flags: -tags 5BytesOffset # optional, default is
+          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
+          # Where to run `go build .`
+          project_path: weed
+          binary_name: weed-large-disk
+      - name: Go Release Binaries
+        uses: wangyoucao577/go-release-action@v1.10
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          goos: linux # default is
+          goarch: amd64 # default is
+          release_tag: dev
+          overwrite: true
+          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
+          # Where to run `go build .`
+          project_path: weed
+          binary_name: weed

14  README.md

@@ -58,11 +58,12 @@ Your support will be really appreciated by me and other supporters!
 Table of Contents
 =================
+* [Quick Start](#quick-start)
 * [Introduction](#introduction)
 * [Features](#features)
     * [Additional Features](#additional-features)
     * [Filer Features](#filer-features)
-* [Example: Using Seaweed Object Store](#example-Using-Seaweed-Object-Store)
+* [Example Usage](#example-usage)
 * [Architecture](#architecture)
 * [Compared to Other File Systems](#compared-to-other-file-systems)
     * [Compared to HDFS](#compared-to-hdfs)
@@ -75,6 +76,13 @@ Table of Contents
 * [Benchmark](#Benchmark)
 * [License](#license)
+
+## Quick Start ##
+
+* Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`
+* Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.
+
+Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally or on a different machine. That is it!
@@ -141,10 +149,6 @@ On top of the object store, optional [Filer] can support directories and POSIX a
 [Back to TOC](#table-of-contents)

-## Quick Start ##
-* Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`
-* Run `weed server -s3` to start one master, one volume server, one filer, and one S3 gateway.
-
 ## Example: Using Seaweed Object Store ##

 By default, the master node runs on port 9333, and the volume nodes run on port 8080.

3  docker/Makefile

@@ -12,6 +12,9 @@ build:
 dev: build
 	docker-compose -f local-dev-compose.yml -p seaweedfs up

+dev_registry: build
+	docker-compose -f local-registry-compose.yml -p seaweedfs up
+
 cluster: build
 	docker-compose -f local-cluster-compose.yml -p seaweedfs up

95  docker/local-registry-compose.yml

@@ -0,0 +1,95 @@
version: '2'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master -volumeSizeLimitMB=1024"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1"
    volumes:
      - type: bind
        source: /Volumes/mobile_disk/data
        target: /data
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -master="master:9333"'
    depends_on:
      - master
      - volume
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v 9 s3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
  minio:
    image: minio/minio
    ports:
      - 9000:9000
    command: 'minio server /data'
    environment:
      MINIO_ACCESS_KEY: "some_access_key1"
      MINIO_SECRET_KEY: "some_secret_key1"
    depends_on:
      - master
  registry1:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://s3:8333"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5001:5001
    depends_on:
      - s3
      - minio
  registry2:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5002" # minio
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://minio:9000"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5002:5002
    depends_on:
      - s3
      - minio

7  go.mod

@@ -11,7 +11,7 @@ require (
 	github.com/aws/aws-sdk-go v1.33.5
 	github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
 	github.com/cespare/xxhash v1.1.0
-	github.com/chrislusf/raft v1.0.2-0.20201002174524-b13c3bfdb011
+	github.com/chrislusf/raft v1.0.3
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible
 	github.com/disintegration/imaging v1.6.2
@@ -24,6 +24,7 @@ require (
 	github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
 	github.com/fclairamb/ftpserverlib v0.8.0
 	github.com/frankban/quicktest v1.7.2 // indirect
+	github.com/go-errors/errors v1.1.1 // indirect
 	github.com/go-redis/redis v6.15.7+incompatible
 	github.com/go-sql-driver/mysql v1.5.0
 	github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
@@ -45,6 +46,7 @@ require (
 	github.com/klauspost/reedsolomon v1.9.2
 	github.com/kurin/blazer v0.5.3
 	github.com/lib/pq v1.2.0
+	github.com/lunixbochs/vtclean v1.0.0 // indirect
 	github.com/magiconair/properties v1.8.1 // indirect
 	github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
 	github.com/mattn/go-runewidth v0.0.4 // indirect
@@ -67,6 +69,9 @@ require (
 	github.com/tidwall/gjson v1.3.2
 	github.com/tidwall/match v1.0.1
 	github.com/valyala/bytebufferpool v1.0.0
+	github.com/viant/assertly v0.5.4 // indirect
+	github.com/viant/ptrie v0.3.0
+	github.com/viant/toolbox v0.33.2 // indirect
 	github.com/willf/bitset v1.1.10 // indirect
 	github.com/willf/bloom v2.0.3+incompatible
 	github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect

14  go.sum

@@ -84,8 +84,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chrislusf/raft v1.0.2-0.20201002174524-b13c3bfdb011 h1:vN1GvfLgDg8kIPCdhuVKAjlYpxG1B86jiKejB6MC/Q0=
-github.com/chrislusf/raft v1.0.2-0.20201002174524-b13c3bfdb011/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
+github.com/chrislusf/raft v1.0.3 h1:11YrnzJtVa5z7m9lhY2p8VcPHoUlC1UswyoAo+U1m1k=
+github.com/chrislusf/raft v1.0.3/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
@@ -159,6 +159,8 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-errors/errors v1.1.1 h1:ljK/pL5ltg3qoN+OtN6yCv9HWSfMwxSx90GJCZQxYNg=
+github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
 github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -386,6 +388,8 @@ github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -621,6 +625,12 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/viant/assertly v0.5.4 h1:5Hh4U3pLZa6uhCFAGpYOxck/8l9TZczEzoHNfJAhHEQ=
+github.com/viant/assertly v0.5.4/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
+github.com/viant/ptrie v0.3.0 h1:SDaRd7Gqr1+ItCNz0GpTxRdK21nOfqjV6YtBm9jGlMY=
+github.com/viant/ptrie v0.3.0/go.mod h1:VguMnbGfz95Zw+V5VarYSqtqslDxJbOv++xLzxkMhec=
+github.com/viant/toolbox v0.33.2 h1:Av844IIeGz81gT672qZemyptGfbrcxqGymA5RFnIPjE=
+github.com/viant/toolbox v0.33.2/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
 github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
 github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA=

2  k8s/seaweedfs/Chart.yaml

@@ -1,4 +1,4 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-version: 2.08
+version: 2.11

1856  k8s/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json

(File diff suppressed because it is too large.)

2  k8s/seaweedfs/templates/filer-statefulset.yaml

@@ -101,7 +101,7 @@ spec:
           -port={{ .Values.filer.port }} \
           {{- if .Values.filer.metricsPort }}
           -metricsPort {{ .Values.filer.metricsPort }} \
-          {{- end }}}
+          {{- end }}
           {{- if .Values.filer.redirectOnRead }}
           -redirectOnRead \
           {{- end }}

2  k8s/seaweedfs/templates/s3-deployment.yaml

@@ -73,7 +73,7 @@ spec:
           -port={{ .Values.s3.port }} \
           {{- if .Values.s3.metricsPort }}
           -metricsPort {{ .Values.s3.metricsPort }} \
-          {{- end }}}
+          {{- end }}
           {{- if .Values.global.enableSecurity }}
           -cert.file=/usr/local/share/ca-certificates/client/tls.crt \
           -key.file=/usr/local/share/ca-certificates/client/tls.key \

4  k8s/seaweedfs/templates/s3-service.yaml

@@ -14,10 +14,10 @@ spec:
     protocol: TCP
{{- if .Values.s3.metricsPort }}
   - name: "swfs-s3-metrics"
-    port: {{ .Values.filer.s3 }}
+    port: {{ .Values.s3.metricsPort }}
     targetPort: {{ .Values.s3.metricsPort }}
     protocol: TCP
-{{- end }}}
+{{- end }}
   selector:
     app: {{ template "seaweedfs.name" . }}
     component: s3

2  k8s/seaweedfs/templates/s3-servicemonitor.yaml

@@ -15,4 +15,4 @@ spec:
   selector:
     app: {{ template "seaweedfs.name" . }}
     component: s3
-{{- end }}}
+{{- end }}

20  k8s/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml

@@ -0,0 +1,20 @@
{{- if .Values.global.monitoring.enabled }}
{{- $files := .Files.Glob "dashboards/*.json" }}
{{- if $files }}
apiVersion: v1
kind: ConfigMapList
items:
{{- range $path, $fileContents := $files }}
{{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }}
- apiVersion: v1
  kind: ConfigMap
  metadata:
    name: {{ printf "%s" $dashboardName | lower | replace "_" "-" }}
    namespace: {{ $.Release.Namespace }}
    labels:
      grafana_dashboard: "1"
  data:
    {{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }}
{{- end }}
{{- end }}
{{- end }}

1352  k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml

(File diff suppressed because it is too large.)

2  k8s/seaweedfs/templates/volume-service.yaml

@@ -22,7 +22,7 @@ spec:
     port: {{ .Values.volume.metricsPort }}
     targetPort: {{ .Values.volume.metricsPort }}
     protocol: TCP
-{{- end }}}
+{{- end }}
   selector:
     app: {{ template "seaweedfs.name" . }}
     component: volume

2  k8s/seaweedfs/templates/volume-servicemonitor.yaml

@@ -15,4 +15,4 @@ spec:
   selector:
     app: {{ template "seaweedfs.name" . }}
     component: volume
-{{- end }}}
+{{- end }}

2  k8s/seaweedfs/templates/volume-statefulset.yaml

@@ -78,7 +78,7 @@ spec:
           -port={{ .Values.volume.port }} \
           {{- if .Values.volume.metricsPort }}
           -metricsPort {{ .Values.volume.metricsPort }} \
-          {{- end }}}
+          {{- end }}
           -dir={{ .Values.volume.dir }} \
           -max={{ .Values.volume.maxVolumes }} \
           {{- if .Values.volume.rack }}

6  k8s/seaweedfs/values.yaml

@@ -4,7 +4,7 @@ global:
   registry: ""
   repository: ""
   imageName: chrislusf/seaweedfs
-  imageTag: "2.08"
+  imageTag: "2.11"
   imagePullPolicy: IfNotPresent
   imagePullSecrets: imagepullsecret
   restartPolicy: Always
@@ -302,10 +302,6 @@ filer:
     WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false"
     # directories under this folder will be automatically creating a separate bucket
     WEED_FILER_BUCKETS_FOLDER: "/buckets"
-    # directories under this folder will be store message queue data
-    WEED_FILER_QUEUES_FOLDER: "/queues"
-    # WEED_FILER_OPTIONS_BUCKETS_FSYNC a list of buckets names with all write requests fsync=true
-    WEED_FILER_OPTIONS_BUCKETS_FSYNC: []

 s3:
   enabled: true

7  other/java/client/pom.xml

@@ -5,7 +5,7 @@
     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.5.2</version>
+    <version>1.5.6</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -68,6 +68,11 @@
             <version>4.13.1</version>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>javax.annotation</groupId>
+            <artifactId>javax.annotation-api</artifactId>
+            <version>1.3.2</version>
+        </dependency>
     </dependencies>

     <distributionManagement>

2  other/java/client/pom.xml.deploy

@@ -5,7 +5,7 @@
     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.5.2</version>
+    <version>1.5.6</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>

2  other/java/client/pom_debug.xml

@@ -5,7 +5,7 @@
     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.5.2</version>
+    <version>1.5.6</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>

1  other/java/client/src/main/java/seaweedfs/client/ChunkCache.java

@@ -15,7 +15,6 @@ public class ChunkCache {
         }
         this.cache = CacheBuilder.newBuilder()
                 .maximumSize(maxEntries)
-                .weakValues()
                 .expireAfterAccess(1, TimeUnit.HOURS)
                 .build();
     }

9  other/java/client/src/main/java/seaweedfs/client/FilerClient.java

@@ -333,4 +333,13 @@ public class FilerClient {
         return true;
     }

+    public Iterator<FilerProto.SubscribeMetadataResponse> watch(String prefix, String clientName, long sinceNs) {
+        return filerGrpcClient.getBlockingStub().subscribeMetadata(FilerProto.SubscribeMetadataRequest.newBuilder()
+                .setPathPrefix(prefix)
+                .setClientName(clientName)
+                .setSinceNs(sinceNs)
+                .build()
+        );
+    }
+
 }
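(The WatchFiles.java example added later in this same commit shows this watch API in use.)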

17  other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java

@@ -19,6 +19,7 @@ public class SeaweedRead {
     private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);

     static ChunkCache chunkCache = new ChunkCache(4);
+    static VolumeIdCache volumeIdCache = new VolumeIdCache(4 * 1024);

     // returns bytesRead
     public static long read(FilerGrpcClient filerGrpcClient, List<VisibleInterval> visibleIntervals,
@@ -27,16 +28,28 @@ public class SeaweedRead {

         List<ChunkView> chunkViews = viewFromVisibles(visibleIntervals, position, bufferLength);

+        Map<String, FilerProto.Locations> knownLocations = new HashMap<>();
+
         FilerProto.LookupVolumeRequest.Builder lookupRequest = FilerProto.LookupVolumeRequest.newBuilder();
         for (ChunkView chunkView : chunkViews) {
             String vid = parseVolumeId(chunkView.fileId);
+            FilerProto.Locations locations = volumeIdCache.getLocations(vid);
+            if (locations == null) {
                 lookupRequest.addVolumeIds(vid);
+            } else {
+                knownLocations.put(vid, locations);
+            }
         }

+        if (lookupRequest.getVolumeIdsCount() > 0) {
             FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
                     .getBlockingStub().lookupVolume(lookupRequest.build());
             Map<String, FilerProto.Locations> vid2Locations = lookupResponse.getLocationsMapMap();
+            for (Map.Entry<String, FilerProto.Locations> entry : vid2Locations.entrySet()) {
+                volumeIdCache.setLocations(entry.getKey(), entry.getValue());
+                knownLocations.put(entry.getKey(), entry.getValue());
+            }
+        }

         //TODO parallel this
         long readCount = 0;
@@ -50,7 +63,7 @@ public class SeaweedRead {
                 startOffset += gap;
             }

-            FilerProto.Locations locations = vid2Locations.get(parseVolumeId(chunkView.fileId));
+            FilerProto.Locations locations = knownLocations.get(parseVolumeId(chunkView.fileId));
             if (locations == null || locations.getLocationsCount() == 0) {
                 LOG.error("failed to locate {}", chunkView.fileId);
                 // log here!

36  other/java/client/src/main/java/seaweedfs/client/VolumeIdCache.java

@@ -0,0 +1,36 @@
package seaweedfs.client;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.concurrent.TimeUnit;

public class VolumeIdCache {

    private Cache<String, FilerProto.Locations> cache = null;

    public VolumeIdCache(int maxEntries) {
        if (maxEntries == 0) {
            return;
        }
        this.cache = CacheBuilder.newBuilder()
                .maximumSize(maxEntries)
                .expireAfterAccess(5, TimeUnit.MINUTES)
                .build();
    }

    public FilerProto.Locations getLocations(String volumeId) {
        if (this.cache == null) {
            return null;
        }
        return this.cache.getIfPresent(volumeId);
    }

    public void setLocations(String volumeId, FilerProto.Locations locations) {
        if (this.cache == null) {
            return;
        }
        this.cache.put(volumeId, locations);
    }

}

20  other/java/client/src/main/proto/filer.proto

@@ -348,3 +348,23 @@ message KvPutRequest {
 message KvPutResponse {
     string error = 1;
 }
+
+// path-based configurations
+message FilerConf {
+    int32 version = 1;
+    message PathConf {
+        string location_prefix = 1;
+        string collection = 2;
+        string replication = 3;
+        string ttl = 4;
+        enum DiskType {
+            NONE = 0;
+            HDD = 1;
+            SSD = 2;
+        }
+        DiskType disk_type = 5;
+        bool fsync = 6;
+        uint32 volume_growth_count = 7;
+    }
+    repeated PathConf locations = 2;
+}
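The Go side of this message (weed/pb/filer.proto, also changed in this commit) is what the filer stores as JSON at /etc/filer.conf, per weed/filer/filer_conf.go below. As a minimal sketch of producing such a file, the snippet below builds a one-rule configuration and prints it with the same jsonpb marshaler the filer's ToText uses; the prefix and rule values are invented for illustration.

package main

import (
	"os"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/golang/protobuf/jsonpb"
)

func main() {
	// A hypothetical rule: everything under /buckets/important/ gets
	// replication 001 and fsync on every write.
	conf := &filer_pb.FilerConf{
		Version: 1,
		Locations: []*filer_pb.FilerConf_PathConf{
			{
				LocationPrefix: "/buckets/important/",
				Replication:    "001",
				Fsync:          true,
			},
		},
	}

	// Print the JSON form; saving this output as /etc/filer.conf inside the
	// filer would be picked up by FilerConf.LoadFromBytes (see below).
	m := jsonpb.Marshaler{EmitDefaults: false, Indent: "  "}
	if err := m.Marshal(os.Stdout, conf); err != nil {
		panic(err)
	}
}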

32  other/java/examples/pom.xml

@@ -0,0 +1,32 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>unzip</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>com.github.chrislusf</groupId>
            <artifactId>seaweedfs-client</artifactId>
            <version>1.5.6</version>
            <scope>compile</scope>
        </dependency>
        <dependency>
            <groupId>com.github.chrislusf</groupId>
            <artifactId>seaweedfs-hadoop2-client</artifactId>
            <version>1.5.6</version>
            <scope>compile</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.9.2</version>
            <scope>compile</scope>
        </dependency>
    </dependencies>

</project>

54  other/java/examples/src/main/java/com/seaweedfs/examples/UnzipFile.java

@@ -0,0 +1,54 @@
package com.seaweedfs.examples;

import seaweed.hdfs.SeaweedInputStream;
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerGrpcClient;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

public class UnzipFile {

    public static void main(String[] args) throws IOException {

        FilerGrpcClient filerGrpcClient = new FilerGrpcClient("localhost", 18888);
        FilerClient filerClient = new FilerClient(filerGrpcClient);

        long startTime = System.currentTimeMillis();
        parseZip("/Users/chris/tmp/test.zip");

        long startTime2 = System.currentTimeMillis();

        long localProcessTime = startTime2 - startTime;

        SeaweedInputStream seaweedInputStream = new SeaweedInputStream(
                filerGrpcClient,
                new org.apache.hadoop.fs.FileSystem.Statistics(""),
                "/",
                filerClient.lookupEntry("/", "test.zip")
        );
        parseZip(seaweedInputStream);

        long swProcessTime = System.currentTimeMillis() - startTime2;

        System.out.println("Local time: " + localProcessTime);
        System.out.println("SeaweedFS time: " + swProcessTime);

    }

    public static void parseZip(String filename) throws IOException {
        FileInputStream fileInputStream = new FileInputStream(filename);
        parseZip(fileInputStream);
    }

    public static void parseZip(InputStream is) throws IOException {
        ZipInputStream zin = new ZipInputStream(is);
        ZipEntry ze;
        while ((ze = zin.getNextEntry()) != null) {
            System.out.println(ze.getName());
        }
    }
}

46  other/java/examples/src/main/java/com/seaweedfs/examples/WatchFiles.java

@@ -0,0 +1,46 @@
package com.seaweedfs.examples;

import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;

import java.io.IOException;
import java.util.Date;
import java.util.Iterator;

public class WatchFiles {

    public static void main(String[] args) throws IOException {

        FilerClient filerClient = new FilerClient("localhost", 18888);

        long sinceNs = (System.currentTimeMillis() - 3600 * 1000) * 1000000L;

        Iterator<FilerProto.SubscribeMetadataResponse> watch = filerClient.watch(
                "/buckets",
                "exampleClientName",
                sinceNs
        );

        System.out.println("Connected to filer, subscribing from " + new Date());

        while (watch.hasNext()) {
            FilerProto.SubscribeMetadataResponse event = watch.next();
            FilerProto.EventNotification notification = event.getEventNotification();
            if (!event.getDirectory().equals(notification.getNewParentPath())) {
                // move an entry to a new directory, possibly with a new name
                if (notification.hasOldEntry() && notification.hasNewEntry()) {
                    System.out.println("moved " + event.getDirectory() + "/" + notification.getOldEntry().getName() + " to " + notification.getNewParentPath() + "/" + notification.getNewEntry().getName());
                } else {
                    System.out.println("this should not happen.");
                }
            } else if (notification.hasNewEntry() && !notification.hasOldEntry()) {
                System.out.println("created entry " + event.getDirectory() + "/" + notification.getNewEntry().getName());
            } else if (!notification.hasNewEntry() && notification.hasOldEntry()) {
                System.out.println("deleted entry " + event.getDirectory() + "/" + notification.getOldEntry().getName());
            } else if (notification.hasNewEntry() && notification.hasOldEntry()) {
                System.out.println("updated entry " + event.getDirectory() + "/" + notification.getNewEntry().getName());
            }
        }

    }
}

2  other/java/hdfs2/dependency-reduced-pom.xml

@@ -301,7 +301,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
+    <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
     <hadoop.version>2.9.2</hadoop.version>
   </properties>
 </project>

2  other/java/hdfs2/pom.xml

@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>

     <properties>
-        <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
+        <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>

9  other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java

@@ -25,6 +25,7 @@ public class SeaweedFileSystem extends FileSystem {
     public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
     public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
     public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
+    public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
     public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;

     private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
@@ -65,6 +66,12 @@ public class SeaweedFileSystem extends FileSystem {
     }

+    @Override
+    public void close() throws IOException {
+        super.close();
+        this.seaweedFileSystemStore.close();
+    }
+
     @Override
     public FSDataInputStream open(Path path, int bufferSize) throws IOException {
@@ -91,7 +98,7 @@
         path = qualify(path);

         try {
-            String replicaPlacement = String.format("%03d", replication - 1);
+            String replicaPlacement = this.getConf().get(FS_SEAWEED_REPLICATION, String.format("%03d", replication - 1));
             int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
             OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, seaweedBufferSize, replicaPlacement);
             return new FSDataOutputStream(outputStream, statistics);

8  other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java

@@ -36,6 +36,14 @@ public class SeaweedFileSystemStore {
         this.conf = conf;
     }

+    public void close() {
+        try {
+            this.filerGrpcClient.shutdown();
+        } catch (InterruptedException e) {
+            e.printStackTrace();
+        }
+    }
+
     public static String getParentDirectory(Path path) {
         return path.isRoot() ? "/" : path.getParent().toUri().getPath();
     }

2  other/java/hdfs3/dependency-reduced-pom.xml

@@ -309,7 +309,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
+    <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
     <hadoop.version>3.1.1</hadoop.version>
   </properties>
 </project>

2  other/java/hdfs3/pom.xml

@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>

     <properties>
-        <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
+        <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>

9  other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java

@@ -25,6 +25,7 @@ public class SeaweedFileSystem extends FileSystem {
     public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
     public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
     public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
+    public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
     public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;

     private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
@@ -65,6 +66,12 @@ public class SeaweedFileSystem extends FileSystem {
     }

+    @Override
+    public void close() throws IOException {
+        super.close();
+        this.seaweedFileSystemStore.close();
+    }
+
     @Override
     public FSDataInputStream open(Path path, int bufferSize) throws IOException {
@@ -91,7 +98,7 @@
         path = qualify(path);

         try {
-            String replicaPlacement = String.format("%03d", replication - 1);
+            String replicaPlacement = this.getConf().get(FS_SEAWEED_REPLICATION, String.format("%03d", replication - 1));
             int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
             OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, seaweedBufferSize, replicaPlacement);
             return new FSDataOutputStream(outputStream, statistics);

8  other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java

@@ -36,6 +36,14 @@ public class SeaweedFileSystemStore {
         this.conf = conf;
     }

+    public void close() {
+        try {
+            this.filerGrpcClient.shutdown();
+        } catch (InterruptedException e) {
+            e.printStackTrace();
+        }
+    }
+
     public static String getParentDirectory(Path path) {
         return path.isRoot() ? "/" : path.getParent().toUri().getPath();
     }

3  unmaintained/s3/benchmark/hsbench.sh

@@ -0,0 +1,3 @@
#!/bin/bash

hsbench -a accesstoken -s secret -z 4K -d 10 -t 10 -b 10 -u http://localhost:8333 -m "cxipgdx" -bp "hsbench-"

2  weed/command/benchmark.go

@@ -125,7 +125,7 @@ func runBenchmark(cmd *Command, args []string) bool {
 		defer pprof.StopCPUProfile()
 	}

-	b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, strings.Split(*b.masters, ","))
+	b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, "", strings.Split(*b.masters, ","))
 	go b.masterClient.KeepConnectedToMaster()
 	b.masterClient.WaitUntilConnected()

2  weed/command/filer.go

@@ -59,7 +59,7 @@ func init() {
 	f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
 	f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
 	f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size")
-	f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center")
+	f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to read and write to volumes in this data center")
 	f.rack = cmdFiler.Flag.String("rack", "", "prefer to write to volumes in this rack")
 	f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
 	f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")

2  weed/command/filer_copy.go

@@ -122,7 +122,7 @@ func runCopy(cmd *Command, args []string) bool {
 		expectedBucket := restPath[:strings.Index(restPath, "/")]
 		if *copy.collection == "" {
 			*copy.collection = expectedBucket
-		} else {
+		} else if *copy.collection != expectedBucket {
 			fmt.Printf("destination %s uses collection \"%s\": unexpected collection \"%v\"\n", urlPath, expectedBucket, *copy.collection)
 			return true
 		}

4  weed/command/scaffold.go

@@ -76,10 +76,6 @@ const (
 recursive_delete = false
 # directories under this folder will be automatically creating a separate bucket
 buckets_folder = "/buckets"
-buckets_fsync = [          # a list of buckets with all write requests fsync=true
-	"important_bucket",
-	"should_always_fsync",
-]

 ####################################################
 # The following are filer store options

6  weed/filer/filer.go

@@ -41,14 +41,16 @@ type Filer struct {
 	metaLogReplication string
 	MetaAggregator     *MetaAggregator
 	Signature          int32
+	FilerConf          *FilerConf
 }

 func NewFiler(masters []string, grpcDialOption grpc.DialOption,
-	filerHost string, filerGrpcPort uint32, collection string, replication string, notifyFn func()) *Filer {
+	filerHost string, filerGrpcPort uint32, collection string, replication string, dataCenter string, notifyFn func()) *Filer {
 	f := &Filer{
-		MasterClient:        wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, masters),
+		MasterClient:        wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, dataCenter, masters),
 		fileIdDeletionQueue: util.NewUnboundedQueue(),
 		GrpcDialOption:      grpcDialOption,
+		FilerConf:           NewFilerConf(),
 	}
 	f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, f.logFlushFunc, notifyFn)
 	f.metaLogCollection = collection

148  weed/filer/filer_conf.go

@@ -0,0 +1,148 @@
package filer

import (
	"bytes"
	"context"
	"io"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
	"github.com/viant/ptrie"
)

const (
	DirectoryEtc  = "/etc"
	FilerConfName = "filer.conf"
)

type FilerConf struct {
	rules ptrie.Trie
}

func NewFilerConf() (fc *FilerConf) {
	fc = &FilerConf{
		rules: ptrie.New(),
	}
	return fc
}

func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
	filerConfPath := util.NewFullPath(DirectoryEtc, FilerConfName)
	entry, err := filer.FindEntry(context.Background(), filerConfPath)
	if err != nil {
		if err == filer_pb.ErrNotFound {
			return nil
		}
		glog.Errorf("read filer conf entry %s: %v", filerConfPath, err)
		return
	}

	return fc.loadFromChunks(filer, entry.Chunks)
}

func (fc *FilerConf) loadFromChunks(filer *Filer, chunks []*filer_pb.FileChunk) (err error) {
	data, err := filer.readEntry(chunks)
	if err != nil {
		glog.Errorf("read filer conf content: %v", err)
		return
	}

	return fc.LoadFromBytes(data)
}

func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
	conf := &filer_pb.FilerConf{}

	if err := jsonpb.Unmarshal(bytes.NewReader(data), conf); err != nil {
		err = proto.UnmarshalText(string(data), conf)
		if err != nil {
			glog.Errorf("unable to parse filer conf: %v", err)
			// this is not recoverable
			return nil
		}
		return nil
	}

	return fc.doLoadConf(conf)
}

func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
	for _, location := range conf.Locations {
		err = fc.AddLocationConf(location)
		if err != nil {
			// this is not recoverable
			return nil
		}
	}
	return nil
}

func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
	err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
	if err != nil {
		glog.Errorf("put location prefix: %v", err)
	}
	return
}

func (fc *FilerConf) DeleteLocationConf(locationPrefix string) {
	rules := ptrie.New()
	fc.rules.Walk(func(key []byte, value interface{}) bool {
		if string(key) == locationPrefix {
			return true
		}
		rules.Put(key, value)
		return true
	})
	fc.rules = rules
	return
}

func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf_PathConf) {
	pathConf = &filer_pb.FilerConf_PathConf{}
	fc.rules.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool {
		t := value.(*filer_pb.FilerConf_PathConf)
		mergePathConf(pathConf, t)
		return true
	})
	return pathConf
}

// merge if values in b is not empty, merge them into a
func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
	a.Collection = util.Nvl(b.Collection, a.Collection)
	a.Replication = util.Nvl(b.Replication, a.Replication)
	a.Ttl = util.Nvl(b.Ttl, a.Ttl)
	if b.DiskType != filer_pb.FilerConf_PathConf_NONE {
		a.DiskType = b.DiskType
	}
	a.Fsync = b.Fsync || a.Fsync
	if b.VolumeGrowthCount > 0 {
		a.VolumeGrowthCount = b.VolumeGrowthCount
	}
}

func (fc *FilerConf) ToProto() *filer_pb.FilerConf {
	m := &filer_pb.FilerConf{}
	fc.rules.Walk(func(key []byte, value interface{}) bool {
		pathConf := value.(*filer_pb.FilerConf_PathConf)
		m.Locations = append(m.Locations, pathConf)
		return true
	})
	return m
}

func (fc *FilerConf) ToText(writer io.Writer) error {
	m := jsonpb.Marshaler{
		EmitDefaults: false,
		Indent:       "  ",
	}
	return m.Marshal(writer, fc.ToProto())
}
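For readers skimming the new API: a minimal sketch, using only the exported functions above, of how a caller resolves the merged storage rule for a path. The prefixes and values here are invented; the test file below exercises the same merge behavior in-package.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	fc := filer.NewFilerConf()
	// Two hypothetical rules: a catch-all replication under /buckets/
	// and a collection for one specific bucket.
	fc.AddLocationConf(&filer_pb.FilerConf_PathConf{LocationPrefix: "/buckets/", Replication: "001"})
	fc.AddLocationConf(&filer_pb.FilerConf_PathConf{LocationPrefix: "/buckets/abc", Collection: "abc"})

	// Every rule whose prefix matches the path contributes; non-empty
	// values from more specific prefixes win via mergePathConf.
	rule := fc.MatchStorageRule("/buckets/abc/some/file")
	fmt.Println(rule.Collection, rule.Replication) // abc 001
}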

34  weed/filer/filer_conf_test.go

@@ -0,0 +1,34 @@
package filer

import (
	"testing"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/stretchr/testify/assert"
)

func TestFilerConf(t *testing.T) {

	fc := NewFilerConf()

	conf := &filer_pb.FilerConf{Locations: []*filer_pb.FilerConf_PathConf{
		{
			LocationPrefix: "/buckets/abc",
			Collection:     "abc",
		},
		{
			LocationPrefix: "/buckets/abcd",
			Collection:     "abcd",
		},
		{
			LocationPrefix: "/buckets/",
			Replication:    "001",
		},
	}}
	fc.doLoadConf(conf)

	assert.Equal(t, "abc", fc.MatchStorageRule("/buckets/abc/jasdf").Collection)
	assert.Equal(t, "abcd", fc.MatchStorageRule("/buckets/abcd/jasdf").Collection)
	assert.Equal(t, "001", fc.MatchStorageRule("/buckets/abc/jasdf").Replication)
}

2  weed/filer/filer_delete_entry.go

@@ -47,7 +47,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 	}

 	if shouldDeleteChunks && !isCollection {
-		go f.DeleteChunks(chunks)
+		f.DirectDeleteChunks(chunks)
 	}
 	// A case not handled:
 	// what if the chunk is in a different collection?

44  weed/filer/filer_deletion.go

@@ -68,6 +68,50 @@ func (f *Filer) loopProcessingDeletion() {
 	}
 }

+func (f *Filer) doDeleteFileIds(fileIds []string) {
+
+	lookupFunc := LookupByMasterClientFn(f.MasterClient)
+	DeletionBatchSize := 100000 // roughly 20 bytes cost per file id.
+
+	for len(fileIds) > 0 {
+		var toDeleteFileIds []string
+		if len(fileIds) > DeletionBatchSize {
+			toDeleteFileIds = fileIds[:DeletionBatchSize]
+			fileIds = fileIds[DeletionBatchSize:]
+		} else {
+			toDeleteFileIds = fileIds
+			fileIds = fileIds[:0]
+		}
+		deletionCount := len(toDeleteFileIds)
+		_, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+		if err != nil {
+			if !strings.Contains(err.Error(), "already deleted") {
+				glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+			}
+		}
+	}
+}
+
+func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) {
+	var fildIdsToDelete []string
+	for _, chunk := range chunks {
+		if !chunk.IsChunkManifest {
+			fildIdsToDelete = append(fildIdsToDelete, chunk.GetFileIdString())
+			continue
+		}
+		dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
+		if manifestResolveErr != nil {
+			glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+		}
+		for _, dChunk := range dataChunks {
+			fildIdsToDelete = append(fildIdsToDelete, dChunk.GetFileIdString())
+		}
+		fildIdsToDelete = append(fildIdsToDelete, chunk.GetFileIdString())
+	}
+
+	f.doDeleteFileIds(fildIdsToDelete)
+}
+
 func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
 	for _, chunk := range chunks {
 		if !chunk.IsChunkManifest {

12  weed/filer/filer_notify_append.go

@@ -13,7 +13,7 @@ import (

 func (f *Filer) appendToFile(targetFile string, data []byte) error {

-	assignResult, uploadResult, err2 := f.assignAndUpload(data)
+	assignResult, uploadResult, err2 := f.assignAndUpload(targetFile, data)
 	if err2 != nil {
 		return err2
 	}
@@ -46,14 +46,16 @@ func (f *Filer) appendToFile(targetFile string, data []byte) error {
 	return err
 }

-func (f *Filer) assignAndUpload(data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
+func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
 	// assign a volume location
+	rule := f.FilerConf.MatchStorageRule(targetFile)
 	assignRequest := &operation.VolumeAssignRequest{
-		Count:       1,
-		Collection:  f.metaLogCollection,
-		Replication: f.metaLogReplication,
-		WritableVolumeCount: 1,
+		Count:               1,
+		Collection:          util.Nvl(f.metaLogCollection, rule.Collection),
+		Replication:         util.Nvl(f.metaLogReplication, rule.Replication),
+		WritableVolumeCount: rule.VolumeGrowthCount,
 	}
 	assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
 	if err != nil {
 		return nil, nil, fmt.Errorf("AssignVolume: %v", err)

61
weed/filer/filer_on_meta_event.go

@ -0,0 +1,61 @@
package filer
import (
"bytes"
"math"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
// onMetadataChangeEvent is triggered after filer processed change events from local or remote filers
func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse) {
if DirectoryEtc != event.Directory {
if DirectoryEtc != event.EventNotification.NewParentPath {
return
}
}
entry := event.EventNotification.NewEntry
if entry == nil {
return
}
glog.V(0).Infof("procesing %v", event)
if entry.Name == FilerConfName {
f.reloadFilerConfiguration(entry)
}
}
func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) {
var buf bytes.Buffer
err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
fc := NewFilerConf()
err := fc.loadFromChunks(f, entry.Chunks)
if err != nil {
glog.Errorf("read filer conf chunks: %v", err)
return
}
f.FilerConf = fc
}
func (f *Filer) LoadFilerConf() {
fc := NewFilerConf()
err := util.Retry("loadFilerConf", func() error {
return fc.loadFromFiler(f)
})
if err != nil {
glog.Errorf("read filer conf: %v", err)
return
}
f.FilerConf = fc
}
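Taken together with filer_conf.go (changed earlier in this commit), the reload above means a write to filer.conf under the etc directory takes effect on every filer without a restart. A hedged usage sketch; MatchStorageRule comes from filer_conf.go, not this hunk, and the path is hypothetical:

// After reloadFilerConfiguration swaps in the new FilerConf, subsequent
// writes consult the path-based rules.
rule := f.FilerConf.MatchStorageRule("/buckets/important/2020/photo.jpg")
_ = rule.Collection        // e.g. a per-prefix collection
_ = rule.Replication       // e.g. "001"
_ = rule.VolumeGrowthCount // pre-grow this many writable volumes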

4
weed/filer/leveldb/leveldb_store_test.go

@@ -11,7 +11,7 @@ import (
)
func TestCreateAndFind(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDBStore{}
@@ -65,7 +65,7 @@ func TestCreateAndFind(t *testing.T) {
}
func TestEmptyRoot(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir)
store := &LevelDBStore{}

4
weed/filer/leveldb2/leveldb2_store_test.go

@@ -11,7 +11,7 @@ import (
)
func TestCreateAndFind(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDB2Store{}
@@ -65,7 +65,7 @@ func TestCreateAndFind(t *testing.T) {
}
func TestEmptyRoot(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir)
store := &LevelDB2Store{}

3
weed/filer/meta_aggregator.go

@@ -141,6 +141,9 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
return fmt.Errorf("process %v: %v", resp, err)
}
lastTsNs = resp.TsNs
f.onMetadataChangeEvent(resp)
}
})
if err != nil {

4
weed/filesys/dirty_page.go

@@ -15,10 +15,10 @@ type ContinuousDirtyPages struct {
intervals *ContinuousIntervals
f *File
writeWaitGroup sync.WaitGroup
chunkAddLock sync.Mutex
chunkSaveErrChan chan error
chunkSaveErrChanClosed bool
lastErr error
lock sync.Mutex
collection string
replication string
}
@@ -117,6 +117,8 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
}
chunk.Mtime = mtime
pages.collection, pages.replication = collection, replication
pages.chunkAddLock.Lock()
defer pages.chunkAddLock.Unlock()
pages.f.addChunks([]*filer_pb.FileChunk{chunk})
glog.V(3).Infof("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
}

34
weed/filesys/file.go

@@ -144,7 +144,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
}
}
file.entry.Chunks = chunks
file.entryViewCache = nil
file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), chunks)
file.reader = nil
file.wfs.deleteFileChunks(truncatedChunks)
}
@@ -282,15 +282,37 @@ func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, er
return entry, nil
}
func lessThan(a, b *filer_pb.FileChunk) bool {
if a.Mtime == b.Mtime {
return a.Fid.FileKey < b.Fid.FileKey
}
return a.Mtime < b.Mtime
}
func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
sort.Slice(chunks, func(i, j int) bool {
if chunks[i].Mtime == chunks[j].Mtime {
return chunks[i].Fid.FileKey < chunks[j].Fid.FileKey
// find the earliest incoming chunk
newChunks := chunks
earliestChunk := newChunks[0]
for i := 1; i < len(newChunks); i++ {
if lessThan(newChunks[i], earliestChunk) {
earliestChunk = newChunks[i]
}
}
return chunks[i].Mtime < chunks[j].Mtime
// pick out-of-order chunks from existing chunks
for _, chunk := range file.entry.Chunks {
if lessThan(earliestChunk, chunk) {
chunks = append(chunks, chunk)
}
}
// sort incoming chunks
sort.Slice(chunks, func(i, j int) bool {
return lessThan(chunks[i], chunks[j])
})
// add to entry view cache
for _, chunk := range chunks {
file.entryViewCache = filer.MergeIntoVisibles(file.entryViewCache, chunk)
}
@@ -299,7 +321,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
file.entry.Chunks = append(file.entry.Chunks, chunks...)
file.entry.Chunks = append(file.entry.Chunks, newChunks...)
}
func (file *File) setEntry(entry *filer_pb.Entry) {
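The ordering contract of lessThan above is Mtime first, then Fid.FileKey as a tie-breaker; a quick illustration with made-up values:

a := &filer_pb.FileChunk{Mtime: 5, Fid: &filer_pb.FileId{FileKey: 1}}
b := &filer_pb.FileChunk{Mtime: 5, Fid: &filer_pb.FileId{FileKey: 2}}
c := &filer_pb.FileChunk{Mtime: 9, Fid: &filer_pb.FileId{FileKey: 0}}
_ = lessThan(a, b) // true: equal Mtime falls back to FileKey
_ = lessThan(c, a) // false: c is newer regardless of FileKey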

7
weed/filesys/filehandle.go

@@ -183,11 +183,10 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
}
if fh.f.isOpen == 0 {
if err := fh.doFlush(ctx, req.Header); err != nil {
glog.Errorf("Release doFlush %s: %v", fh.f.Name, err)
}
fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
}
// stop the goroutine
if !fh.dirtyPages.chunkSaveErrChanClosed {
@@ -195,6 +194,9 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
close(fh.dirtyPages.chunkSaveErrChan)
}
fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
}
return nil
}
@@ -262,7 +264,6 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
}
fh.f.entry.Chunks = append(chunks, manifestChunks...)
fh.f.entryViewCache = nil
fh.f.wfs.mapPbIdFromLocalToFiler(request.Entry)
defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry)

55
weed/operation/assign_file_id.go

@@ -5,6 +5,7 @@ import (
"fmt"
"strings"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -49,14 +50,14 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum
lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.AssignRequest{
Count: primaryRequest.Count,
Replication: primaryRequest.Replication,
Collection: primaryRequest.Collection,
Ttl: primaryRequest.Ttl,
DataCenter: primaryRequest.DataCenter,
Rack: primaryRequest.Rack,
DataNode: primaryRequest.DataNode,
WritableVolumeCount: primaryRequest.WritableVolumeCount,
Count: request.Count,
Replication: request.Replication,
Collection: request.Collection,
Ttl: request.Ttl,
DataCenter: request.DataCenter,
Rack: request.Rack,
DataNode: request.DataNode,
WritableVolumeCount: request.WritableVolumeCount,
}
resp, grpcErr := masterClient.Assign(context.Background(), req)
if grpcErr != nil {
@@ -101,3 +102,41 @@ func LookupJwt(master string, fileId string) security.EncodedJwt {
return security.EncodedJwt(tokenStr)
}
type StorageOption struct {
Replication string
Collection string
DataCenter string
Rack string
TtlSeconds int32
Fsync bool
VolumeGrowthCount uint32
}
func (so *StorageOption) TtlString() string {
return needle.SecondsToTTL(so.TtlSeconds)
}
func (so *StorageOption) ToAssignRequests(count int) (ar *VolumeAssignRequest, altRequest *VolumeAssignRequest) {
ar = &VolumeAssignRequest{
Count: uint64(count),
Replication: so.Replication,
Collection: so.Collection,
Ttl: so.TtlString(),
DataCenter: so.DataCenter,
Rack: so.Rack,
WritableVolumeCount: so.VolumeGrowthCount,
}
if so.DataCenter != "" || so.Rack != "" {
altRequest = &VolumeAssignRequest{
Count: uint64(count),
Replication: so.Replication,
Collection: so.Collection,
Ttl: so.TtlString(),
DataCenter: "",
Rack: "",
WritableVolumeCount: so.VolumeGrowthCount,
}
}
return
}
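A usage sketch for the new StorageOption; the fallback behavior is inferred from the two return values, and the field values below are hypothetical:

so := &operation.StorageOption{
	Collection:        "pics",
	Replication:       "001",
	DataCenter:        "dc1",
	VolumeGrowthCount: 2,
}
primary, alt := so.ToAssignRequests(1)
// primary pins the assignment to dc1; alt (non-nil only when a data
// center or rack was requested) drops the placement constraints so the
// caller can retry when the pinned request cannot be satisfied.
_, _ = primary, alt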

8
weed/operation/upload_content.go

@@ -81,14 +81,11 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader,
if ok {
data = bytesReader.Bytes
} else {
buf := bytebufferpool.Get()
_, err = buf.ReadFrom(reader)
defer bytebufferpool.Put(buf)
data, err = ioutil.ReadAll(reader)
if err != nil {
err = fmt.Errorf("read input: %v", err)
return
}
data = buf.Bytes()
}
uploadResult, uploadErr := retriedUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt)
return uploadResult, uploadErr, data
@@ -172,7 +169,7 @@ func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, i
uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) {
_, err = w.Write(data)
return
}, filename, contentIsGzipped, 0, mtype, pairMap, jwt)
}, filename, contentIsGzipped, len(data), mtype, pairMap, jwt)
}
if uploadResult == nil {
@@ -193,6 +190,7 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
body_writer := multipart.NewWriter(buf)
h := make(textproto.MIMEHeader)
h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, fileNameEscaper.Replace(filename)))
h.Set("Idempotency-Key", uploadUrl)
if mtype == "" {
mtype = mime.TypeByExtension(strings.ToLower(filepath.Ext(filename)))
}

20
weed/pb/filer.proto

@@ -348,3 +348,23 @@ message KvPutRequest {
message KvPutResponse {
string error = 1;
}
// path-based configurations
message FilerConf {
int32 version = 1;
message PathConf {
string location_prefix = 1;
string collection = 2;
string replication = 3;
string ttl = 4;
enum DiskType {
NONE = 0;
HDD = 1;
SSD = 2;
}
DiskType disk_type = 5;
bool fsync = 6;
uint32 volume_growth_count = 7;
}
repeated PathConf locations = 2;
}
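An illustrative FilerConf instance built from the generated Go types shown further down in this diff (values are hypothetical; how the stored filer.conf is encoded on disk is handled in weed/filer/filer_conf.go and not shown here):

conf := &filer_pb.FilerConf{
	Version: 1,
	Locations: []*filer_pb.FilerConf_PathConf{{
		LocationPrefix:    "/buckets/important/", // longest-prefix match target
		Collection:        "important",
		Replication:       "001",
		VolumeGrowthCount: 2, // pre-grow two writable volumes
	}},
}
_ = conf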

497
weed/pb/filer_pb/filer.pb.go

@@ -29,6 +29,55 @@ const (
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type FilerConf_PathConf_DiskType int32
const (
FilerConf_PathConf_NONE FilerConf_PathConf_DiskType = 0
FilerConf_PathConf_HDD FilerConf_PathConf_DiskType = 1
FilerConf_PathConf_SSD FilerConf_PathConf_DiskType = 2
)
// Enum value maps for FilerConf_PathConf_DiskType.
var (
FilerConf_PathConf_DiskType_name = map[int32]string{
0: "NONE",
1: "HDD",
2: "SSD",
}
FilerConf_PathConf_DiskType_value = map[string]int32{
"NONE": 0,
"HDD": 1,
"SSD": 2,
}
)
func (x FilerConf_PathConf_DiskType) Enum() *FilerConf_PathConf_DiskType {
p := new(FilerConf_PathConf_DiskType)
*p = x
return p
}
func (x FilerConf_PathConf_DiskType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (FilerConf_PathConf_DiskType) Descriptor() protoreflect.EnumDescriptor {
return file_filer_proto_enumTypes[0].Descriptor()
}
func (FilerConf_PathConf_DiskType) Type() protoreflect.EnumType {
return &file_filer_proto_enumTypes[0]
}
func (x FilerConf_PathConf_DiskType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use FilerConf_PathConf_DiskType.Descriptor instead.
func (FilerConf_PathConf_DiskType) EnumDescriptor() ([]byte, []int) {
return file_filer_proto_rawDescGZIP(), []int{47, 0, 0}
}
type LookupDirectoryEntryRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -3002,6 +3051,62 @@ func (x *KvPutResponse) GetError() string {
return ""
}
// path-based configurations
type FilerConf struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
Locations []*FilerConf_PathConf `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"`
}
func (x *FilerConf) Reset() {
*x = FilerConf{}
if protoimpl.UnsafeEnabled {
mi := &file_filer_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FilerConf) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FilerConf) ProtoMessage() {}
func (x *FilerConf) ProtoReflect() protoreflect.Message {
mi := &file_filer_proto_msgTypes[47]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FilerConf.ProtoReflect.Descriptor instead.
func (*FilerConf) Descriptor() ([]byte, []int) {
return file_filer_proto_rawDescGZIP(), []int{47}
}
func (x *FilerConf) GetVersion() int32 {
if x != nil {
return x.Version
}
return 0
}
func (x *FilerConf) GetLocations() []*FilerConf_PathConf {
if x != nil {
return x.Locations
}
return nil
}
// if found, send the exact address
// if not found, send the full list of existing brokers
type LocateBrokerResponse_Resource struct {
@@ -3016,7 +3121,7 @@ type LocateBrokerResponse_Resource struct {
func (x *LocateBrokerResponse_Resource) Reset() {
*x = LocateBrokerResponse_Resource{}
if protoimpl.UnsafeEnabled {
mi := &file_filer_proto_msgTypes[49]
mi := &file_filer_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3029,7 +3134,7 @@ func (x *LocateBrokerResponse_Resource) String() string {
func (*LocateBrokerResponse_Resource) ProtoMessage() {}
func (x *LocateBrokerResponse_Resource) ProtoReflect() protoreflect.Message {
mi := &file_filer_proto_msgTypes[49]
mi := &file_filer_proto_msgTypes[50]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3059,6 +3164,101 @@ func (x *LocateBrokerResponse_Resource) GetResourceCount() int32 {
return 0
}
type FilerConf_PathConf struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
LocationPrefix string `protobuf:"bytes,1,opt,name=location_prefix,json=locationPrefix,proto3" json:"location_prefix,omitempty"`
Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"`
DiskType FilerConf_PathConf_DiskType `protobuf:"varint,5,opt,name=disk_type,json=diskType,proto3,enum=filer_pb.FilerConf_PathConf_DiskType" json:"disk_type,omitempty"`
Fsync bool `protobuf:"varint,6,opt,name=fsync,proto3" json:"fsync,omitempty"`
VolumeGrowthCount uint32 `protobuf:"varint,7,opt,name=volume_growth_count,json=volumeGrowthCount,proto3" json:"volume_growth_count,omitempty"`
}
func (x *FilerConf_PathConf) Reset() {
*x = FilerConf_PathConf{}
if protoimpl.UnsafeEnabled {
mi := &file_filer_proto_msgTypes[51]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FilerConf_PathConf) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FilerConf_PathConf) ProtoMessage() {}
func (x *FilerConf_PathConf) ProtoReflect() protoreflect.Message {
mi := &file_filer_proto_msgTypes[51]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FilerConf_PathConf.ProtoReflect.Descriptor instead.
func (*FilerConf_PathConf) Descriptor() ([]byte, []int) {
return file_filer_proto_rawDescGZIP(), []int{47, 0}
}
func (x *FilerConf_PathConf) GetLocationPrefix() string {
if x != nil {
return x.LocationPrefix
}
return ""
}
func (x *FilerConf_PathConf) GetCollection() string {
if x != nil {
return x.Collection
}
return ""
}
func (x *FilerConf_PathConf) GetReplication() string {
if x != nil {
return x.Replication
}
return ""
}
func (x *FilerConf_PathConf) GetTtl() string {
if x != nil {
return x.Ttl
}
return ""
}
func (x *FilerConf_PathConf) GetDiskType() FilerConf_PathConf_DiskType {
if x != nil {
return x.DiskType
}
return FilerConf_PathConf_NONE
}
func (x *FilerConf_PathConf) GetFsync() bool {
if x != nil {
return x.Fsync
}
return false
}
func (x *FilerConf_PathConf) GetVolumeGrowthCount() uint32 {
if x != nil {
return x.VolumeGrowthCount
}
return 0
}
var File_filer_proto protoreflect.FileDescriptor
var file_filer_proto_rawDesc = []byte{
@@ -3433,7 +3633,33 @@ var file_filer_proto_rawDesc = []byte{
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x22, 0x25, 0x0a, 0x0d, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x32, 0xdc, 0x0c, 0x0a, 0x0c, 0x53, 0x65,
0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x9d, 0x03, 0x0a, 0x09, 0x46, 0x69,
0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x12, 0x3a, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f,
0x6e, 0x66, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xb9, 0x02,
0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x6f,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65,
0x66, 0x69, 0x78, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01,
0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f,
0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x2e,
0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70,
0x65, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66,
0x73, 0x79, 0x6e, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x73, 0x79, 0x6e,
0x63, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x77,
0x74, 0x68, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11,
0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x43, 0x6f, 0x75, 0x6e,
0x74, 0x22, 0x26, 0x0a, 0x08, 0x44, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a,
0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x44, 0x44, 0x10, 0x01,
0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x44, 0x10, 0x02, 0x32, 0xdc, 0x0c, 0x0a, 0x0c, 0x53, 0x65,
0x61, 0x77, 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f,
0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
@@ -3556,123 +3782,129 @@ func file_filer_proto_rawDescGZIP() []byte {
return file_filer_proto_rawDescData
}
var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 50)
var file_filer_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 52)
var file_filer_proto_goTypes = []interface{}{
(*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest
(*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse
(*ListEntriesRequest)(nil), // 2: filer_pb.ListEntriesRequest
(*ListEntriesResponse)(nil), // 3: filer_pb.ListEntriesResponse
(*Entry)(nil), // 4: filer_pb.Entry
(*FullEntry)(nil), // 5: filer_pb.FullEntry
(*EventNotification)(nil), // 6: filer_pb.EventNotification
(*FileChunk)(nil), // 7: filer_pb.FileChunk
(*FileChunkManifest)(nil), // 8: filer_pb.FileChunkManifest
(*FileId)(nil), // 9: filer_pb.FileId
(*FuseAttributes)(nil), // 10: filer_pb.FuseAttributes
(*CreateEntryRequest)(nil), // 11: filer_pb.CreateEntryRequest
(*CreateEntryResponse)(nil), // 12: filer_pb.CreateEntryResponse
(*UpdateEntryRequest)(nil), // 13: filer_pb.UpdateEntryRequest
(*UpdateEntryResponse)(nil), // 14: filer_pb.UpdateEntryResponse
(*AppendToEntryRequest)(nil), // 15: filer_pb.AppendToEntryRequest
(*AppendToEntryResponse)(nil), // 16: filer_pb.AppendToEntryResponse
(*DeleteEntryRequest)(nil), // 17: filer_pb.DeleteEntryRequest
(*DeleteEntryResponse)(nil), // 18: filer_pb.DeleteEntryResponse
(*AtomicRenameEntryRequest)(nil), // 19: filer_pb.AtomicRenameEntryRequest
(*AtomicRenameEntryResponse)(nil), // 20: filer_pb.AtomicRenameEntryResponse
(*AssignVolumeRequest)(nil), // 21: filer_pb.AssignVolumeRequest
(*AssignVolumeResponse)(nil), // 22: filer_pb.AssignVolumeResponse
(*LookupVolumeRequest)(nil), // 23: filer_pb.LookupVolumeRequest
(*Locations)(nil), // 24: filer_pb.Locations
(*Location)(nil), // 25: filer_pb.Location
(*LookupVolumeResponse)(nil), // 26: filer_pb.LookupVolumeResponse
(*Collection)(nil), // 27: filer_pb.Collection
(*CollectionListRequest)(nil), // 28: filer_pb.CollectionListRequest
(*CollectionListResponse)(nil), // 29: filer_pb.CollectionListResponse
(*DeleteCollectionRequest)(nil), // 30: filer_pb.DeleteCollectionRequest
(*DeleteCollectionResponse)(nil), // 31: filer_pb.DeleteCollectionResponse
(*StatisticsRequest)(nil), // 32: filer_pb.StatisticsRequest
(*StatisticsResponse)(nil), // 33: filer_pb.StatisticsResponse
(*GetFilerConfigurationRequest)(nil), // 34: filer_pb.GetFilerConfigurationRequest
(*GetFilerConfigurationResponse)(nil), // 35: filer_pb.GetFilerConfigurationResponse
(*SubscribeMetadataRequest)(nil), // 36: filer_pb.SubscribeMetadataRequest
(*SubscribeMetadataResponse)(nil), // 37: filer_pb.SubscribeMetadataResponse
(*LogEntry)(nil), // 38: filer_pb.LogEntry
(*KeepConnectedRequest)(nil), // 39: filer_pb.KeepConnectedRequest
(*KeepConnectedResponse)(nil), // 40: filer_pb.KeepConnectedResponse
(*LocateBrokerRequest)(nil), // 41: filer_pb.LocateBrokerRequest
(*LocateBrokerResponse)(nil), // 42: filer_pb.LocateBrokerResponse
(*KvGetRequest)(nil), // 43: filer_pb.KvGetRequest
(*KvGetResponse)(nil), // 44: filer_pb.KvGetResponse
(*KvPutRequest)(nil), // 45: filer_pb.KvPutRequest
(*KvPutResponse)(nil), // 46: filer_pb.KvPutResponse
nil, // 47: filer_pb.Entry.ExtendedEntry
nil, // 48: filer_pb.LookupVolumeResponse.LocationsMapEntry
(*LocateBrokerResponse_Resource)(nil), // 49: filer_pb.LocateBrokerResponse.Resource
(FilerConf_PathConf_DiskType)(0), // 0: filer_pb.FilerConf.PathConf.DiskType
(*LookupDirectoryEntryRequest)(nil), // 1: filer_pb.LookupDirectoryEntryRequest
(*LookupDirectoryEntryResponse)(nil), // 2: filer_pb.LookupDirectoryEntryResponse
(*ListEntriesRequest)(nil), // 3: filer_pb.ListEntriesRequest
(*ListEntriesResponse)(nil), // 4: filer_pb.ListEntriesResponse
(*Entry)(nil), // 5: filer_pb.Entry
(*FullEntry)(nil), // 6: filer_pb.FullEntry
(*EventNotification)(nil), // 7: filer_pb.EventNotification
(*FileChunk)(nil), // 8: filer_pb.FileChunk
(*FileChunkManifest)(nil), // 9: filer_pb.FileChunkManifest
(*FileId)(nil), // 10: filer_pb.FileId
(*FuseAttributes)(nil), // 11: filer_pb.FuseAttributes
(*CreateEntryRequest)(nil), // 12: filer_pb.CreateEntryRequest
(*CreateEntryResponse)(nil), // 13: filer_pb.CreateEntryResponse
(*UpdateEntryRequest)(nil), // 14: filer_pb.UpdateEntryRequest
(*UpdateEntryResponse)(nil), // 15: filer_pb.UpdateEntryResponse
(*AppendToEntryRequest)(nil), // 16: filer_pb.AppendToEntryRequest
(*AppendToEntryResponse)(nil), // 17: filer_pb.AppendToEntryResponse
(*DeleteEntryRequest)(nil), // 18: filer_pb.DeleteEntryRequest
(*DeleteEntryResponse)(nil), // 19: filer_pb.DeleteEntryResponse
(*AtomicRenameEntryRequest)(nil), // 20: filer_pb.AtomicRenameEntryRequest
(*AtomicRenameEntryResponse)(nil), // 21: filer_pb.AtomicRenameEntryResponse
(*AssignVolumeRequest)(nil), // 22: filer_pb.AssignVolumeRequest
(*AssignVolumeResponse)(nil), // 23: filer_pb.AssignVolumeResponse
(*LookupVolumeRequest)(nil), // 24: filer_pb.LookupVolumeRequest
(*Locations)(nil), // 25: filer_pb.Locations
(*Location)(nil), // 26: filer_pb.Location
(*LookupVolumeResponse)(nil), // 27: filer_pb.LookupVolumeResponse
(*Collection)(nil), // 28: filer_pb.Collection
(*CollectionListRequest)(nil), // 29: filer_pb.CollectionListRequest
(*CollectionListResponse)(nil), // 30: filer_pb.CollectionListResponse
(*DeleteCollectionRequest)(nil), // 31: filer_pb.DeleteCollectionRequest
(*DeleteCollectionResponse)(nil), // 32: filer_pb.DeleteCollectionResponse
(*StatisticsRequest)(nil), // 33: filer_pb.StatisticsRequest
(*StatisticsResponse)(nil), // 34: filer_pb.StatisticsResponse
(*GetFilerConfigurationRequest)(nil), // 35: filer_pb.GetFilerConfigurationRequest
(*GetFilerConfigurationResponse)(nil), // 36: filer_pb.GetFilerConfigurationResponse
(*SubscribeMetadataRequest)(nil), // 37: filer_pb.SubscribeMetadataRequest
(*SubscribeMetadataResponse)(nil), // 38: filer_pb.SubscribeMetadataResponse
(*LogEntry)(nil), // 39: filer_pb.LogEntry
(*KeepConnectedRequest)(nil), // 40: filer_pb.KeepConnectedRequest
(*KeepConnectedResponse)(nil), // 41: filer_pb.KeepConnectedResponse
(*LocateBrokerRequest)(nil), // 42: filer_pb.LocateBrokerRequest
(*LocateBrokerResponse)(nil), // 43: filer_pb.LocateBrokerResponse
(*KvGetRequest)(nil), // 44: filer_pb.KvGetRequest
(*KvGetResponse)(nil), // 45: filer_pb.KvGetResponse
(*KvPutRequest)(nil), // 46: filer_pb.KvPutRequest
(*KvPutResponse)(nil), // 47: filer_pb.KvPutResponse
(*FilerConf)(nil), // 48: filer_pb.FilerConf
nil, // 49: filer_pb.Entry.ExtendedEntry
nil, // 50: filer_pb.LookupVolumeResponse.LocationsMapEntry
(*LocateBrokerResponse_Resource)(nil), // 51: filer_pb.LocateBrokerResponse.Resource
(*FilerConf_PathConf)(nil), // 52: filer_pb.FilerConf.PathConf
}
var file_filer_proto_depIdxs = []int32{
4, // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry
4, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry
7, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk
10, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes
47, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry
4, // 5: filer_pb.FullEntry.entry:type_name -> filer_pb.Entry
4, // 6: filer_pb.EventNotification.old_entry:type_name -> filer_pb.Entry
4, // 7: filer_pb.EventNotification.new_entry:type_name -> filer_pb.Entry
9, // 8: filer_pb.FileChunk.fid:type_name -> filer_pb.FileId
9, // 9: filer_pb.FileChunk.source_fid:type_name -> filer_pb.FileId
7, // 10: filer_pb.FileChunkManifest.chunks:type_name -> filer_pb.FileChunk
4, // 11: filer_pb.CreateEntryRequest.entry:type_name -> filer_pb.Entry
4, // 12: filer_pb.UpdateEntryRequest.entry:type_name -> filer_pb.Entry
7, // 13: filer_pb.AppendToEntryRequest.chunks:type_name -> filer_pb.FileChunk
25, // 14: filer_pb.Locations.locations:type_name -> filer_pb.Location
48, // 15: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry
27, // 16: filer_pb.CollectionListResponse.collections:type_name -> filer_pb.Collection
6, // 17: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification
49, // 18: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource
24, // 19: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations
0, // 20: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest
2, // 21: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest
11, // 22: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest
13, // 23: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest
15, // 24: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest
17, // 25: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest
19, // 26: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest
21, // 27: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest
23, // 28: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest
28, // 29: filer_pb.SeaweedFiler.CollectionList:input_type -> filer_pb.CollectionListRequest
30, // 30: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest
32, // 31: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest
34, // 32: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest
36, // 33: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest
36, // 34: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest
39, // 35: filer_pb.SeaweedFiler.KeepConnected:input_type -> filer_pb.KeepConnectedRequest
41, // 36: filer_pb.SeaweedFiler.LocateBroker:input_type -> filer_pb.LocateBrokerRequest
43, // 37: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest
45, // 38: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest
1, // 39: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse
3, // 40: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse
12, // 41: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse
14, // 42: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse
16, // 43: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse
18, // 44: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse
20, // 45: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> filer_pb.AtomicRenameEntryResponse
22, // 46: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse
26, // 47: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse
29, // 48: filer_pb.SeaweedFiler.CollectionList:output_type -> filer_pb.CollectionListResponse
31, // 49: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse
33, // 50: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse
35, // 51: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse
37, // 52: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse
37, // 53: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse
40, // 54: filer_pb.SeaweedFiler.KeepConnected:output_type -> filer_pb.KeepConnectedResponse
42, // 55: filer_pb.SeaweedFiler.LocateBroker:output_type -> filer_pb.LocateBrokerResponse
44, // 56: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse
46, // 57: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse
39, // [39:58] is the sub-list for method output_type
20, // [20:39] is the sub-list for method input_type
20, // [20:20] is the sub-list for extension type_name
20, // [20:20] is the sub-list for extension extendee
0, // [0:20] is the sub-list for field type_name
5, // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry
5, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry
8, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk
11, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes
49, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry
5, // 5: filer_pb.FullEntry.entry:type_name -> filer_pb.Entry
5, // 6: filer_pb.EventNotification.old_entry:type_name -> filer_pb.Entry
5, // 7: filer_pb.EventNotification.new_entry:type_name -> filer_pb.Entry
10, // 8: filer_pb.FileChunk.fid:type_name -> filer_pb.FileId
10, // 9: filer_pb.FileChunk.source_fid:type_name -> filer_pb.FileId
8, // 10: filer_pb.FileChunkManifest.chunks:type_name -> filer_pb.FileChunk
5, // 11: filer_pb.CreateEntryRequest.entry:type_name -> filer_pb.Entry
5, // 12: filer_pb.UpdateEntryRequest.entry:type_name -> filer_pb.Entry
8, // 13: filer_pb.AppendToEntryRequest.chunks:type_name -> filer_pb.FileChunk
26, // 14: filer_pb.Locations.locations:type_name -> filer_pb.Location
50, // 15: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry
28, // 16: filer_pb.CollectionListResponse.collections:type_name -> filer_pb.Collection
7, // 17: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification
51, // 18: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource
52, // 19: filer_pb.FilerConf.locations:type_name -> filer_pb.FilerConf.PathConf
25, // 20: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations
0, // 21: filer_pb.FilerConf.PathConf.disk_type:type_name -> filer_pb.FilerConf.PathConf.DiskType
1, // 22: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest
3, // 23: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest
12, // 24: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest
14, // 25: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest
16, // 26: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest
18, // 27: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest
20, // 28: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest
22, // 29: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest
24, // 30: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest
29, // 31: filer_pb.SeaweedFiler.CollectionList:input_type -> filer_pb.CollectionListRequest
31, // 32: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest
33, // 33: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest
35, // 34: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest
37, // 35: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest
37, // 36: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest
40, // 37: filer_pb.SeaweedFiler.KeepConnected:input_type -> filer_pb.KeepConnectedRequest
42, // 38: filer_pb.SeaweedFiler.LocateBroker:input_type -> filer_pb.LocateBrokerRequest
44, // 39: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest
46, // 40: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest
2, // 41: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse
4, // 42: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse
13, // 43: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse
15, // 44: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse
17, // 45: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse
19, // 46: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse
21, // 47: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> filer_pb.AtomicRenameEntryResponse
23, // 48: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse
27, // 49: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse
30, // 50: filer_pb.SeaweedFiler.CollectionList:output_type -> filer_pb.CollectionListResponse
32, // 51: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse
34, // 52: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse
36, // 53: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse
38, // 54: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse
38, // 55: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse
41, // 56: filer_pb.SeaweedFiler.KeepConnected:output_type -> filer_pb.KeepConnectedResponse
43, // 57: filer_pb.SeaweedFiler.LocateBroker:output_type -> filer_pb.LocateBrokerResponse
45, // 58: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse
47, // 59: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse
41, // [41:60] is the sub-list for method output_type
22, // [22:41] is the sub-list for method input_type
22, // [22:22] is the sub-list for extension type_name
22, // [22:22] is the sub-list for extension extendee
0, // [0:22] is the sub-list for field type_name
}
func init() { file_filer_proto_init() }
@@ -4245,7 +4477,19 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FilerConf); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_filer_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*LocateBrokerResponse_Resource); i {
case 0:
return &v.state
@@ -4257,19 +4501,32 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FilerConf_PathConf); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_filer_proto_rawDesc,
NumEnums: 0,
NumMessages: 50,
NumEnums: 1,
NumMessages: 52,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_filer_proto_goTypes,
DependencyIndexes: file_filer_proto_depIdxs,
EnumInfos: file_filer_proto_enumTypes,
MessageInfos: file_filer_proto_msgTypes,
}.Build()
File_filer_proto = out.File

26
weed/pb/filer_pb/filer_pb_helper.go

@@ -8,6 +8,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/golang/protobuf/proto"
"github.com/viant/ptrie"
)
func ToFileIdObject(fileIdStr string) (*FileId, error) {
@@ -121,3 +123,27 @@ func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest
}
var ErrNotFound = errors.New("filer: no entry is found in filer store")
func IsCreate(event *SubscribeMetadataResponse) bool {
return event.EventNotification.NewEntry != nil && event.EventNotification.OldEntry == nil
}
func IsUpdate(event *SubscribeMetadataResponse) bool {
return event.EventNotification.NewEntry != nil &&
event.EventNotification.OldEntry != nil &&
event.Directory == event.EventNotification.NewParentPath
}
func IsDelete(event *SubscribeMetadataResponse) bool {
return event.EventNotification.NewEntry == nil && event.EventNotification.OldEntry != nil
}
func IsRename(event *SubscribeMetadataResponse) bool {
return event.EventNotification.NewEntry != nil &&
event.EventNotification.OldEntry != nil &&
event.Directory != event.EventNotification.NewParentPath
}
var _ = ptrie.KeyProvider(&FilerConf_PathConf{})
func (fp *FilerConf_PathConf) Key() interface{} {
key, _ := proto.Marshal(fp)
return string(key)
}
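The four predicates above partition every metadata event; a subscriber can dispatch on them like this (event is a *filer_pb.SubscribeMetadataResponse; handler bodies omitted):

switch {
case filer_pb.IsCreate(event):
	// only NewEntry is set
case filer_pb.IsDelete(event):
	// only OldEntry is set
case filer_pb.IsUpdate(event):
	// both set, parent directory unchanged
case filer_pb.IsRename(event):
	// both set, NewParentPath differs from Directory
}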

6
weed/pb/master.proto

@@ -130,6 +130,7 @@ message VolumeLocation {
repeated uint32 new_vids = 3;
repeated uint32 deleted_vids = 4;
string leader = 5; // optional when leader is not itself
string data_center = 6; // optional when DataCenter is in use
}
message LookupVolumeRequest {
@@ -187,11 +188,6 @@ message StatisticsResponse {
//
// collection related
//
message StorageType {
string replication = 1;
string ttl = 2;
}
message Collection {
string name = 1;
}

1001
weed/pb/master_pb/master.pb.go
File diff suppressed because it is too large

44
weed/s3api/auth_credentials.go

@@ -3,10 +3,11 @@ package s3api
import (
"bytes"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"io/ioutil"
"net/http"
xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/golang/protobuf/jsonpb"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -128,8 +129,14 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt
}
return func(w http.ResponseWriter, r *http.Request) {
errCode := iam.authRequest(r, action)
identity, errCode := iam.authRequest(r, action)
if errCode == s3err.ErrNone {
if identity != nil && identity.Name != "" {
r.Header.Set(xhttp.AmzIdentityId, identity.Name)
if identity.isAdmin() {
r.Header.Set(xhttp.AmzIsAdmin, "true")
}
}
f(w, r)
return
}
@@ -138,16 +145,16 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt
}
// check whether the request has valid access keys
func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) s3err.ErrorCode {
func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) (*Identity, s3err.ErrorCode) {
var identity *Identity
var s3Err s3err.ErrorCode
var found bool
switch getRequestAuthType(r) {
case authTypeStreamingSigned:
return s3err.ErrNone
return identity, s3err.ErrNone
case authTypeUnknown:
glog.V(3).Infof("unknown auth type")
return s3err.ErrAccessDenied
return identity, s3err.ErrAccessDenied
case authTypePresignedV2, authTypeSignedV2:
glog.V(3).Infof("v2 auth type")
identity, s3Err = iam.isReqAuthenticatedV2(r)
@@ -156,22 +163,22 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
identity, s3Err = iam.reqSignatureV4Verify(r)
case authTypePostPolicy:
glog.V(3).Infof("post policy auth type")
return s3err.ErrNone
return identity, s3err.ErrNone
case authTypeJWT:
glog.V(3).Infof("jwt auth type")
return s3err.ErrNotImplemented
return identity, s3err.ErrNotImplemented
case authTypeAnonymous:
identity, found = iam.lookupAnonymous()
if !found {
return s3err.ErrAccessDenied
return identity, s3err.ErrAccessDenied
}
default:
return s3err.ErrNotImplemented
return identity, s3err.ErrNotImplemented
}
glog.V(3).Infof("auth error: %v", s3Err)
if s3Err != s3err.ErrNone {
return s3Err
return identity, s3Err
}
glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions)
@@ -179,19 +186,17 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
bucket, _ := getBucketAndObject(r)
if !identity.canDo(action, bucket) {
return s3err.ErrAccessDenied
return identity, s3err.ErrAccessDenied
}
return s3err.ErrNone
return identity, s3err.ErrNone
}
func (identity *Identity) canDo(action Action, bucket string) bool {
for _, a := range identity.Actions {
if a == "Admin" {
if identity.isAdmin() {
return true
}
}
for _, a := range identity.Actions {
if a == action {
return true
@@ -208,3 +213,12 @@ func (identity *Identity) canDo(action Action, bucket string) bool {
}
return false
}
func (identity *Identity) isAdmin() bool {
for _, a := range identity.Actions {
if a == "Admin" {
return true
}
}
return false
}
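With the refactor above, an identity whose Actions contain "Admin" short-circuits every permission check; a small illustration (the Identity literals are simplified and hypothetical):

admin := &Identity{Name: "ops", Actions: []Action{"Admin"}}
writer := &Identity{Name: "app", Actions: []Action{"Write"}}
_ = admin.canDo("Read", "any-bucket")  // true via isAdmin()
_ = writer.canDo("Read", "any-bucket") // false under the exact-match path shown above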

10
weed/s3api/filer_util.go

@@ -3,13 +3,14 @@ package s3api
import (
"context"
"fmt"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
proto "github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"strings"
"github.com/chrislusf/seaweedfs/weed/util"
)
func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
@@ -78,6 +79,11 @@ func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isD
}
func (s3a *S3ApiServer) getEntry(parentDirectoryPath, entryName string) (entry *filer_pb.Entry, err error) {
fullPath := util.NewFullPath(parentDirectoryPath, entryName)
return filer_pb.GetEntry(s3a, fullPath)
}
func loadS3config(iam *IdentityAccessManagement, option *S3ApiServerOption) error {
return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)

6
weed/s3api/http/header.go

@@ -28,3 +28,9 @@ const (
AmzObjectTagging = "X-Amz-Tagging"
AmzTagCount = "x-amz-tagging-count"
)
// Non-Standard S3 HTTP request constants
const (
AmzIdentityId = "x-amz-identity-id"
AmzIsAdmin = "x-amz-is-admin" // only set to http request header as a context
)

78
weed/s3api/s3api_bucket_handlers.go

@@ -4,11 +4,13 @@ import (
"context"
"encoding/xml"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"math"
"net/http"
"time"
xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
@@ -33,9 +35,14 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
return
}
identityId := r.Header.Get(xhttp.AmzIdentityId)
var buckets []*s3.Bucket
for _, entry := range entries {
if entry.IsDirectory {
if !s3a.hasAccess(r, entry) {
continue
}
buckets = append(buckets, &s3.Bucket{
Name: aws.String(entry.Name),
CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0).UTC()),
@@ -45,8 +52,8 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
response = ListAllMyBucketsResult{
Owner: &s3.Owner{
ID: aws.String(""),
DisplayName: aws.String(""),
ID: aws.String(identityId),
DisplayName: aws.String(identityId),
},
Buckets: buckets,
}
@@ -80,13 +87,25 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
if exist, err := s3a.exists(s3a.option.BucketsPath, bucket, true); err == nil && exist {
errCode = s3err.ErrBucketAlreadyExists
}
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
fn := func(entry *filer_pb.Entry) {
if identityId := r.Header.Get(xhttp.AmzIdentityId); identityId != "" {
if entry.Extended == nil {
entry.Extended = make(map[string][]byte)
}
entry.Extended[xhttp.AmzIdentityId] = []byte(identityId)
}
}
// create the folder for bucket, but lazily create actual collection
if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil {
if err := s3a.mkdir(s3a.option.BucketsPath, bucket, fn); err != nil {
glog.Errorf("PutBucketHandler mkdir: %v", err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
@@ -99,6 +118,11 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
bucket, _ := getBucketAndObject(r)
if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
writeErrorResponse(w, err, r.URL)
return
}
err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
// delete collection
@@ -128,28 +152,40 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
bucket, _ := getBucketAndObject(r)
err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: s3a.option.BucketsPath,
Name: bucket,
if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
writeErrorResponse(w, err, r.URL)
return
}
glog.V(1).Infof("lookup bucket: %v", request)
if _, err := filer_pb.LookupEntry(client, request); err != nil {
if err == filer_pb.ErrNotFound {
return filer_pb.ErrNotFound
}
return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err)
writeSuccessResponseEmpty(w)
}
func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorCode {
entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket)
if entry == nil || err == filer_pb.ErrNotFound {
return s3err.ErrNoSuchBucket
}
return nil
})
if !s3a.hasAccess(r, entry) {
return s3err.ErrAccessDenied
}
return s3err.ErrNone
}
if err != nil {
writeErrorResponse(w, s3err.ErrNoSuchBucket, r.URL)
return
func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
isAdmin := r.Header.Get(xhttp.AmzIsAdmin) != ""
if isAdmin {
return true
}
if entry.Extended == nil {
return true
}
writeSuccessResponseEmpty(w)
identityId := r.Header.Get(xhttp.AmzIdentityId)
if id, ok := entry.Extended[xhttp.AmzIdentityId]; ok {
if identityId != string(id) {
return false
}
}
return true
}
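Condensed, the ownership rule the handlers above implement, using the headers stamped by Auth earlier in this commit (helper name is hypothetical):

// A request may touch a bucket when any of these hold:
//   1. the authenticated identity is an admin (x-amz-is-admin set),
//   2. the bucket entry carries no owner metadata (pre-existing buckets),
//   3. the stored x-amz-identity-id matches the requester's identity.
func mayTouch(r *http.Request, entry *filer_pb.Entry) bool {
	if r.Header.Get(xhttp.AmzIsAdmin) != "" || entry.Extended == nil {
		return true
	}
	owner, recorded := entry.Extended[xhttp.AmzIdentityId]
	return !recorded || r.Header.Get(xhttp.AmzIdentityId) == string(owner)
}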

3
weed/s3api/s3api_object_copy_handlers.go

@@ -2,6 +2,7 @@ package s3api
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net/http"
"net/url"
@@ -47,6 +48,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
}
defer util.CloseResponse(resp)
glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body)
if errCode != s3err.ErrNone {
@@ -127,6 +129,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
}
defer dataReader.Close()
glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)
if errCode != s3err.ErrNone {

18
weed/s3api/s3api_object_handlers.go

@@ -113,12 +113,6 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque

 	bucket, object := getBucketAndObject(r)

-	response, _ := s3a.listFilerEntries(bucket, object, 1, "", "/")
-	if len(response.Contents) != 0 && strings.HasSuffix(object, "/") {
-		w.WriteHeader(http.StatusNoContent)
-		return
-	}
-
 	destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true",
 		s3a.option.Filer, s3a.option.BucketsPath, bucket, object)

@@ -266,11 +260,6 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des

 	resp, postErr := client.Do(proxyReq)

-	if resp.ContentLength == -1 && !strings.HasSuffix(destUrl, "/") {
-		writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
-		return
-	}
-
 	if postErr != nil {
 		glog.Errorf("post to filer: %v", postErr)
 		writeErrorResponse(w, s3err.ErrInternalError, r.URL)
@@ -278,6 +267,11 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 	}
 	defer util.CloseResponse(resp)

+	if (resp.ContentLength == -1 || resp.StatusCode == 404) && !strings.HasSuffix(destUrl, "/") {
+		writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
+		return
+	}
+
 	responseFn(resp, w)
 }
@@ -323,7 +317,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader

 	resp_body, ra_err := ioutil.ReadAll(resp.Body)
 	if ra_err != nil {
-		glog.Errorf("upload to filer response read: %v", ra_err)
+		glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
 		return etag, s3err.ErrInternalError
 	}
 	var ret weed_server.FilerPostResult

16
weed/s3api/s3api_object_multipart_handlers.go

@@ -2,6 +2,7 @@ package s3api

 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	"net/http"
 	"net/url"
@@ -28,13 +29,13 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
 		Key:    objectKey(aws.String(object)),
 	})

+	glog.V(2).Info("NewMultipartUploadHandler", string(encodeResponse(response)), errCode)
+
 	if errCode != s3err.ErrNone {
 		writeErrorResponse(w, errCode, r.URL)
 		return
 	}

-	// println("NewMultipartUploadHandler", string(encodeResponse(response)))
-
 	writeSuccessResponseXML(w, encodeResponse(response))
 }
@@ -52,7 +53,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
 		UploadId: aws.String(uploadID),
 	})

-	// println("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode)
+	glog.V(2).Info("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode)

 	if errCode != s3err.ErrNone {
 		writeErrorResponse(w, errCode, r.URL)
@@ -81,7 +82,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
 		return
 	}

-	// println("AbortMultipartUploadHandler", string(encodeResponse(response)))
+	glog.V(2).Info("AbortMultipartUploadHandler", string(encodeResponse(response)))

 	writeSuccessResponseXML(w, encodeResponse(response))
@@ -114,13 +115,14 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
 		UploadIdMarker: aws.String(uploadIDMarker),
 	})

+	glog.V(2).Info("ListMultipartUploadsHandler", string(encodeResponse(response)), errCode)
+
 	if errCode != s3err.ErrNone {
 		writeErrorResponse(w, errCode, r.URL)
 		return
 	}

 	// TODO handle encodingType
-	// println("ListMultipartUploadsHandler", string(encodeResponse(response)))

 	writeSuccessResponseXML(w, encodeResponse(response))
 }
@@ -147,13 +149,13 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
 		UploadId: aws.String(uploadID),
 	})

+	glog.V(2).Info("ListObjectPartsHandler", string(encodeResponse(response)), errCode)
+
 	if errCode != s3err.ErrNone {
 		writeErrorResponse(w, errCode, r.URL)
 		return
 	}

-	// println("ListObjectPartsHandler", string(encodeResponse(response)))
-
 	writeSuccessResponseXML(w, encodeResponse(response))
 }

3
weed/server/common.go

@@ -38,11 +38,13 @@ func init() {

 func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) (err error) {
 	var bytes []byte
+	if obj != nil {
 		if r.FormValue("pretty") != "" {
 			bytes, err = json.MarshalIndent(obj, "", "  ")
 		} else {
 			bytes, err = json.Marshal(obj)
 		}
+	}
 	if err != nil {
 		return
 	}
@@ -125,6 +127,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
 	ar := &operation.VolumeAssignRequest{
 		Count:       count,
 		DataCenter:  r.FormValue("dataCenter"),
+		Rack:        r.FormValue("rack"),
 		Replication: r.FormValue("replication"),
 		Collection:  r.FormValue("collection"),
 		Ttl:         r.FormValue("ttl"),

60
weed/server/filer_grpc_server.go

@@ -156,7 +156,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr

 	resp = &filer_pb.CreateEntryResponse{}

-	chunks, garbage, err2 := fs.cleanupChunks(nil, req.Entry)
+	chunks, garbage, err2 := fs.cleanupChunks(util.Join(req.Directory, req.Entry.Name), nil, req.Entry)
 	if err2 != nil {
 		return &filer_pb.CreateEntryResponse{}, fmt.Errorf("CreateEntry cleanupChunks %s %s: %v", req.Directory, req.Entry.Name, err2)
 	}
@@ -190,7 +190,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
 		return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err)
 	}

-	chunks, garbage, err2 := fs.cleanupChunks(entry, req.Entry)
+	chunks, garbage, err2 := fs.cleanupChunks(fullpath, entry, req.Entry)
 	if err2 != nil {
 		return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("UpdateEntry cleanupChunks %s: %v", fullpath, err2)
 	}
@@ -240,7 +240,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
 	return &filer_pb.UpdateEntryResponse{}, err
 }

-func (fs *FilerServer) cleanupChunks(existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) {
+func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) {

 	// remove old chunks if not included in the new ones
 	if existingEntry != nil {
@@ -257,7 +257,14 @@ func (fs *FilerServer) cleanupChunks(existingEntry *filer.Entry, newEntry *filer
 	garbage = append(garbage, coveredChunks...)

 	if newEntry.Attributes != nil {
-		chunks, err = filer.MaybeManifestize(fs.saveAsChunk(newEntry.Attributes.Replication, newEntry.Attributes.Collection, "", "", needle.SecondsToTTL(newEntry.Attributes.TtlSec), false), chunks)
+		so := fs.detectStorageOption(fullpath,
+			newEntry.Attributes.Collection,
+			newEntry.Attributes.Replication,
+			newEntry.Attributes.TtlSec,
+			"",
+			"",
+		)
+		chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), chunks)
 		if err != nil {
 			// not good, but should be ok
 			glog.V(0).Infof("MaybeManifestize: %v", err)
@@ -275,7 +282,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo

 	fullpath := util.NewFullPath(req.Directory, req.EntryName)
 	var offset int64 = 0
-	entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath))
+	entry, err := fs.filer.FindEntry(ctx, fullpath)
 	if err == filer_pb.ErrNotFound {
 		entry = &filer.Entry{
 			FullPath: fullpath,
@@ -297,8 +304,8 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
 	}

 	entry.Chunks = append(entry.Chunks, req.Chunks...)
-
-	entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(entry.Replication, entry.Collection, "", "", needle.SecondsToTTL(entry.TtlSec), false), entry.Chunks)
+	so := fs.detectStorageOption(string(fullpath), entry.Collection, entry.Replication, entry.TtlSec, "", "")
+	entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.Chunks)
 	if err != nil {
 		// not good, but should be ok
 		glog.V(0).Infof("MaybeManifestize: %v", err)
@@ -323,41 +330,10 @@ func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntr

 func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) {

-	ttlStr := ""
-	if req.TtlSec > 0 {
-		ttlStr = strconv.Itoa(int(req.TtlSec))
-	}
-	collection, replication, _ := fs.detectCollection(req.Path, req.Collection, req.Replication)
-
-	var altRequest *operation.VolumeAssignRequest
-
-	dataCenter := req.DataCenter
-	if dataCenter == "" {
-		dataCenter = fs.option.DataCenter
-	}
-	rack := req.Rack
-	if rack == "" {
-		rack = fs.option.Rack
-	}
-
-	assignRequest := &operation.VolumeAssignRequest{
-		Count:       uint64(req.Count),
-		Replication: replication,
-		Collection:  collection,
-		Ttl:         ttlStr,
-		DataCenter:  dataCenter,
-		Rack:        rack,
-	}
-	if dataCenter != "" || rack != "" {
-		altRequest = &operation.VolumeAssignRequest{
-			Count:       uint64(req.Count),
-			Replication: replication,
-			Collection:  collection,
-			Ttl:         ttlStr,
-			DataCenter:  "",
-			Rack:        "",
-		}
-	}
+	so := fs.detectStorageOption(req.Path, req.Collection, req.Replication, req.TtlSec, req.DataCenter, req.Rack)
+
+	assignRequest, altRequest := so.ToAssignRequests(int(req.Count))

 	assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest)
 	if err != nil {
 		glog.V(3).Infof("AssignVolume: %v", err)
@@ -374,8 +350,8 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
 		Url:         assignResult.Url,
 		PublicUrl:   assignResult.PublicUrl,
 		Auth:        string(assignResult.Auth),
-		Collection:  collection,
-		Replication: replication,
+		Collection:  so.Collection,
+		Replication: so.Replication,
 	}, nil
 }
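
operation.StorageOption and its ToAssignRequests method are introduced elsewhere in this commit, but the deleted lines above spell out the contract: one assign request carrying the data-center and rack preference, plus a location-free fallback request whenever a preference was given, so assignment can still succeed when the preferred location has no capacity. A hedged, self-contained sketch of that contract (trimmed stand-in structs; the real method may differ in detail):

package sketch

// trimmed stand-ins for operation.VolumeAssignRequest and operation.StorageOption
type VolumeAssignRequest struct {
	Count       uint64
	Replication string
	Collection  string
	Ttl         string
	DataCenter  string
	Rack        string
}

type StorageOption struct {
	Replication string
	Collection  string
	Ttl         string // rendered ttl such as "7d"; the real struct keeps TtlSeconds
	DataCenter  string
	Rack        string
}

// ToAssignRequests as reconstructed from the deleted AssignVolume code: a primary
// request with the location preference, and a fallback without one
func (so *StorageOption) ToAssignRequests(count int) (ar, altRequest *VolumeAssignRequest) {
	ar = &VolumeAssignRequest{
		Count:       uint64(count),
		Replication: so.Replication,
		Collection:  so.Collection,
		Ttl:         so.Ttl,
		DataCenter:  so.DataCenter,
		Rack:        so.Rack,
	}
	if so.DataCenter != "" || so.Rack != "" {
		altRequest = &VolumeAssignRequest{
			Count:       uint64(count),
			Replication: so.Replication,
			Collection:  so.Collection,
			Ttl:         so.Ttl,
		}
	}
	return
}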

6
weed/server/filer_server.go

@@ -89,7 +89,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
 		glog.Fatal("master list is required!")
 	}

-	fs.filer = filer.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, func() {
+	fs.filer = filer.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, option.DataCenter, func() {
 		fs.listenersCond.Broadcast()
 	})
 	fs.filer.Cipher = option.Cipher
@@ -114,6 +114,8 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
 	fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete")
 	v.SetDefault("filer.options.buckets_folder", "/buckets")
 	fs.filer.DirBucketsPath = v.GetString("filer.options.buckets_folder")
+	// TODO deprecated, will be removed after 2020-12-31
+	// replaced by https://github.com/chrislusf/seaweedfs/wiki/Path-Specific-Configuration
 	fs.filer.FsyncBuckets = v.GetStringSlice("filer.options.buckets_fsync")
 	fs.filer.LoadConfiguration(v)
@@ -131,6 +133,8 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)

 	fs.filer.LoadBuckets()

+	fs.filer.LoadFilerConf()
+
 	grace.OnInterrupt(func() {
 		fs.filer.Shutdown()
 	})

8
weed/server/filer_server_handlers.go

@@ -26,11 +26,19 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {
 		stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds())
 	case "DELETE":
 		stats.FilerRequestCounter.WithLabelValues("delete").Inc()
+		if _, ok := r.URL.Query()["tagging"]; ok {
+			fs.DeleteTaggingHandler(w, r)
+		} else {
 			fs.DeleteHandler(w, r)
+		}
 		stats.FilerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds())
 	case "PUT":
 		stats.FilerRequestCounter.WithLabelValues("put").Inc()
+		if _, ok := r.URL.Query()["tagging"]; ok {
+			fs.PutTaggingHandler(w, r)
+		} else {
 			fs.PostHandler(w, r)
+		}
 		stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds())
 	case "POST":
 		stats.FilerRequestCounter.WithLabelValues("post").Inc()

7
weed/server/filer_server_handlers_read.go

@@ -94,10 +94,15 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 		}
 	}

+	// print out the header from extended properties
+	for k, v := range entry.Extended {
+		w.Header().Set(k, string(v))
+	}
+
 	//set tag count
 	if r.Method == "GET" {
 		tagCount := 0
-		for k, _ := range entry.Extended {
+		for k := range entry.Extended {
 			if strings.HasPrefix(k, xhttp.AmzObjectTagging+"-") {
 				tagCount++
 			}

102
weed/server/filer_server_handlers_tagging.go

@@ -0,0 +1,102 @@
package weed_server

import (
	"context"
	"net/http"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// add or replace one file's Seaweed- prefixed attributes
// curl -X PUT -H "Seaweed-Name1: value1" http://localhost:8888/path/to/a/file?tagging
func (fs *FilerServer) PutTaggingHandler(w http.ResponseWriter, r *http.Request) {

	ctx := context.Background()

	path := r.URL.Path
	if strings.HasSuffix(path, "/") {
		path = path[:len(path)-1]
	}

	existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path))
	if err != nil {
		writeJsonError(w, r, http.StatusNotFound, err)
		return
	}
	if existingEntry == nil {
		writeJsonError(w, r, http.StatusNotFound, err)
		return
	}

	if existingEntry.Extended == nil {
		existingEntry.Extended = make(map[string][]byte)
	}

	for header, values := range r.Header {
		if strings.HasPrefix(header, needle.PairNamePrefix) {
			for _, value := range values {
				existingEntry.Extended[header] = []byte(value)
			}
		}
	}

	if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil); dbErr != nil {
		glog.V(0).Infof("failed to update %s tagging: %v", path, dbErr)
		writeJsonError(w, r, http.StatusInternalServerError, dbErr)
		return
	}

	writeJsonQuiet(w, r, http.StatusAccepted, nil)
}

// remove all Seaweed- prefixed attributes
// curl -X DELETE http://localhost:8888/path/to/a/file?tagging
func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Request) {

	ctx := context.Background()

	path := r.URL.Path
	if strings.HasSuffix(path, "/") {
		path = path[:len(path)-1]
	}

	existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path))
	if err != nil {
		writeJsonError(w, r, http.StatusNotFound, err)
		return
	}
	if existingEntry == nil {
		writeJsonError(w, r, http.StatusNotFound, err)
		return
	}

	if existingEntry.Extended == nil {
		existingEntry.Extended = make(map[string][]byte)
	}

	hasDeletion := false
	for header := range existingEntry.Extended {
		if strings.HasPrefix(header, needle.PairNamePrefix) {
			delete(existingEntry.Extended, header)
			hasDeletion = true
		}
	}
	if !hasDeletion {
		writeJsonQuiet(w, r, http.StatusNotModified, nil)
		return
	}

	if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil); dbErr != nil {
		glog.V(0).Infof("failed to delete %s tagging: %v", path, dbErr)
		writeJsonError(w, r, http.StatusInternalServerError, dbErr)
		return
	}

	writeJsonQuiet(w, r, http.StatusAccepted, nil)
}
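
These two handlers make file tagging a plain HTTP affair, mirroring the curl examples in the comments. A small client-side sketch (the localhost:8888 filer address and the Seaweed-Color header are illustrative; any header with the Seaweed- prefix is stored into the entry's extended attributes):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	client := &http.Client{}
	fileUrl := "http://localhost:8888/path/to/a/file" // hypothetical existing file

	// add or replace one Seaweed- prefixed attribute
	req, _ := http.NewRequest(http.MethodPut, fileUrl+"?tagging", nil)
	req.Header.Set("Seaweed-Color", "blue")
	if resp, err := client.Do(req); err == nil {
		fmt.Println("put tagging:", resp.Status) // 202 Accepted on success
		resp.Body.Close()
	}

	// remove all Seaweed- prefixed attributes
	req, _ = http.NewRequest(http.MethodDelete, fileUrl+"?tagging", nil)
	if resp, err := client.Do(req); err == nil {
		fmt.Println("delete tagging:", resp.Status) // 202 Accepted, or 304 if none existed
		resp.Body.Close()
	}
}

Together with the GetOrHeadHandler change above, a subsequent GET of the file echoes the stored attributes back as response headers.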

96
weed/server/filer_server_handlers_write.go

@@ -29,30 +29,13 @@ type FilerPostResult struct {
 	Url   string `json:"url,omitempty"`
 }

-func (fs *FilerServer) assignNewFileInfo(replication, collection, dataCenter, rack, ttlString string, fsync bool) (fileId, urlLocation string, auth security.EncodedJwt, err error) {
+func (fs *FilerServer) assignNewFileInfo(so *operation.StorageOption) (fileId, urlLocation string, auth security.EncodedJwt, err error) {

 	stats.FilerRequestCounter.WithLabelValues("assign").Inc()
 	start := time.Now()
 	defer func() { stats.FilerRequestHistogram.WithLabelValues("assign").Observe(time.Since(start).Seconds()) }()

-	ar := &operation.VolumeAssignRequest{
-		Count:       1,
-		Replication: replication,
-		Collection:  collection,
-		Ttl:         ttlString,
-		DataCenter:  dataCenter,
-	}
-	var altRequest *operation.VolumeAssignRequest
-	if dataCenter != "" || rack != "" {
-		altRequest = &operation.VolumeAssignRequest{
-			Count:       1,
-			Replication: replication,
-			Collection:  collection,
-			Ttl:         ttlString,
-			DataCenter:  "",
-			Rack:        "",
-		}
-	}
+	ar, altRequest := so.ToAssignRequests(1)

 	assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest)
 	if ae != nil {
@@ -62,7 +45,7 @@ func (fs *FilerServer) assignNewFileInfo(replication, collection, dataCenter, ra
 	}
 	fileId = assignResult.Fid
 	urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid
-	if fsync {
+	if so.Fsync {
 		urlLocation += "?fsync=true"
 	}
 	auth = assignResult.Auth
@@ -74,25 +57,15 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {

 	ctx := context.Background()

 	query := r.URL.Query()
-	collection, replication, fsync := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication"))
-	dataCenter := query.Get("dataCenter")
-	if dataCenter == "" {
-		dataCenter = fs.option.DataCenter
-	}
-	rack := query.Get("rack")
-	if dataCenter == "" {
-		rack = fs.option.Rack
-	}
-	ttlString := r.URL.Query().Get("ttl")
-
-	// read ttl in seconds
-	ttl, err := needle.ReadTTL(ttlString)
-	ttlSeconds := int32(0)
-	if err == nil {
-		ttlSeconds = int32(ttl.Minutes()) * 60
-	}
-
-	fs.autoChunk(ctx, w, r, replication, collection, dataCenter, rack, ttlSeconds, ttlString, fsync)
+	so := fs.detectStorageOption0(r.RequestURI,
+		query.Get("collection"),
+		query.Get("replication"),
+		query.Get("ttl"),
+		query.Get("dataCenter"),
+		query.Get("rack"),
+	)
+
+	fs.autoChunk(ctx, w, r, so)
 }
@@ -130,21 +103,12 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusNoContent)
 }

-func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string, fsync bool) {
-	// default
-	collection = fs.option.Collection
-	replication = fs.option.DefaultReplication
-
-	// get default collection settings
-	if qCollection != "" {
-		collection = qCollection
-	}
-	if qReplication != "" {
-		replication = qReplication
-	}
+func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication string, ttlSeconds int32, dataCenter, rack string) *operation.StorageOption {
+	collection := util.Nvl(qCollection, fs.option.Collection)
+	replication := util.Nvl(qReplication, fs.option.DefaultReplication)

 	// required by buckets folder
-	bucketDefaultReplication := ""
+	bucketDefaultReplication, fsync := "", false
 	if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") {
 		bucketAndObjectKey := requestURI[len(fs.filer.DirBucketsPath)+1:]
 		t := strings.Index(bucketAndObjectKey, "/")
@@ -160,5 +124,33 @@ func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication st
 		replication = bucketDefaultReplication
 	}

-	return
+	rule := fs.filer.FilerConf.MatchStorageRule(requestURI)
+
+	if ttlSeconds == 0 {
+		ttl, err := needle.ReadTTL(rule.GetTtl())
+		if err != nil {
+			glog.Errorf("fail to parse %s ttl setting %s: %v", rule.LocationPrefix, rule.Ttl, err)
+		}
+		ttlSeconds = int32(ttl.Minutes()) * 60
+	}
+
+	return &operation.StorageOption{
+		Replication:       util.Nvl(replication, rule.Replication),
+		Collection:        util.Nvl(collection, rule.Collection),
+		DataCenter:        util.Nvl(dataCenter, fs.option.DataCenter),
+		Rack:              util.Nvl(rack, fs.option.Rack),
+		TtlSeconds:        ttlSeconds,
+		Fsync:             fsync || rule.Fsync,
+		VolumeGrowthCount: rule.VolumeGrowthCount,
+	}
+}
+
+func (fs *FilerServer) detectStorageOption0(requestURI, qCollection, qReplication string, qTtl string, dataCenter, rack string) *operation.StorageOption {
+	ttl, err := needle.ReadTTL(qTtl)
+	if err != nil {
+		glog.Errorf("fail to parse ttl %s: %v", qTtl, err)
+	}
+
+	return fs.detectStorageOption(requestURI, qCollection, qReplication, int32(ttl.Minutes())*60, dataCenter, rack)
}
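
The net effect of detectStorageOption is a layered fallback per setting: an explicit query parameter wins, then the filer's command-line default, then the bucket default, and the matched path rule from the filer configuration only fills whatever is still empty. A runnable sketch of that chain for replication, using a local copy of util.Nvl (all values hypothetical):

package main

import "fmt"

// nvl mirrors util.Nvl from this change: return the first non-empty string
func nvl(values ...string) string {
	for _, s := range values {
		if s != "" {
			return s
		}
	}
	return ""
}

func main() {
	qReplication := ""       // no ?replication= query parameter
	serverDefault := "000"   // filer command-line -defaultReplication
	ruleReplication := "001" // replication on the matched filer.conf path rule

	replication := nvl(qReplication, serverDefault)
	replication = nvl(replication, ruleReplication)
	fmt.Println(replication) // "000": the path rule fills gaps, it does not override
}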

55
weed/server/filer_server_handlers_write_autochunk.go

@@ -21,10 +21,11 @@ import (
 	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/stats"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )

-func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, replication string, collection string, dataCenter string, rack string, ttlSec int32, ttlString string, fsync bool) {
+func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) {

 	// autoChunking can be set at the command-line level or as a query param. Query param overrides command-line
 	query := r.URL.Query()
@@ -50,10 +51,10 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
 		if r.Header.Get("Content-Type") == "" && strings.HasSuffix(r.URL.Path, "/") {
 			reply, err = fs.mkdir(ctx, w, r)
 		} else {
-			reply, md5bytes, err = fs.doPostAutoChunk(ctx, w, r, chunkSize, replication, collection, dataCenter, rack, ttlSec, ttlString, fsync)
+			reply, md5bytes, err = fs.doPostAutoChunk(ctx, w, r, chunkSize, so)
 		}
 	} else {
-		reply, md5bytes, err = fs.doPutAutoChunk(ctx, w, r, chunkSize, replication, collection, dataCenter, rack, ttlSec, ttlString, fsync)
+		reply, md5bytes, err = fs.doPutAutoChunk(ctx, w, r, chunkSize, so)
 	}
 	if err != nil {
 		writeJsonError(w, r, http.StatusInternalServerError, err)
@@ -65,7 +66,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
 	}
 }

-func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, replication string, collection string, dataCenter string, rack string, ttlSec int32, ttlString string, fsync bool) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {
+func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {

 	multipartReader, multipartReaderErr := r.MultipartReader()
 	if multipartReaderErr != nil {
@@ -86,46 +87,46 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
 		contentType = ""
 	}

-	fileChunks, md5Hash, chunkOffset, err := fs.uploadReaderToChunks(w, r, part1, chunkSize, replication, collection, dataCenter, rack, ttlString, fileName, contentType, fsync)
+	fileChunks, md5Hash, chunkOffset, err := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, so)
 	if err != nil {
 		return nil, nil, err
 	}

-	fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(replication, collection, dataCenter, rack, ttlString, fsync), fileChunks)
+	fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), fileChunks)
 	if replyerr != nil {
 		glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
 		return
 	}

 	md5bytes = md5Hash.Sum(nil)
-	filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, replication, collection, ttlSec, contentType, md5bytes, fileChunks, chunkOffset)
+	filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset)

 	return
 }

-func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, replication string, collection string, dataCenter string, rack string, ttlSec int32, ttlString string, fsync bool) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {
+func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {

 	fileName := ""
 	contentType := ""

-	fileChunks, md5Hash, chunkOffset, err := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, replication, collection, dataCenter, rack, ttlString, fileName, contentType, fsync)
+	fileChunks, md5Hash, chunkOffset, err := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, so)
 	if err != nil {
 		return nil, nil, err
 	}

-	fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(replication, collection, dataCenter, rack, ttlString, fsync), fileChunks)
+	fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), fileChunks)
 	if replyerr != nil {
 		glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
 		return
 	}

 	md5bytes = md5Hash.Sum(nil)
-	filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, replication, collection, ttlSec, contentType, md5bytes, fileChunks, chunkOffset)
+	filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset)

 	return
 }

-func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, replication string, collection string, ttlSec int32, contentType string, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64) (filerResult *FilerPostResult, replyerr error) {
+func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64) (filerResult *FilerPostResult, replyerr error) {

 	// detect file mode
 	modeStr := r.URL.Query().Get("mode")
@@ -162,9 +163,9 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 			Mode:        os.FileMode(mode),
 			Uid:         OS_UID,
 			Gid:         OS_GID,
-			Replication: replication,
-			Collection:  collection,
-			TtlSec:      ttlSec,
+			Replication: so.Replication,
+			Collection:  so.Collection,
+			TtlSec:      so.TtlSeconds,
 			Mime:        contentType,
 			Md5:         md5bytes,
 			FileSize:    uint64(chunkOffset),
@@ -177,8 +178,18 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		Size: chunkOffset,
 	}

+	if entry.Extended == nil {
+		entry.Extended = make(map[string][]byte)
+	}
+
 	fs.saveAmzMetaData(r, entry)

+	for k, v := range r.Header {
+		if len(v) > 0 && strings.HasPrefix(k, needle.PairNamePrefix) {
+			entry.Extended[k] = []byte(v[0])
+		}
+	}
+
 	if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil {
 		fs.filer.DeleteChunks(entry.Chunks)
 		replyerr = dbErr
@@ -188,7 +199,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 	return filerResult, replyerr
 }

-func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, replication string, collection string, dataCenter string, rack string, ttlString string, fileName string, contentType string, fsync bool) ([]*filer_pb.FileChunk, hash.Hash, int64, error) {
+func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error) {
 	var fileChunks []*filer_pb.FileChunk

 	md5Hash := md5.New()
@@ -200,7 +211,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 		limitedReader := io.LimitReader(partReader, int64(chunkSize))

 		// assign one file id for one chunk
-		fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(replication, collection, dataCenter, rack, ttlString, fsync)
+		fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(so)
 		if assignErr != nil {
 			return nil, nil, 0, assignErr
 		}
@@ -244,11 +255,11 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht
 	return uploadResult, err
 }

-func (fs *FilerServer) saveAsChunk(replication string, collection string, dataCenter string, rack string, ttlString string, fsync bool) filer.SaveDataAsChunkFunctionType {
+func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAsChunkFunctionType {

 	return func(reader io.Reader, name string, offset int64) (*filer_pb.FileChunk, string, string, error) {
 		// assign one file id for one chunk
-		fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(replication, collection, dataCenter, rack, ttlString, fsync)
+		fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(so)
 		if assignErr != nil {
 			return nil, "", "", assignErr
 		}
@@ -259,7 +270,7 @@ func (fs *FilerServer) saveAsChunk(replication string, collection string, dataCe
 			return nil, "", "", uploadErr
 		}

-		return uploadResult.ToPbFileChunk(fileId, offset), collection, replication, nil
+		return uploadResult.ToPbFileChunk(fileId, offset), so.Collection, so.Replication, nil
	}
 }
@@ -314,10 +325,6 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http

 func (fs *FilerServer) saveAmzMetaData(r *http.Request, entry *filer.Entry) {

-	if entry.Extended == nil {
-		entry.Extended = make(map[string][]byte)
-	}
-
 	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
 		entry.Extended[xhttp.AmzStorageClass] = []byte(sc)
 	}

12
weed/server/filer_server_handlers_write_cipher.go

@@ -16,12 +16,12 @@ import (
 )

 // handling single chunk POST or PUT upload
-func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, replication string, collection string, dataCenter string, rack string, ttlSeconds int32, ttlString string, fsync bool) (filerResult *FilerPostResult, err error) {
+func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) (filerResult *FilerPostResult, err error) {

-	fileId, urlLocation, auth, err := fs.assignNewFileInfo(replication, collection, dataCenter, rack, ttlString, fsync)
+	fileId, urlLocation, auth, err := fs.assignNewFileInfo(so)

 	if err != nil || fileId == "" || urlLocation == "" {
-		return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
+		return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, so.Collection, so.DataCenter)
 	}

 	glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)
@@ -65,9 +65,9 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
 			Mode:        0660,
 			Uid:         OS_UID,
 			Gid:         OS_GID,
-			Replication: replication,
-			Collection:  collection,
-			TtlSec:      ttlSeconds,
+			Replication: so.Replication,
+			Collection:  so.Collection,
+			TtlSec:      so.TtlSeconds,
 			Mime:        pu.MimeType,
 			Md5:         util.Base64Md5ToBytes(pu.ContentMd5),
 		},

2
weed/server/master_grpc_server.go

@@ -88,6 +88,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
 			message := &master_pb.VolumeLocation{
 				Url:        dn.Url(),
 				PublicUrl:  dn.PublicUrl,
+				DataCenter: string(dn.GetDataCenter().Id()),
 			}
 			if len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 {
 				// process delta volume ids if exists for fast volume id updates
@@ -148,7 +149,6 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
 		}
 	}

 	if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 {
-
 		ms.clientChansLock.RLock()
 		for host, ch := range ms.clientChans {

2
weed/server/master_server.go

@@ -93,7 +93,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
 		preallocateSize: preallocateSize,
 		clientChans:     make(map[string]chan *master_pb.VolumeLocation),
 		grpcDialOption:  grpcDialOption,
-		MasterClient:    wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, peers),
+		MasterClient:    wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, "", peers),
 		adminLocks:      NewAdminLocks(),
 	}
 	ms.bounedLeaderChan = make(chan int, 16)

1
weed/server/volume_grpc_client_to_master.go

@@ -203,6 +203,7 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
 			}
 		case <-volumeTickChan:
 			glog.V(4).Infof("volume server %s:%d heartbeat", vs.store.Ip, vs.store.Port)
+			vs.store.MaybeAdjustVolumeMax()
 			if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
 				glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
 				return "", err

151
weed/shell/command_fs_configure.go

@@ -0,0 +1,151 @@
package shell

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"math"
	"net/http"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func init() {
	Commands = append(Commands, &commandFsConfigure{})
}

type commandFsConfigure struct {
}

func (c *commandFsConfigure) Name() string {
	return "fs.configure"
}

func (c *commandFsConfigure) Help() string {
	return `configure and apply storage options for each location

	# see the current configuration file content
	fs.configure

	# try the changes and preview the resulting configuration file content
	fs.configure -locationPrefix=/my/folder -collection=abc
	fs.configure -locationPrefix=/my/folder -collection=abc -ttl=7d

	# example: configure adding only 1 physical volume for each bucket collection
	fs.configure -locationPrefix=/buckets/ -volumeGrowthCount=1

	# apply the changes
	fs.configure -locationPrefix=/my/folder -collection=abc -apply

	# delete the changes
	fs.configure -locationPrefix=/my/folder -delete -apply
`
}

func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	fsConfigureCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	locationPrefix := fsConfigureCommand.String("locationPrefix", "", "path prefix, required to update the path-specific configuration")
	collection := fsConfigureCommand.String("collection", "", "assign writes to this collection")
	replication := fsConfigureCommand.String("replication", "", "assign writes with this replication")
	ttl := fsConfigureCommand.String("ttl", "", "assign writes with this ttl")
	fsync := fsConfigureCommand.Bool("fsync", false, "fsync for the writes")
	volumeGrowthCount := fsConfigureCommand.Int("volumeGrowthCount", 0, "the number of physical volumes to add if no writable volumes")
	isDelete := fsConfigureCommand.Bool("delete", false, "delete the configuration by locationPrefix")
	apply := fsConfigureCommand.Bool("apply", false, "update and apply filer configuration")
	if err = fsConfigureCommand.Parse(args); err != nil {
		return nil
	}

	var buf bytes.Buffer
	if err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.LookupDirectoryEntryRequest{
			Directory: filer.DirectoryEtc,
			Name:      filer.FilerConfName,
		}
		respLookupEntry, err := filer_pb.LookupEntry(client, request)
		if err != nil {
			return err
		}

		return filer.StreamContent(commandEnv.MasterClient, &buf, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)

	}); err != nil {
		return err
	}

	fc := filer.NewFilerConf()
	if err = fc.LoadFromBytes(buf.Bytes()); err != nil {
		return err
	}

	if *locationPrefix != "" {
		locConf := &filer_pb.FilerConf_PathConf{
			LocationPrefix:    *locationPrefix,
			Collection:        *collection,
			Replication:       *replication,
			Ttl:               *ttl,
			Fsync:             *fsync,
			VolumeGrowthCount: uint32(*volumeGrowthCount),
		}

		// check collection
		if *collection != "" && strings.HasPrefix(*locationPrefix, "/buckets/") {
			return fmt.Errorf("one s3 bucket goes to one collection and is not customizable")
		}

		// check replication
		if *replication != "" {
			rp, err := super_block.NewReplicaPlacementFromString(*replication)
			if err != nil {
				return fmt.Errorf("parse replication %s: %v", *replication, err)
			}
			if *volumeGrowthCount%rp.GetCopyCount() != 0 {
				return fmt.Errorf("volumeGrowthCount %d should be divisible by the replication copy count %d", *volumeGrowthCount, rp.GetCopyCount())
			}
		}

		// save it
		if *isDelete {
			fc.DeleteLocationConf(*locationPrefix)
		} else {
			fc.AddLocationConf(locConf)
		}
	}

	buf.Reset()
	fc.ToText(&buf)

	fmt.Fprint(writer, buf.String())
	fmt.Fprintln(writer)

	if *apply {

		target := fmt.Sprintf("http://%s:%d%s/%s", commandEnv.option.FilerHost, commandEnv.option.FilerPort, filer.DirectoryEtc, filer.FilerConfName)

		// set the HTTP method, url, and request body
		req, err := http.NewRequest(http.MethodPut, target, &buf)
		if err != nil {
			return err
		}

		// set the request Content-Type header
		req.Header.Set("Content-Type", "text/plain; charset=utf-8")
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		util.CloseResponse(resp)
	}

	return nil
}
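
The configuration travels as text: the command streams the existing entry (filer.DirectoryEtc / filer.FilerConfName) from the filer, edits it in memory, prints it, and PUTs it back when -apply is set. A hedged sketch of building the same path rule programmatically with the types used above (the commented output is protobuf text format, so the exact field spelling is an assumption):

package main

import (
	"bytes"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	fc := filer.NewFilerConf()
	fc.AddLocationConf(&filer_pb.FilerConf_PathConf{
		LocationPrefix:    "/buckets/",
		VolumeGrowthCount: 1, // grow bucket collections one physical volume at a time
	})

	var buf bytes.Buffer
	fc.ToText(&buf) // roughly:
	// locations {
	//   location_prefix: "/buckets/"
	//   volume_growth_count: 1
	// }
	fmt.Print(buf.String())
}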

22
weed/shell/command_volume_fix_replication.go

@@ -369,18 +369,20 @@ func countReplicas(replicas []*VolumeReplica) (diffDc, diffRack, diffNode map[st

 func pickOneReplicaToDelete(replicas []*VolumeReplica, replicaPlacement *super_block.ReplicaPlacement) *VolumeReplica {

-	allSame := true
-	oldest := replicas[0]
-	for _, replica := range replicas {
-		if replica.info.ModifiedAtSecond < oldest.info.ModifiedAtSecond {
-			oldest = replica
-			allSame = false
-		}
-	}
-	if !allSame {
-		return oldest
-	}
+	sort.Slice(replicas, func(i, j int) bool {
+		a, b := replicas[i], replicas[j]
+		if a.info.CompactRevision != b.info.CompactRevision {
+			return a.info.CompactRevision < b.info.CompactRevision
+		}
+		if a.info.ModifiedAtSecond != b.info.ModifiedAtSecond {
+			return a.info.ModifiedAtSecond < b.info.ModifiedAtSecond
+		}
+		if a.info.Size != b.info.Size {
+			return a.info.Size < b.info.Size
+		}
+		return false
+	})

-	// TODO what if all the replicas have the same timestamp?
-	return oldest
+	return replicas[0]
 }
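
The comparator orders candidates lexicographically on three keys, so the replica picked for deletion has the lowest compact revision, with ties broken by oldest modification time, then smallest size. A standalone sketch of the same multi-key ordering (the replica struct is a stand-in for the fields compared above):

package main

import (
	"fmt"
	"sort"
)

// stand-in for the replica info fields compared above
type replica struct {
	CompactRevision  uint32
	ModifiedAtSecond int64
	Size             uint64
}

func main() {
	replicas := []replica{
		{CompactRevision: 2, ModifiedAtSecond: 100, Size: 10},
		{CompactRevision: 1, ModifiedAtSecond: 300, Size: 99},
		{CompactRevision: 1, ModifiedAtSecond: 100, Size: 50},
	}
	sort.Slice(replicas, func(i, j int) bool {
		a, b := replicas[i], replicas[j]
		if a.CompactRevision != b.CompactRevision {
			return a.CompactRevision < b.CompactRevision
		}
		if a.ModifiedAtSecond != b.ModifiedAtSecond {
			return a.ModifiedAtSecond < b.ModifiedAtSecond
		}
		return a.Size < b.Size
	})
	fmt.Println(replicas[0]) // {1 100 50}: lowest revision, then oldest, then smallest
}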

2
weed/shell/commands.go

@@ -45,7 +45,7 @@ var (
 func NewCommandEnv(options ShellOptions) *CommandEnv {
 	ce := &CommandEnv{
 		env:          make(map[string]string),
-		MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, pb.AdminShellClient, "", 0, strings.Split(*options.Masters, ",")),
+		MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, pb.AdminShellClient, "", 0, "", strings.Split(*options.Masters, ",")),
 		option:       options,
 	}
 	ce.locker = exclusive_locks.NewExclusiveLocker(ce.MasterClient)

3
weed/storage/disk_location.go

@@ -19,6 +19,7 @@ import (
 type DiskLocation struct {
 	Directory              string
 	MaxVolumeCount         int
+	OriginalMaxVolumeCount int
 	MinFreeSpacePercent    float32
 	volumes                map[needle.VolumeId]*Volume
 	volumesLock            sync.RWMutex
@@ -31,7 +32,7 @@ type DiskLocation struct {
 }

 func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpacePercent float32) *DiskLocation {
-	location := &DiskLocation{Directory: dir, MaxVolumeCount: maxVolumeCount, MinFreeSpacePercent: minFreeSpacePercent}
+	location := &DiskLocation{Directory: dir, MaxVolumeCount: maxVolumeCount, OriginalMaxVolumeCount: maxVolumeCount, MinFreeSpacePercent: minFreeSpacePercent}
 	location.volumes = make(map[needle.VolumeId]*Volume)
 	location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
 	go location.CheckDiskSpace()

7
weed/storage/store.go

@@ -461,7 +461,8 @@ func (s *Store) GetVolumeSizeLimit() uint64 {
 func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
 	volumeSizeLimit := s.GetVolumeSizeLimit()
 	for _, diskLocation := range s.Locations {
-		if diskLocation.MaxVolumeCount == 0 {
+		if diskLocation.OriginalMaxVolumeCount == 0 {
+			currentMaxVolumeCount := diskLocation.MaxVolumeCount
 			diskStatus := stats.NewDiskStatus(diskLocation.Directory)
 			unusedSpace := diskLocation.UnUsedSpace(volumeSizeLimit)
 			unclaimedSpaces := int64(diskStatus.Free) - int64(unusedSpace)
@@ -471,9 +472,9 @@ func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
 				maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1
 			}
 			diskLocation.MaxVolumeCount = maxVolumeCount
-			glog.V(0).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
+			glog.V(2).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
 				diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024)
-			hasChanges = true
+			hasChanges = hasChanges || currentMaxVolumeCount != diskLocation.MaxVolumeCount
 		}
 	}
 	return

1
weed/topology/store_replicate.go

@@ -81,6 +81,7 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.Volume
 			}

 			// volume server do not know about encryption
+			// TODO optimize here to compress data only once
 			_, err := operation.UploadData(u.String(), string(n.Name), false, n.Data, n.IsCompressed(), string(n.Mime), pairMap, jwt)
 			return err
 		}); err != nil {

2
weed/topology/volume_layout.go

@@ -132,6 +132,8 @@ func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSi
 }

 func (vl *VolumeLayout) String() string {
+	vl.accessLock.RLock()
+	defer vl.accessLock.RUnlock()
 	return fmt.Sprintf("rp:%v, ttl:%v, vid2location:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.vid2location, vl.writables, vl.volumeSizeLimit)
 }

2
weed/util/constants.go

@@ -5,7 +5,7 @@ import (
 )

 var (
-	VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 8)
+	VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 11)
 	COMMIT  = ""
 )

18
weed/util/http_util.go

@@ -1,7 +1,6 @@
 package util

 import (
-	"bytes"
 	"compress/gzip"
 	"encoding/json"
 	"errors"
@@ -29,22 +28,6 @@ func init() {
 	}
 }

-func PostBytes(url string, body []byte) ([]byte, error) {
-	r, err := client.Post(url, "", bytes.NewReader(body))
-	if err != nil {
-		return nil, fmt.Errorf("Post to %s: %v", url, err)
-	}
-	defer r.Body.Close()
-	b, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, fmt.Errorf("Read response body: %v", err)
-	}
-	if r.StatusCode >= 400 {
-		return nil, fmt.Errorf("%s: %s", url, r.Status)
-	}
-	return b, nil
-}
-
 func Post(url string, values url.Values) ([]byte, error) {
 	r, err := client.PostForm(url, values)
 	if err != nil {
@@ -370,7 +353,6 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e
 	if err != nil {
 		return nil, err
 	}
-	defer CloseResponse(r)
 	if r.StatusCode >= 400 {
 		return nil, fmt.Errorf("%s: %s", fileUrl, r.Status)
 	}

10
weed/util/retry.go

@@ -29,3 +29,13 @@ func Retry(name string, job func() error) (err error) {
 	}
 	return err
 }
+
+// Nvl returns the first non-empty string
+func Nvl(values ...string) string {
+	for _, s := range values {
+		if s != "" {
+			return s
+		}
+	}
+	return ""
+}

7
weed/wdclient/masterclient.go

@@ -24,14 +24,14 @@ type MasterClient struct {
 	vidMap
 }

-func NewMasterClient(grpcDialOption grpc.DialOption, clientType string, clientHost string, clientGrpcPort uint32, masters []string) *MasterClient {
+func NewMasterClient(grpcDialOption grpc.DialOption, clientType string, clientHost string, clientGrpcPort uint32, clientDataCenter string, masters []string) *MasterClient {
 	return &MasterClient{
 		clientType:     clientType,
 		clientHost:     clientHost,
 		grpcPort:       clientGrpcPort,
 		masters:        masters,
 		grpcDialOption: grpcDialOption,
-		vidMap:         newVidMap(),
+		vidMap:         newVidMap(clientDataCenter),
 	}
 }
@@ -89,7 +89,7 @@ func (mc *MasterClient) tryAllMasters() {
 		}

 		mc.currentMaster = ""
-		mc.vidMap = newVidMap()
+		mc.vidMap = newVidMap("")
 	}
 }
@@ -132,6 +132,7 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri
 				loc := Location{
 					Url:        volumeLocation.Url,
 					PublicUrl:  volumeLocation.PublicUrl,
+					DataCenter: volumeLocation.DataCenter,
 				}
 				for _, newVid := range volumeLocation.NewVids {
 					glog.V(1).Infof("%s: %s masterClient adds volume %d", mc.clientType, loc.Url, newVid)

10
weed/wdclient/vid_map.go

@@ -18,18 +18,20 @@ const (
 type Location struct {
 	Url        string `json:"url,omitempty"`
 	PublicUrl  string `json:"publicUrl,omitempty"`
+	DataCenter string `json:"dataCenter,omitempty"`
 }

 type vidMap struct {
 	sync.RWMutex
 	vid2Locations map[uint32][]Location
+	DataCenter    string
 	cursor        int32
 }

-func newVidMap() vidMap {
+func newVidMap(dataCenter string) vidMap {
 	return vidMap{
 		vid2Locations: make(map[uint32][]Location),
+		DataCenter:    dataCenter,
 		cursor:        -1,
 	}
 }
@@ -56,7 +58,11 @@ func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrls []string, err er
 		return nil, fmt.Errorf("volume %d not found", id)
 	}
 	for _, loc := range locations {
+		if vc.DataCenter == "" || loc.DataCenter == "" || vc.DataCenter != loc.DataCenter {
 			serverUrls = append(serverUrls, loc.Url)
+		} else {
+			serverUrls = append([]string{loc.Url}, serverUrls...)
+		}
 	}
 	return
 }
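
With the DataCenter threaded through from the master heartbeat to the client's vidMap, lookups now front-load volume servers in the client's own data center: a same-DC location is prepended, everything else is appended. A self-contained sketch of the ordering rule (the data-center names are made up):

package main

import "fmt"

type location struct{ url, dataCenter string }

// same ordering rule as vidMap.LookupVolumeServerUrl above
func order(clientDC string, locations []location) (urls []string) {
	for _, loc := range locations {
		if clientDC == "" || loc.dataCenter == "" || clientDC != loc.dataCenter {
			urls = append(urls, loc.url)
		} else {
			urls = append([]string{loc.url}, urls...)
		}
	}
	return
}

func main() {
	locs := []location{
		{"volume1:8080", "dc-east"},
		{"volume2:8080", "dc-west"},
		{"volume3:8080", "dc-west"},
	}
	// a client in dc-west reads from its local replicas first
	fmt.Println(order("dc-west", locs)) // [volume3:8080 volume2:8080 volume1:8080]
}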

2
weed/wdclient/vid_map_test.go

@@ -45,7 +45,7 @@ func TestLocationIndex(t *testing.T) {
 	mustOk(7, maxCursorIndex, 0)

 	// test with constructor
-	vm = newVidMap()
+	vm = newVidMap("")
 	length := 7
 	for i := 0; i < 100; i++ {
 		got, err := vm.getLocationIndex(length)
