
Merge pull request #76 from chrislusf/master

sync
hilimd, 4 years ago, committed by GitHub
parent commit aabea37af1
  1. .github/ISSUE_TEMPLATE.md (0 lines changed)
  2. .github/workflows/release.yml (8 lines changed)
  3. .travis.yml (1 line changed)
  4. README.md (6 lines changed)
  5. docker/Dockerfile (3 lines changed)
  6. docker/Makefile (4 lines changed)
  7. docker/compose/local-dev-compose.yml (3 lines changed)
  8. docker/compose/local-mount-compose.yml (2 lines changed)
  9. go.mod (9 lines changed)
  10. go.sum (14 lines changed)
  11. k8s/seaweedfs/Chart.yaml (4 lines changed)
  12. k8s/seaweedfs/templates/volume-service.yaml (2 lines changed)
  13. k8s/seaweedfs/values.yaml (2 lines changed)
  14. other/java/client/src/main/java/seaweedfs/client/FilerClient.java (52 lines changed)
  15. other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java (5 lines changed)
  16. other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java (11 lines changed)
  17. other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile2.java (22 lines changed)
  18. unmaintained/repeated_vacuum/repeated_vacuum.go (2 lines changed)
  19. weed/Makefile (2 lines changed)
  20. weed/command/command.go (2 lines changed)
  21. weed/command/export.go (2 lines changed)
  22. weed/command/filer.go (27 lines changed)
  23. weed/command/filer_cat.go (2 lines changed)
  24. weed/command/filer_copy.go (113 lines changed)
  25. weed/command/gateway.go (93 lines changed)
  26. weed/command/iam.go (97 lines changed)
  27. weed/command/mount_std.go (7 lines changed)
  28. weed/command/scaffold.go (2 lines changed)
  29. weed/command/server.go (9 lines changed)
  30. weed/command/volume.go (28 lines changed)
  31. weed/filer/abstract_sql/abstract_sql_store.go (4 lines changed)
  32. weed/filer/filechunks.go (6 lines changed)
  33. weed/filer/filer_buckets.go (2 lines changed)
  34. weed/filer/filer_conf.go (1 line changed)
  35. weed/filer/filer_delete_entry.go (2 lines changed)
  36. weed/filer/filer_notify.go (4 lines changed)
  37. weed/filer/filer_on_meta_event.go (2 lines changed)
  38. weed/filer/filer_search.go (34 lines changed)
  39. weed/filer/filerstore_wrapper.go (2 lines changed)
  40. weed/filer/leveldb/leveldb_store_test.go (6 lines changed)
  41. weed/filer/leveldb2/leveldb2_store_test.go (6 lines changed)
  42. weed/filer/leveldb3/leveldb3_store_test.go (6 lines changed)
  43. weed/filer/read_write.go (10 lines changed)
  44. weed/filer/reader_at.go (22 lines changed)
  45. weed/filer/reader_at_test.go (5 lines changed)
  46. weed/filer/rocksdb/rocksdb_store_test.go (6 lines changed)
  47. weed/filer/stream.go (26 lines changed)
  48. weed/filesys/dir.go (305 lines changed)
  49. weed/filesys/dir_link.go (21 lines changed)
  50. weed/filesys/dir_rename.go (21 lines changed)
  51. weed/filesys/dirty_page.go (3 lines changed)
  52. weed/filesys/file.go (104 lines changed)
  53. weed/filesys/filehandle.go (49 lines changed)
  54. weed/filesys/meta_cache/meta_cache.go (4 lines changed)
  55. weed/filesys/meta_cache/meta_cache_init.go (8 lines changed)
  56. weed/filesys/wfs.go (70 lines changed)
  57. weed/filesys/wfs_write.go (59 lines changed)
  58. weed/glog/glog.go (2 lines changed)
  59. weed/iamapi/iamapi_handlers.go (105 lines changed)
  60. weed/iamapi/iamapi_management_handlers.go (453 lines changed)
  61. weed/iamapi/iamapi_response.go (103 lines changed)
  62. weed/iamapi/iamapi_server.go (149 lines changed)
  63. weed/iamapi/iamapi_test.go (181 lines changed)
  64. weed/messaging/broker/broker_append.go (45 lines changed)
  65. weed/operation/assign_file_id.go (1 line changed)
  66. weed/operation/upload_content.go (11 lines changed)
  67. weed/pb/master.proto (1 line changed)
  68. weed/pb/master_pb/master.pb.go (206 lines changed)
  69. weed/replication/sink/filersink/fetch_write.go (45 lines changed)
  70. weed/s3api/auth_credentials.go (28 lines changed)
  71. weed/s3api/auth_signature_v4.go (30 lines changed)
  72. weed/s3api/auto_signature_v4_test.go (2 lines changed)
  73. weed/s3api/chunked_reader_v4.go (4 lines changed)
  74. weed/s3api/s3api_object_handlers.go (8 lines changed)
  75. weed/s3api/s3api_objects_list_handlers.go (15 lines changed)
  76. weed/server/common.go (8 lines changed)
  77. weed/server/filer_grpc_server.go (2 lines changed)
  78. weed/server/filer_grpc_server_rename.go (2 lines changed)
  79. weed/server/filer_server_handlers_read.go (12 lines changed)
  80. weed/server/filer_server_handlers_read_dir.go (3 lines changed)
  81. weed/server/filer_server_handlers_tagging.go (20 lines changed)
  82. weed/server/filer_server_handlers_write_autochunk.go (8 lines changed)
  83. weed/server/filer_server_handlers_write_upload.go (169 lines changed)
  84. weed/server/filer_ui/breadcrumb.go (2 lines changed)
  85. weed/server/filer_ui/templates.go (2 lines changed)
  86. weed/server/gateway_server.go (106 lines changed)
  87. weed/server/master_grpc_server_admin.go (21 lines changed)
  88. weed/server/master_grpc_server_volume.go (102 lines changed)
  89. weed/server/master_server.go (22 lines changed)
  90. weed/server/master_server_handlers.go (20 lines changed)
  91. weed/server/master_server_handlers_admin.go (10 lines changed)
  92. weed/server/master_server_handlers_ui.go (12 lines changed)
  93. weed/server/master_ui/templates.go (10 lines changed)
  94. weed/server/volume_grpc_vacuum.go (9 lines changed)
  95. weed/server/volume_server.go (4 lines changed)
  96. weed/server/volume_server_handlers_read.go (7 lines changed)
  97. weed/server/volume_server_handlers_write.go (3 lines changed)
  98. weed/server/volume_server_ui/templates.go (2 lines changed)
  99. weed/server/webdav_server.go (41 lines changed)
  100. weed/shell/command_ec_encode.go (47 lines changed)

.github/ISSUE_TEMPLATE/bug_report.md → .github/ISSUE_TEMPLATE.md (renamed, 0 lines changed)

.github/workflows/release.yml (8 lines changed)

@ -33,8 +33,9 @@ jobs:
run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}
- name: Go Release Binaries
uses: wangyoucao577/go-release-action@v1.14
uses: wangyoucao577/go-release-action@v1.17
with:
goversion: 1.16
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
@ -49,8 +50,9 @@ jobs:
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries
uses: wangyoucao577/go-release-action@v1.14
uses: wangyoucao577/go-release-action@v1.17
with:
goversion: 1.16
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
@ -60,5 +62,5 @@ jobs:
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed-
binary_name: weed
asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

.travis.yml (1 line changed)

@ -1,7 +1,6 @@
sudo: false
language: go
go:
- 1.15.x
- 1.16.x
before_install:

README.md (6 lines changed)

@ -68,12 +68,16 @@ Table of Contents
* [License](#license)
## Quick Start ##
## Quick Start with single binary ##
* Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`
* Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.
Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it!
## Quick Start for S3 API on Docker ##
`docker run -p 8333:8333 chrislusf/seaweedfs server -s3`
## Introduction ##
SeaweedFS is a simple and highly scalable distributed file system. There are two objectives:

docker/Dockerfile (3 lines changed)

@ -1,6 +1,7 @@
FROM alpine
ARG RELEASE=latest # 'latest' or 'dev'
# 'latest' or 'dev'
ARG RELEASE=latest
RUN \
ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \

docker/Makefile (4 lines changed)

@ -5,7 +5,9 @@ all: gen
gen: dev
binary:
cd ../weed; CGO_ENABLED=0 GOOS=linux go build -ldflags "-extldflags -static"; mv weed ../docker/
export SWCOMMIT=$(shell git rev-parse --short HEAD)
export SWLDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(SWCOMMIT)"
cd ../weed; CGO_ENABLED=0 GOOS=linux go build -ldflags "-extldflags -static $(SWLDFLAGS)"; mv weed ../docker/
build: binary
docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
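
The new SWLDFLAGS line in the Makefile above relies on Go's -X linker flag, which overwrites a package-level string variable at link time so the binary can report the commit it was built from. A minimal sketch of the mechanism; the standalone main package is hypothetical, while in SeaweedFS the variable is util.COMMIT per the Makefile:

    // Build with:
    //   go build -ldflags "-X main.COMMIT=$(git rev-parse --short HEAD)"
    // and the linker fills in COMMIT; without the flag it stays empty.
    package main

    import "fmt"

    // COMMIT is intentionally uninitialized; the linker's -X flag sets it.
    var COMMIT string

    func main() {
        fmt.Println("built from commit:", COMMIT)
    }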

docker/compose/local-dev-compose.yml (3 lines changed)

@ -26,9 +26,10 @@ services:
filer:
image: chrislusf/seaweedfs:local
ports:
- 8111:8111
- 8888:8888
- 18888:18888
command: '-v=1 filer -master="master:9333"'
command: '-v=1 filer -master="master:9333" -iam'
depends_on:
- master
- volume

docker/compose/local-mount-compose.yml (2 lines changed)

@ -38,7 +38,7 @@ services:
mount_2:
image: chrislusf/seaweedfs:local
privileged: true
entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1 -volumeServerAcess=publicUrl"'
entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1"'
depends_on:
- master
- volume

go.mod (9 lines changed)

@ -15,7 +15,7 @@ require (
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
github.com/bwmarrin/snowflake v0.3.0
github.com/cespare/xxhash v1.1.0
github.com/chrislusf/raft v1.0.4
github.com/chrislusf/raft v1.0.6
github.com/coreos/go-semver v0.3.0 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/disintegration/imaging v1.6.2
@ -39,9 +39,10 @@ require (
github.com/google/uuid v1.1.1
github.com/gorilla/mux v1.7.4
github.com/gorilla/websocket v1.4.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
github.com/jcmturner/gofork v1.0.0 // indirect
github.com/jinzhu/copier v0.2.8
github.com/json-iterator/go v1.1.10
github.com/karlseguin/ccache v2.0.3+incompatible // indirect
github.com/karlseguin/ccache/v2 v2.0.7
@ -61,7 +62,7 @@ require (
github.com/prometheus/client_golang v1.3.0
github.com/rakyll/statik v0.1.7
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
github.com/seaweedfs/fuse v1.1.3
github.com/seaweedfs/fuse v1.1.6
github.com/seaweedfs/goexif v1.0.2
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/spaolacci/murmur3 v1.1.0 // indirect
@ -89,7 +90,7 @@ require (
gocloud.dev/pubsub/rabbitpubsub v0.20.0
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sync v0.0.0-20200930132711-30421366ff76 // indirect
golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd
golang.org/x/tools v0.0.0-20200608174601-1b747fd94509
google.golang.org/api v0.26.0

go.sum (14 lines changed)

@ -155,6 +155,10 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chrislusf/raft v1.0.4 h1:THhbsVik2hxdE0/VXX834f64Wn9RzgVPp+E+XCWZdKM=
github.com/chrislusf/raft v1.0.4/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
github.com/chrislusf/raft v1.0.5 h1:g8GxKCSStfm0/bGBDpNEbmEXL6MJkpXX+NI0ksbX5D4=
github.com/chrislusf/raft v1.0.5/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
github.com/chrislusf/raft v1.0.6 h1:wunb85WWhMKhNRn7EmdIw35D4Lmew0ZJv8oYDizR/+Y=
github.com/chrislusf/raft v1.0.6/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@ -435,6 +439,8 @@ github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjL
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jinzhu/copier v0.2.8 h1:N8MbL5niMwE3P4dOwurJixz5rMkKfujmMRFmAanSzWE=
github.com/jinzhu/copier v0.2.8/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@ -687,6 +693,12 @@ github.com/seaweedfs/fuse v1.1.1 h1:WD51YFJcBViOx8I89jeqPD+vAKl4EowzBy9GUw0plb0=
github.com/seaweedfs/fuse v1.1.1/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
github.com/seaweedfs/fuse v1.1.3 h1:0DddotXwSRGbYG2kynoJyr8GHCy30Z2SpdhP3vdyijY=
github.com/seaweedfs/fuse v1.1.3/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
github.com/seaweedfs/fuse v1.1.4 h1:YYqkK86agMhXRSwR+wFbRI8ikMgk3kL6PNTna1MAHyQ=
github.com/seaweedfs/fuse v1.1.4/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
github.com/seaweedfs/fuse v1.1.5 h1:wyuRh/mDvrvt8ZLDS7YdPSe6nczniSx4sQFs/Jonveo=
github.com/seaweedfs/fuse v1.1.5/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
github.com/seaweedfs/fuse v1.1.6 h1:kvCqaIsCEaYOBw5r8kJPUs9GcbwlIKcScnkPLT7HLuQ=
github.com/seaweedfs/fuse v1.1.6/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a h1:C6IhVTxNkhlb0tlCB6JfHOUv1f0xHPK7V8X4HlJZEJw=
@ -961,8 +973,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0=
golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

k8s/seaweedfs/Chart.yaml (4 lines changed)

@ -1,5 +1,5 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "2.38"
version: 2.38
appVersion: "2.43"
version: 2.43

k8s/seaweedfs/templates/volume-service.yaml (2 lines changed)

@ -23,6 +23,6 @@ spec:
targetPort: {{ .Values.volume.metricsPort }}
protocol: TCP
{{- end }}
selector:
selector:
app: {{ template "seaweedfs.name" . }}
component: volume

k8s/seaweedfs/values.yaml (2 lines changed)

@ -4,7 +4,7 @@ global:
registry: ""
repository: ""
imageName: chrislusf/seaweedfs
# imageTag: "2.38" - started using {.Chart.appVersion}
# imageTag: "2.43" - started using {.Chart.appVersion}
imagePullPolicy: IfNotPresent
imagePullSecrets: imagepullsecret
restartPolicy: Always

other/java/client/src/main/java/seaweedfs/client/FilerClient.java (52 lines changed)

@ -126,6 +126,18 @@ public class FilerClient extends FilerGrpcClient {
}
public boolean exists(String path){
File pathFile = new File(path);
String parent = pathFile.getParent();
String entryName = pathFile.getName();
if(parent == null) {
parent = path;
entryName ="";
}
return lookupEntry(parent, entryName) != null;
}
public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) {
File pathFile = new File(path);
@ -142,10 +154,12 @@ public class FilerClient extends FilerGrpcClient {
public boolean touch(String path, int mode) {
String currentUser = System.getProperty("user.name");
return touch(path, mode, 0, 0, currentUser, new String[]{});
long now = System.currentTimeMillis() / 1000L;
return touch(path, now, mode, 0, 0, currentUser, new String[]{});
}
public boolean touch(String path, int mode, int uid, int gid, String userName, String[] groupNames) {
public boolean touch(String path, long modifiedTimeSecond, int mode, int uid, int gid, String userName, String[] groupNames) {
File pathFile = new File(path);
String parent = pathFile.getParent().replace('\\','/');
@ -155,17 +169,25 @@ public class FilerClient extends FilerGrpcClient {
if (entry == null) {
return createEntry(
parent,
newFileEntry(name, mode, uid, gid, userName, groupNames).build()
newFileEntry(name, modifiedTimeSecond, mode, uid, gid, userName, groupNames).build()
);
}
long now = System.currentTimeMillis() / 1000L;
FilerProto.FuseAttributes.Builder attr = entry.getAttributes().toBuilder()
.setMtime(now)
.setUid(uid)
.setGid(gid)
.setUserName(userName)
.clearGroupName()
.addAllGroupName(Arrays.asList(groupNames));
FilerProto.FuseAttributes.Builder attr = entry.getAttributes().toBuilder();
if (modifiedTimeSecond>0) {
attr.setMtime(modifiedTimeSecond);
}
if (uid>0) {
attr.setUid(uid);
}
if (gid>0) {
attr.setGid(gid);
}
if (userName!=null) {
attr.setUserName(userName);
}
if (groupNames!=null) {
attr.clearGroupName().addAllGroupName(Arrays.asList(groupNames));
}
return updateEntry(parent, entry.toBuilder().setAttributes(attr).build());
}
@ -188,17 +210,15 @@ public class FilerClient extends FilerGrpcClient {
.addAllGroupName(Arrays.asList(groupNames)));
}
public FilerProto.Entry.Builder newFileEntry(String name, int mode,
public FilerProto.Entry.Builder newFileEntry(String name, long modifiedTimeSecond, int mode,
int uid, int gid, String userName, String[] groupNames) {
long now = System.currentTimeMillis() / 1000L;
return FilerProto.Entry.newBuilder()
.setName(name)
.setIsDirectory(false)
.setAttributes(FilerProto.FuseAttributes.newBuilder()
.setMtime(now)
.setCrtime(now)
.setMtime(modifiedTimeSecond)
.setCrtime(modifiedTimeSecond)
.setUid(uid)
.setGid(gid)
.setFileMode(mode)
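
The exists() method added at the top of this file splits a path into a parent directory and an entry name, treating a path with no parent (such as the root) as its own parent with an empty name. The same split expressed in Go, purely as an illustration; splitParentName is a made-up helper, not part of this commit:

    // splitParentName mirrors the parent/name split in exists() above:
    // a path with no parent component is looked up under itself with an
    // empty entry name.
    package main

    import (
        "fmt"
        "path"
    )

    func splitParentName(p string) (parent, name string) {
        parent, name = path.Split(path.Clean(p))
        if parent == "" { // no parent component, e.g. a bare name
            return p, ""
        }
        return path.Clean(parent), name
    }

    func main() {
        fmt.Println(splitParentName("/new_folder/new_empty_file")) // "/new_folder", "new_empty_file"
        fmt.Println(splitParentName("/"))                          // "/", ""
    }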

other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java (5 lines changed)

@ -6,6 +6,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
@ -34,6 +35,10 @@ public class SeaweedInputStream extends InputStream {
this.entry = filerClient.lookupEntry(
SeaweedOutputStream.getParentDirectory(fullpath),
SeaweedOutputStream.getFileName(fullpath));
if(entry == null){
throw new FileNotFoundException();
}
this.contentLength = SeaweedRead.fileSize(entry);
this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList());

other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java (11 lines changed)

@ -16,8 +16,17 @@ public class SeaweedFilerTest {
filerClient.mkdirs("/new_folder", 0755);
filerClient.touch("/new_folder/new_empty_file", 0755);
filerClient.touch("/new_folder/new_empty_file2", 0755);
if(!filerClient.exists("/new_folder/new_empty_file")){
System.out.println("/new_folder/new_empty_file should exists");
}
filerClient.rm("/new_folder/new_empty_file", false, true);
filerClient.rm("/new_folder", true, true);
if(filerClient.exists("/new_folder/new_empty_file")){
System.out.println("/new_folder/new_empty_file should not exists");
}
if(!filerClient.exists("/")){
System.out.println("/ should exists");
}
}
}

other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile2.java (new file, 22 lines)

@ -0,0 +1,22 @@
package com.seaweedfs.examples;
import com.google.common.io.Files;
import seaweedfs.client.FilerClient;
import seaweedfs.client.SeaweedOutputStream;
import java.io.File;
import java.io.IOException;
public class ExampleWriteFile2 {
public static void main(String[] args) throws IOException {
FilerClient filerClient = new FilerClient("localhost", 18888);
SeaweedOutputStream seaweedOutputStream = new SeaweedOutputStream(filerClient, "/test/1");
Files.copy(new File("/etc/resolv.conf"), seaweedOutputStream);
seaweedOutputStream.close();
}
}

unmaintained/repeated_vacuum/repeated_vacuum.go (2 lines changed)

@ -52,7 +52,7 @@ func main() {
}
func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{
assignResult, err := operation.Assign(func() string { return *master }, grpcDialOption, &operation.VolumeAssignRequest{
Count: 1,
Replication: *replication,
})
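
operation.Assign now takes a function returning the master address instead of the address itself, so the value is read at call time rather than captured once, which matters if the master can change between calls. A tiny sketch of the difference, with hypothetical names:

    // An address provider is re-evaluated on every call, so callers always
    // see the current master even after it changes.
    package main

    import "fmt"

    var master = "localhost:9333"

    func assign(getMaster func() string) {
        fmt.Println("assign via", getMaster())
    }

    func main() {
        getMaster := func() string { return master }
        assign(getMaster) // assign via localhost:9333
        master = "localhost:9334"
        assign(getMaster) // assign via localhost:9334
    }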

weed/Makefile (2 lines changed)

@ -16,7 +16,7 @@ debug_shell:
debug_mount:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
debug_server:
go build -gcflags="all=-N -l"

weed/command/command.go (2 lines changed)

@ -22,9 +22,11 @@ var Commands = []*Command{
cmdFilerReplicate,
cmdFilerSynchronize,
cmdFix,
cmdGateway,
cmdMaster,
cmdMount,
cmdS3,
cmdIam,
cmdMsgBroker,
cmdScaffold,
cmdServer,

weed/command/export.go (2 lines changed)

@ -215,7 +215,7 @@ func runExport(cmd *Command, args []string) bool {
err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner)
if err != nil && err != io.EOF {
glog.Fatalf("Export Volume File [ERROR] %s\n", err)
glog.Errorf("Export Volume File [ERROR] %s\n", err)
}
return true
}

weed/command/filer.go (27 lines changed)

@ -25,6 +25,8 @@ var (
filerS3Options S3Options
filerStartWebDav *bool
filerWebDavOptions WebDavOption
filerStartIam *bool
filerIamOptions IamOptions
)
type FilerOptions struct {
@ -91,6 +93,10 @@ func init() {
filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")
// start iam on filer
filerStartIam = cmdFiler.Flag.Bool("iam", false, "whether to start IAM service")
filerIamOptions.port = cmdFiler.Flag.Int("iam.port", 8111, "iam server http listen port")
}
var cmdFiler = &Command{
@ -108,7 +114,7 @@ var cmdFiler = &Command{
GET /path/to/
The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order.
If the "filer.toml" is not found, an embedded filer store will be craeted under "-defaultStoreDir".
If the "filer.toml" is not found, an embedded filer store will be created under "-defaultStoreDir".
The example filer.toml configuration file can be generated by "weed scaffold -config=filer"
@ -121,22 +127,33 @@ func runFiler(cmd *Command, args []string) bool {
go stats_collect.StartMetricsServer(*f.metricsHttpPort)
filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
startDelay := time.Duration(2)
if *filerStartS3 {
filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
filerS3Options.filer = &filerAddress
go func() {
time.Sleep(2 * time.Second)
time.Sleep(startDelay * time.Second)
filerS3Options.startS3Server()
}()
startDelay++
}
if *filerStartWebDav {
filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
filerWebDavOptions.filer = &filerAddress
go func() {
time.Sleep(2 * time.Second)
time.Sleep(startDelay * time.Second)
filerWebDavOptions.startWebDav()
}()
startDelay++
}
if *filerStartIam {
filerIamOptions.filer = &filerAddress
filerIamOptions.masters = f.masters
go func() {
time.Sleep(startDelay * time.Second)
filerIamOptions.startIamServer()
}()
}
f.startFiler()

weed/command/filer_cat.go (2 lines changed)

@ -110,7 +110,7 @@ func runFilerCat(cmd *Command, args []string) bool {
filerCat.filerClient = client
return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
})

weed/command/filer_copy.go (113 lines changed)

@ -207,16 +207,6 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
}
mode := fi.Mode()
if mode.IsDir() {
files, _ := ioutil.ReadDir(fileOrDir)
for _, subFileOrDir := range files {
if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
return err
}
}
return nil
}
uid, gid := util.GetFileUidGid(fi)
fileCopyTaskChan <- FileCopyTask{
@ -228,6 +218,16 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
gid: gid,
}
if mode.IsDir() {
files, _ := ioutil.ReadDir(fileOrDir)
println("checking directory", fileOrDir)
for _, subFileOrDir := range files {
if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
return err
}
}
}
return nil
}
@ -293,38 +293,42 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
// upload the file content
fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f)
data, err := ioutil.ReadAll(f)
if err != nil {
return err
}
var mimeType string
var chunks []*filer_pb.FileChunk
var assignResult *filer_pb.AssignVolumeResponse
var assignError error
if task.fileSize > 0 {
if task.fileMode&os.ModeDir == 0 && task.fileSize > 0 {
mimeType = detectMimeType(f)
data, err := ioutil.ReadAll(f)
if err != nil {
return err
}
// assign a volume
err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
DiskType: *worker.options.diskType,
Path: task.destinationUrlPath,
}
err = util.Retry("assignVolume", func() error {
return pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
assignResult, assignError = client.AssignVolume(context.Background(), request)
if assignError != nil {
return fmt.Errorf("assign volume failure %v: %v", request, assignError)
}
if assignResult.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
}
return nil
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
DiskType: *worker.options.diskType,
Path: task.destinationUrlPath,
}
assignResult, assignError = client.AssignVolume(context.Background(), request)
if assignError != nil {
return fmt.Errorf("assign volume failure %v: %v", request, assignError)
}
if assignResult.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
}
return nil
})
})
if err != nil {
return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err)
@ -402,31 +406,30 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
// assign a volume
var assignResult *filer_pb.AssignVolumeResponse
var assignError error
err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
DiskType: *worker.options.diskType,
Path: task.destinationUrlPath + fileName,
}
assignResult, assignError = client.AssignVolume(context.Background(), request)
if assignError != nil {
return fmt.Errorf("assign volume failure %v: %v", request, assignError)
}
if assignResult.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
}
return nil
err := util.Retry("assignVolume", func() error {
return pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
DiskType: *worker.options.diskType,
Path: task.destinationUrlPath + fileName,
}
assignResult, assignError = client.AssignVolume(context.Background(), request)
if assignError != nil {
return fmt.Errorf("assign volume failure %v: %v", request, assignError)
}
if assignResult.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
}
return nil
})
})
if err != nil {
fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
}
if err != nil {
fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
}
targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
if collection == "" {
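
Both assign-volume call sites above are now wrapped in util.Retry, so a transient gRPC failure no longer aborts the copy on the first attempt. The helper's implementation is not part of this diff; a generic sketch of the shape such a wrapper takes, with the attempt count and backoff as illustrative assumptions:

    // retry is an illustrative stand-in for the util.Retry call above: it
    // re-runs fn until it succeeds or the attempt budget runs out.
    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    func retry(name string, fn func() error) (err error) {
        const attempts = 3
        for i := 0; i < attempts; i++ {
            if err = fn(); err == nil {
                return nil
            }
            fmt.Printf("%s attempt %d failed: %v\n", name, i+1, err)
            time.Sleep(time.Duration(i+1) * 100 * time.Millisecond) // grow the wait a little each round
        }
        return err
    }

    func main() {
        calls := 0
        err := retry("assignVolume", func() error {
            calls++
            if calls < 3 {
                return errors.New("temporarily unavailable")
            }
            return nil
        })
        fmt.Println("result:", err) // result: <nil> after two failed attempts
    }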

weed/command/gateway.go (new file, 93 lines)

@ -0,0 +1,93 @@
package command
import (
"net/http"
"strconv"
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/util"
)
var (
gatewayOptions GatewayOptions
)
type GatewayOptions struct {
masters *string
filers *string
bindIp *string
port *int
maxMB *int
}
func init() {
cmdGateway.Run = runGateway // break init cycle
gatewayOptions.masters = cmdGateway.Flag.String("master", "localhost:9333", "comma-separated master servers")
gatewayOptions.filers = cmdGateway.Flag.String("filer", "localhost:8888", "comma-separated filer servers")
gatewayOptions.bindIp = cmdGateway.Flag.String("ip.bind", "localhost", "ip address to bind to")
gatewayOptions.port = cmdGateway.Flag.Int("port", 5647, "gateway http listen port")
gatewayOptions.maxMB = cmdGateway.Flag.Int("maxMB", 4, "split files larger than the limit")
}
var cmdGateway = &Command{
UsageLine: "gateway -port=8888 -master=<ip:port>[,<ip:port>]* -filer=<ip:port>[,<ip:port>]*",
Short: "start a gateway server that points to a list of master servers or a list of filers",
Long: `start a gateway server which accepts REST operation to write any blobs, files, or topic messages.
POST /blobs/
upload the blob and return a chunk id
DELETE /blobs/<chunk_id>
delete a chunk id
/*
POST /files/path/to/a/file
save /path/to/a/file on filer
DELETE /files/path/to/a/file
delete /path/to/a/file on filer
POST /topics/topicName
save on filer to /topics/topicName/<ds>/ts.json
*/
`,
}
func runGateway(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
gatewayOptions.startGateway()
return true
}
func (gw *GatewayOptions) startGateway() {
defaultMux := http.NewServeMux()
_, gws_err := weed_server.NewGatewayServer(defaultMux, &weed_server.GatewayOption{
Masters: strings.Split(*gw.masters, ","),
Filers: strings.Split(*gw.filers, ","),
MaxMB: *gw.maxMB,
})
if gws_err != nil {
glog.Fatalf("Gateway startup error: %v", gws_err)
}
glog.V(0).Infof("Start Seaweed Gateway %s at %s:%d", util.Version(), *gw.bindIp, *gw.port)
gatewayListener, e := util.NewListener(
*gw.bindIp+":"+strconv.Itoa(*gw.port),
time.Duration(10)*time.Second,
)
if e != nil {
glog.Fatalf("Filer listener error: %v", e)
}
httpS := &http.Server{Handler: defaultMux}
if err := httpS.Serve(gatewayListener); err != nil {
glog.Fatalf("Gateway Fail to serve: %v", e)
}
}
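
Going by the usage text in cmdGateway above, a blob round trip against a locally running gateway could look like the sketch below. The port 5647 comes from the flag definition; treating the response body as the returned chunk id is an assumption, since the handler itself lives in weed/server/gateway_server.go:

    // Illustrative client for the blob endpoints listed in the usage text.
    package main

    import (
        "fmt"
        "io"
        "net/http"
        "strings"
    )

    func main() {
        // POST /blobs/ uploads a blob and returns a chunk id.
        resp, err := http.Post("http://localhost:5647/blobs/", "application/octet-stream",
            strings.NewReader("hello blob"))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        chunkID, _ := io.ReadAll(resp.Body)
        fmt.Println("chunk id:", string(chunkID))

        // DELETE /blobs/<chunk_id> removes it again.
        req, _ := http.NewRequest(http.MethodDelete, "http://localhost:5647/blobs/"+string(chunkID), nil)
        if _, err := http.DefaultClient.Do(req); err != nil {
            panic(err)
        }
    }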

weed/command/iam.go (new file, 97 lines)

@ -0,0 +1,97 @@
package command
import (
"context"
"fmt"
"net/http"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/iamapi"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/gorilla/mux"
"time"
)
var (
iamStandaloneOptions IamOptions
)
type IamOptions struct {
filer *string
masters *string
port *int
}
func init() {
cmdIam.Run = runIam // break init cycle
iamStandaloneOptions.filer = cmdIam.Flag.String("filer", "localhost:8888", "filer server address")
iamStandaloneOptions.masters = cmdIam.Flag.String("master", "localhost:9333", "comma-separated master servers")
iamStandaloneOptions.port = cmdIam.Flag.Int("port", 8111, "iam server http listen port")
}
var cmdIam = &Command{
UsageLine: "iam [-port=8111] [-filer=<ip:port>] [-masters=<ip:port>,<ip:port>]",
Short: "start a iam API compatible server",
Long: "start a iam API compatible server.",
}
func runIam(cmd *Command, args []string) bool {
return iamStandaloneOptions.startIamServer()
}
func (iamopt *IamOptions) startIamServer() bool {
filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*iamopt.filer)
if err != nil {
glog.Fatal(err)
return false
}
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
for {
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
}
glog.V(0).Infof("IAM read filer configuration: %s", resp)
return nil
})
if err != nil {
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
time.Sleep(time.Second)
} else {
glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
break
}
}
router := mux.NewRouter().SkipClean(true)
_, iamApiServer_err := iamapi.NewIamApiServer(router, &iamapi.IamServerOption{
Filer: *iamopt.filer,
Port: *iamopt.port,
FilerGrpcAddress: filerGrpcAddress,
GrpcDialOption: grpcDialOption,
})
glog.V(0).Info("NewIamApiServer created")
if iamApiServer_err != nil {
glog.Fatalf("IAM API Server startup error: %v", iamApiServer_err)
}
httpS := &http.Server{Handler: router}
listenAddress := fmt.Sprintf(":%d", *iamopt.port)
iamApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
if err != nil {
glog.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err)
}
glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port)
if err = httpS.Serve(iamApiListener); err != nil {
glog.Fatalf("IAM API Server Fail to serve: %v", err)
}
return true
}

weed/command/mount_std.go (7 lines changed)

@ -149,8 +149,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
fuse.Subtype("seaweedfs"),
// fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders
fuse.NoAppleXattr(),
fuse.NoBrowse(),
fuse.AutoXattr(),
fuse.ExclCreate(),
fuse.DaemonTimeout("3600"),
fuse.AllowSUID(),
@ -169,6 +167,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
if *option.nonempty {
options = append(options, fuse.AllowNonEmptyMount())
}
if *option.readOnly {
options = append(options, fuse.ReadOnly())
}
// find mount point
mountRoot := filerMountRootPath
@ -193,7 +194,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
CacheDir: *option.cacheDir,
CacheSizeMB: *option.cacheSizeMB,
DataCenter: *option.dataCenter,
EntryCacheTtl: 3 * time.Second,
MountUid: uid,
MountGid: gid,
MountMode: mountMode,
@ -203,7 +203,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
VolumeServerAccess: *mountOptions.volumeServerAccess,
Cipher: cipher,
UidGidMapper: uidGidMapper,
ReadOnly: *option.readOnly,
})
// mount

weed/command/scaffold.go (2 lines changed)

@ -281,7 +281,7 @@ index.max_result_window = 10000
# Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using cassandra as an example
# The following is just using redis as an example
##########################
[redis2.tmp]
enabled = false

weed/command/server.go (9 lines changed)

@ -58,7 +58,8 @@ var (
serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.")
volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly.")
volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly (deprecated, use minFreeSpace instead).")
volumeMinFreeSpace = cmdServer.Flag.String("volume.minFreeSpace", "", "min free disk space (value<=100 as percentage like 1, other as human readable bytes, like 10GiB). Low disk space will mark all volumes as ReadOnly.")
serverMetricsHttpPort = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
// pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
@ -244,8 +245,8 @@ func runServer(cmd *Command, args []string) bool {
// start volume server
if *isStartingVolumeServer {
go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, *volumeMinFreeSpacePercent)
minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent)
go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, minFreeSpaces)
}
if *isStartingMasterServer {
@ -253,6 +254,4 @@ func runServer(cmd *Command, args []string) bool {
}
select {}
return true
}

weed/command/volume.go (28 lines changed)

@ -57,7 +57,6 @@ type VolumeServerOptions struct {
compactionMBPerSecond *int
fileSizeLimitMB *int
concurrentUploadLimitMB *int
minFreeSpacePercents []float32
pprof *bool
preStopSeconds *int
metricsHttpPort *int
@ -105,7 +104,8 @@ var (
volumeFolders = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
maxVolumeCounts = cmdVolume.Flag.String("max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.")
volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
minFreeSpacePercent = cmdVolume.Flag.String("minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly.")
minFreeSpacePercent = cmdVolume.Flag.String("minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly (deprecated, use minFreeSpace instead).")
minFreeSpace = cmdVolume.Flag.String("minFreeSpace", "", "min free disk space (value<=100 as percentage like 1, other as human readable bytes, like 10GiB). Low disk space will mark all volumes as ReadOnly.")
)
func runVolume(cmd *Command, args []string) bool {
@ -120,12 +120,13 @@ func runVolume(cmd *Command, args []string) bool {
go stats_collect.StartMetricsServer(*v.metricsHttpPort)
v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, *minFreeSpacePercent)
minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent)
v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, minFreeSpaces)
return true
}
func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption, minFreeSpacePercent string) {
func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption string, minFreeSpaces []util.MinFreeSpace) {
// Set multiple folders and each folder's max volume count limit'
v.folders = strings.Split(volumeFolders, ",")
@ -153,22 +154,13 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
}
// set minFreeSpacePercent
minFreeSpacePercentStrings := strings.Split(minFreeSpacePercent, ",")
for _, freeString := range minFreeSpacePercentStrings {
if value, e := strconv.ParseFloat(freeString, 32); e == nil {
v.minFreeSpacePercents = append(v.minFreeSpacePercents, float32(value))
} else {
glog.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString)
}
}
if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 {
if len(minFreeSpaces) == 1 && len(v.folders) > 1 {
for i := 0; i < len(v.folders)-1; i++ {
v.minFreeSpacePercents = append(v.minFreeSpacePercents, v.minFreeSpacePercents[0])
minFreeSpaces = append(minFreeSpaces, minFreeSpaces[0])
}
}
if len(v.folders) != len(v.minFreeSpacePercents) {
glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
if len(v.folders) != len(minFreeSpaces) {
glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(minFreeSpaces))
}
// set disk types
@ -231,7 +223,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
*v.ip, *v.port, *v.publicUrl,
v.folders, v.folderMaxLimits, v.minFreeSpacePercents, diskTypes,
v.folders, v.folderMaxLimits, minFreeSpaces, diskTypes,
*v.idxFolder,
volumeNeedleMapKind,
strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
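
Per the new flag help, -minFreeSpace treats values up to 100 as a percentage and anything else as a human-readable byte size such as 10GiB. The real parser is util.MustParseMinFreeSpace, which this diff does not show, so the following is a hypothetical rendering of that rule only:

    // parseMinFreeSpace is an illustrative take on the documented rule:
    // a numeric value <= 100 is a percentage, otherwise the value is a
    // byte size with a binary unit suffix.
    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    type minFreeSpace struct {
        Percent float64 // set when the input was a percentage
        Bytes   uint64  // set when the input was an absolute size
    }

    func parseMinFreeSpace(s string) (minFreeSpace, error) {
        if v, err := strconv.ParseFloat(s, 64); err == nil && v <= 100 {
            return minFreeSpace{Percent: v}, nil
        }
        units := []struct {
            suffix string
            factor uint64
        }{{"GiB", 1 << 30}, {"MiB", 1 << 20}, {"KiB", 1 << 10}}
        for _, u := range units {
            if strings.HasSuffix(s, u.suffix) {
                n, err := strconv.ParseUint(strings.TrimSuffix(s, u.suffix), 10, 64)
                if err != nil {
                    return minFreeSpace{}, err
                }
                return minFreeSpace{Bytes: n * u.factor}, nil
            }
        }
        return minFreeSpace{}, fmt.Errorf("unrecognized min free space %q", s)
    }

    func main() {
        fmt.Println(parseMinFreeSpace("1"))     // percentage form: {1 0} <nil>
        fmt.Println(parseMinFreeSpace("10GiB")) // byte form: {0 10737418240} <nil>
    }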

weed/filer/abstract_sql/abstract_sql_store.go (4 lines changed)

@ -276,7 +276,9 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
}
}
res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), fullpath)
glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
if err != nil {
return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
}

weed/filer/filechunks.go (6 lines changed)

@ -2,7 +2,6 @@ package filer
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"math"
@ -43,12 +42,11 @@ func ETagEntry(entry *Entry) (etag string) {
func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
if len(chunks) == 1 {
return chunks[0].ETag
return fmt.Sprintf("%x", util.Base64Md5ToBytes(chunks[0].ETag))
}
md5_digests := [][]byte{}
for _, c := range chunks {
md5_decoded, _ := hex.DecodeString(c.ETag)
md5_digests = append(md5_digests, md5_decoded)
md5_digests = append(md5_digests, util.Base64Md5ToBytes(c.ETag))
}
return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
}
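
ETagChunks now decodes each chunk's base64-encoded MD5 (via util.Base64Md5ToBytes) instead of hex, and for multi-chunk files keeps the S3-style multipart form: the hex MD5 of the concatenated digests plus a part-count suffix. A self-contained sketch of that arithmetic, where base64Md5ToBytes stands in for the util helper:

    // Computes a single-chunk ETag and an S3-style multipart ETag from
    // base64 MD5 digests, mirroring the ETagChunks logic above.
    package main

    import (
        "bytes"
        "crypto/md5"
        "encoding/base64"
        "fmt"
    )

    func base64Md5ToBytes(s string) []byte {
        b, _ := base64.StdEncoding.DecodeString(s)
        return b
    }

    func etag(chunkMd5sBase64 []string) string {
        if len(chunkMd5sBase64) == 1 {
            return fmt.Sprintf("%x", base64Md5ToBytes(chunkMd5sBase64[0]))
        }
        var digests [][]byte
        for _, m := range chunkMd5sBase64 {
            digests = append(digests, base64Md5ToBytes(m))
        }
        sum := md5.Sum(bytes.Join(digests, nil))
        return fmt.Sprintf("%x-%d", sum, len(chunkMd5sBase64))
    }

    func main() {
        m1 := md5.Sum([]byte("part one"))
        m2 := md5.Sum([]byte("part two"))
        b64 := func(b [16]byte) string { return base64.StdEncoding.EncodeToString(b[:]) }
        fmt.Println(etag([]string{b64(m1)}))          // hex md5 of the single part
        fmt.Println(etag([]string{b64(m1), b64(m2)})) // md5-of-md5s with "-2" suffix
    }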

weed/filer/filer_buckets.go (2 lines changed)

@ -29,7 +29,7 @@ func (f *Filer) LoadBuckets() {
limit := int64(math.MaxInt32)
entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "")
entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "", "")
if err != nil {
glog.V(1).Infof("no buckets found: %v", err)

weed/filer/filer_conf.go (1 line changed)

@ -18,6 +18,7 @@ const (
FilerConfName = "filer.conf"
IamConfigDirecotry = "/etc/iam"
IamIdentityFile = "identity.json"
IamPoliciesFile = "policies.json"
)
type FilerConf struct {

weed/filer/filer_delete_entry.go (2 lines changed)

@ -73,7 +73,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
includeLastFile := false
if !isDeletingBucket {
for {
entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "")
entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
if err != nil {
glog.Errorf("list folder %s: %v", entry.FullPath, err)
return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)

weed/filer/filer_notify.go (4 lines changed)

@ -116,13 +116,13 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(
sizeBuf := make([]byte, 4)
startTsNs := startTime.UnixNano()
dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "")
dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "", "")
if listDayErr != nil {
return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
}
for _, dayEntry := range dayEntries {
// println("checking day", dayEntry.FullPath)
hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "")
hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "", "")
if listHourMinuteErr != nil {
return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
}

weed/filer/filer_on_meta_event.go (2 lines changed)

@ -52,7 +52,7 @@ func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataR
func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) {
var buf bytes.Buffer
err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64, false)
err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64)
if err != nil {
return nil, err
}

weed/filer/filer_search.go (34 lines changed)

@ -20,9 +20,9 @@ func splitPattern(pattern string) (prefix string, restPattern string) {
}
// For now, prefix and namePattern are mutually exclusive
func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string) (entries []*Entry, hasMore bool, err error) {
func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string) (entries []*Entry, hasMore bool, err error) {
_, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, func(entry *Entry) bool {
_, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, namePatternExclude, func(entry *Entry) bool {
entries = append(entries, entry)
return true
})
@ -36,7 +36,7 @@ func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, start
}
// For now, prefix and namePattern are mutually exclusive
func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
if strings.HasSuffix(string(p), "/") && len(p) > 1 {
p = p[0 : len(p)-1]
}
@ -47,30 +47,38 @@ func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath,
}
var missedCount int64
missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, eachEntryFunc)
missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, namePatternExclude, eachEntryFunc)
for missedCount > 0 && err == nil {
missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, eachEntryFunc)
missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, namePatternExclude, eachEntryFunc)
}
return
}
func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) {
func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) {
if len(restNamePattern) == 0 {
if len(restNamePattern) == 0 && len(namePatternExclude) == 0 {
lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
return 0, lastFileName, err
}
lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
nameToTest := strings.ToLower(entry.Name())
if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && matched {
if !eachEntryFunc(entry) {
return false
nameToTest := entry.Name()
if len(namePatternExclude) > 0 {
if matched, matchErr := filepath.Match(namePatternExclude, nameToTest); matchErr == nil && matched {
missedCount++
return true
}
} else {
missedCount++
}
if len(restNamePattern) > 0 {
if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && !matched {
missedCount++
return true
}
}
if !eachEntryFunc(entry) {
return false
}
return true
})
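
The listing path now takes a namePatternExclude alongside namePattern; both are matched against the entry name with filepath.Match, and entries that hit the exclude pattern or miss the include pattern are counted as missed so the caller can page in replacements. A standalone sketch of that filter logic, with filterNames as an invented wrapper:

    // Filters names the way doListPatternMatchedEntries does above: entries
    // that match the exclude pattern, or fail the include pattern, are
    // skipped and counted so the caller can fetch more to satisfy the limit.
    package main

    import (
        "fmt"
        "path/filepath"
    )

    func filterNames(names []string, include, exclude string) (kept []string, missed int) {
        for _, name := range names {
            if exclude != "" {
                if matched, err := filepath.Match(exclude, name); err == nil && matched {
                    missed++
                    continue
                }
            }
            if include != "" {
                if matched, err := filepath.Match(include, name); err == nil && !matched {
                    missed++
                    continue
                }
            }
            kept = append(kept, name)
        }
        return kept, missed
    }

    func main() {
        names := []string{"a.log", "a.txt", "b.log", "notes"}
        kept, missed := filterNames(names, "*.log", "b*")
        fmt.Println(kept, missed) // [a.log] 3
    }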

weed/filer/filerstore_wrapper.go (2 lines changed)

@ -149,8 +149,8 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (
stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "find").Observe(time.Since(start).Seconds())
}()
glog.V(4).Infof("FindEntry %s", fp)
entry, err = actualStore.FindEntry(ctx, fp)
glog.V(4).Infof("FindEntry %s: %v", fp, err)
if err != nil {
return nil, err
}

weed/filer/leveldb/leveldb_store_test.go (6 lines changed)

@ -51,14 +51,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@ -77,7 +77,7 @@ func TestEmptyRoot(t *testing.T) {
ctx := context.Background()
// checking one upper directory
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return

weed/filer/leveldb2/leveldb2_store_test.go (6 lines changed)

@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) {
ctx := context.Background()
// checking one upper directory
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return

weed/filer/leveldb3/leveldb3_store_test.go (6 lines changed)

@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) {
ctx := context.Background()
// checking one upper directory
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return

weed/filer/read_write.go (10 lines changed)

@ -27,7 +27,7 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seaweed
return err
}
return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
}
@ -41,8 +41,12 @@ func ReadContent(filerAddress string, dir, name string) ([]byte, error) {
}
func SaveAs(host string, port int, dir, name string, contentType string, byteBuffer *bytes.Buffer) error {
target := fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name)
var target string
if port == 0 {
target = fmt.Sprintf("http://%s%s/%s", host, dir, name)
} else {
target = fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name)
}
// set the HTTP method, url, and request body
req, err := http.NewRequest(http.MethodPut, target, byteBuffer)

weed/filer/reader_at.go (22 lines changed)

@ -139,13 +139,15 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
}
glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
var buffer []byte
buffer, err = c.readFromWholeChunkData(chunk, nextChunk)
bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset
bufferLength := chunkStop - chunkStart
buffer, err = c.readChunkSlice(chunk, nextChunk, uint64(bufferOffset), uint64(bufferLength))
if err != nil {
glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
return
}
bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset
copied := copy(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], buffer[bufferOffset:bufferOffset+chunkStop-chunkStart])
copied := copy(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], buffer)
n += copied
startOffset, remaining = startOffset+int64(copied), remaining-int64(copied)
}
@ -167,6 +169,20 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
}
func (c *ChunkReadAt) readChunkSlice(chunkView *ChunkView, nextChunkViews *ChunkView, offset, length uint64) ([]byte, error) {
chunkSlice := c.chunkCache.GetChunkSlice(chunkView.FileId, offset, length)
if len(chunkSlice) > 0 {
return chunkSlice, nil
}
chunkData, err := c.readFromWholeChunkData(chunkView, nextChunkViews)
if err != nil {
return nil, err
}
wanted := min(int64(length), int64(len(chunkData))-int64(offset))
return chunkData[offset : int64(offset)+wanted], nil
}
func (c *ChunkReadAt) readFromWholeChunkData(chunkView *ChunkView, nextChunkViews ...*ChunkView) (chunkData []byte, err error) {
if c.lastChunkFileId == chunkView.FileId {
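
The new readChunkSlice first asks the chunk cache for exactly the requested slice and only on a miss fetches the whole chunk, clamping the window with wanted = min(length, len(chunkData)-offset) so the returned slice never runs past the chunk. The clamp in isolation, as a small sketch:

    // sliceChunk clamps a requested [offset, offset+length) window to the
    // data actually available, as readChunkSlice does after a whole-chunk
    // fetch.
    package main

    import "fmt"

    func sliceChunk(chunk []byte, offset, length uint64) []byte {
        if offset >= uint64(len(chunk)) {
            return nil
        }
        wanted := int64(length)
        if remaining := int64(len(chunk)) - int64(offset); remaining < wanted {
            wanted = remaining // request ran past the chunk end: truncate
        }
        return chunk[offset : int64(offset)+wanted]
    }

    func main() {
        chunk := []byte("0123456789")
        fmt.Printf("%s\n", sliceChunk(chunk, 3, 4))  // 3456
        fmt.Printf("%s\n", sliceChunk(chunk, 8, 10)) // 89 (clamped)
    }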

weed/filer/reader_at_test.go (5 lines changed)

@ -20,6 +20,11 @@ func (m *mockChunkCache) GetChunk(fileId string, minSize uint64) (data []byte) {
}
return data
}
func (m *mockChunkCache) GetChunkSlice(fileId string, offset, length uint64) []byte {
return nil
}
func (m *mockChunkCache) SetChunk(fileId string, data []byte) {
}

weed/filer/rocksdb/rocksdb_store_test.go (6 lines changed)

@ -53,14 +53,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@ -79,7 +79,7 @@ func TestEmptyRoot(t *testing.T) {
ctx := context.Background()
// checking one upper directory
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return

weed/filer/stream.go (26 lines changed)

@ -3,7 +3,6 @@ package filer
import (
"bytes"
"fmt"
"golang.org/x/sync/errgroup"
"io"
"math"
"strings"
@@ -14,7 +13,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64, isCheck bool) error {
func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
glog.V(9).Infof("start to stream content for chunks: %+v\n", chunks)
chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size)
@@ -34,17 +33,6 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
fileId2Url[chunkView.FileId] = urlStrings
}
if isCheck {
// Pre-check all chunkViews urls
gErr := new(errgroup.Group)
CheckAllChunkViews(chunkViews, &fileId2Url, gErr)
if err := gErr.Wait(); err != nil {
glog.Errorf("check all chunks: %v", err)
return fmt.Errorf("check all chunks: %v", err)
}
return nil
}
for _, chunkView := range chunkViews {
urlStrings := fileId2Url[chunkView.FileId]
@@ -53,7 +41,6 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
glog.Errorf("read chunk: %v", err)
return fmt.Errorf("read chunk: %v", err)
}
_, err = w.Write(data)
if err != nil {
glog.Errorf("write chunk: %v", err)
@@ -65,17 +52,6 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
}
func CheckAllChunkViews(chunkViews []*ChunkView, fileId2Url *map[string][]string, gErr *errgroup.Group) {
for _, chunkView := range chunkViews {
urlStrings := (*fileId2Url)[chunkView.FileId]
glog.V(9).Infof("Check chunk: %+v\n url: %v", chunkView, urlStrings)
gErr.Go(func() error {
_, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
return err
})
}
}
// ---------------- ReadAllReader ----------------------------------
func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) {

305
weed/filesys/dir.go

@@ -24,9 +24,12 @@ type Dir struct {
wfs *WFS
entry *filer_pb.Entry
parent *Dir
id uint64
}
var _ = fs.Node(&Dir{})
var _ = fs.NodeIdentifier(&Dir{})
var _ = fs.NodeCreater(&Dir{})
var _ = fs.NodeMknoder(&Dir{})
var _ = fs.NodeMkdirer(&Dir{})
@@ -42,6 +45,13 @@ var _ = fs.NodeRemovexattrer(&Dir{})
var _ = fs.NodeListxattrer(&Dir{})
var _ = fs.NodeForgetter(&Dir{})
func (dir *Dir) Id() uint64 {
if dir.parent == nil {
return 1
}
return dir.id
}
func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
// https://github.com/bazil/fuse/issues/196
@@ -53,17 +63,18 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
return nil
}
if err := dir.maybeLoadEntry(); err != nil {
entry, err := dir.maybeLoadEntry()
if err != nil {
glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
return err
}
// attr.Inode = util.FullPath(dir.FullPath()).AsInode()
attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir
attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0)
attr.Crtime = time.Unix(dir.entry.Attributes.Crtime, 0)
attr.Gid = dir.entry.Attributes.Gid
attr.Uid = dir.entry.Attributes.Uid
attr.Inode = dir.Id()
attr.Mode = os.FileMode(entry.Attributes.FileMode) | os.ModeDir
attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
attr.Gid = entry.Attributes.Gid
attr.Uid = entry.Attributes.Uid
glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
@@ -74,16 +85,18 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f
glog.V(4).Infof("dir Getxattr %s", dir.FullPath())
if err := dir.maybeLoadEntry(); err != nil {
entry, err := dir.maybeLoadEntry()
if err != nil {
return err
}
return getxattr(dir.entry, req, resp)
return getxattr(entry, req, resp)
}
func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
// attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode()
attr.Valid = time.Second
attr.Inode = dir.Id()
attr.Uid = dir.wfs.option.MountUid
attr.Gid = dir.wfs.option.MountGid
attr.Mode = dir.wfs.option.MountMode
@@ -102,73 +115,88 @@ func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
return nil
}
func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node {
f := dir.wfs.fsNodeCache.EnsureFsNode(util.NewFullPath(dir.FullPath(), name), func() fs.Node {
return &File{
Name: name,
dir: dir,
wfs: dir.wfs,
entry: entry,
entryViewCache: nil,
}
})
f.(*File).dir = dir // in case dir node was created later
return f
func (dir *Dir) newFile(name string) fs.Node {
fileFullPath := util.NewFullPath(dir.FullPath(), name)
fileId := fileFullPath.AsInode()
dir.wfs.handlesLock.Lock()
existingHandle, found := dir.wfs.handles[fileId]
dir.wfs.handlesLock.Unlock()
if found {
glog.V(4).Infof("newFile found opened file handle: %+v", fileFullPath)
return existingHandle.f
}
return &File{
Name: name,
dir: dir,
wfs: dir.wfs,
id: fileId,
}
}
func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.Node {
func (dir *Dir) newDirectory(fullpath util.FullPath) fs.Node {
return &Dir{name: fullpath.Name(), wfs: dir.wfs, parent: dir, id: fullpath.AsInode()}
d := dir.wfs.fsNodeCache.EnsureFsNode(fullpath, func() fs.Node {
return &Dir{name: entry.Name, wfs: dir.wfs, entry: entry, parent: dir}
})
d.(*Dir).parent = dir // in case dir node was created later
return d
}
func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
if dir.wfs.option.ReadOnly {
return nil, nil, fuse.EPERM
}
request, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, req.Flags&fuse.OpenExclusive != 0)
exclusive := req.Flags&fuse.OpenExclusive != 0
isDirectory := req.Mode&os.ModeDir > 0
if err != nil {
return nil, nil, err
if exclusive || isDirectory {
_, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, exclusive)
if err != nil {
return nil, nil, err
}
}
var node fs.Node
if request.Entry.IsDirectory {
node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), request.Entry)
if isDirectory {
node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name))
return node, nil, nil
}
node = dir.newFile(req.Name, request.Entry)
node = dir.newFile(req.Name)
file := node.(*File)
fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
file.entry = &filer_pb.Entry{
Name: req.Name,
IsDirectory: req.Mode&os.ModeDir > 0,
Attributes: &filer_pb.FuseAttributes{
Mtime: time.Now().Unix(),
Crtime: time.Now().Unix(),
FileMode: uint32(req.Mode &^ dir.wfs.option.Umask),
Uid: req.Uid,
Gid: req.Gid,
Collection: dir.wfs.option.Collection,
Replication: dir.wfs.option.Replication,
TtlSec: dir.wfs.option.TtlSec,
},
}
file.dirtyMetadata = true
fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid, req.Flags&fuse.OpenWriteOnly > 0)
return file, fh, nil
}
func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
if dir.wfs.option.ReadOnly {
return nil, fuse.EPERM
}
request, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false)
_, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false)
if err != nil {
return nil, err
}
var node fs.Node
node = dir.newFile(req.Name, request.Entry)
node = dir.newFile(req.Name)
return node, nil
}
func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exlusive bool) (*filer_pb.CreateEntryRequest, error) {
func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exclusive bool) (*filer_pb.CreateEntryRequest, error) {
dirFullPath := dir.FullPath()
request := &filer_pb.CreateEntryRequest{
Directory: dir.FullPath(),
Directory: dirFullPath,
Entry: &filer_pb.Entry{
Name: name,
IsDirectory: mode&os.ModeDir > 0,
@@ -183,10 +211,10 @@ func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, ex
TtlSec: dir.wfs.option.TtlSec,
},
},
OExcl: exlusive,
OExcl: exclusive,
Signatures: []int32{dir.wfs.signature},
}
glog.V(1).Infof("create %s/%s", dir.FullPath(), name)
glog.V(1).Infof("create %s/%s", dirFullPath, name)
err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -197,11 +225,14 @@ func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, ex
if strings.Contains(err.Error(), "EEXIST") {
return fuse.EEXIST
}
glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), name, err)
glog.V(0).Infof("create %s/%s: %v", dirFullPath, name, err)
return fuse.EIO
}
dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil {
glog.Errorf("local InsertEntry dir %s/%s: %v", dirFullPath, name, err)
return fuse.EIO
}
return nil
})
@@ -210,10 +241,6 @@ func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, ex
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
if dir.wfs.option.ReadOnly {
return nil, fuse.EPERM
}
glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name)
newEntry := &filer_pb.Entry{
@@ -228,35 +255,40 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
},
}
dirFullPath := dir.FullPath()
err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
dir.wfs.mapPbIdFromLocalToFiler(newEntry)
defer dir.wfs.mapPbIdFromFilerToLocal(newEntry)
request := &filer_pb.CreateEntryRequest{
Directory: dir.FullPath(),
Directory: dirFullPath,
Entry: newEntry,
Signatures: []int32{dir.wfs.signature},
}
glog.V(1).Infof("mkdir: %v", request)
if err := filer_pb.CreateEntry(client, request); err != nil {
glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err)
return err
}
dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil {
glog.Errorf("local mkdir dir %s/%s: %v", dirFullPath, req.Name, err)
return fuse.EIO
}
return nil
})
if err == nil {
node := dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), newEntry)
node := dir.newDirectory(util.NewFullPath(dirFullPath, req.Name))
return node, nil
}
glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err)
return nil, fuse.EIO
}
@@ -272,40 +304,41 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
glog.Errorf("dir Lookup %s: %v", dirPath, visitErr)
return nil, fuse.EIO
}
cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
localEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
if cacheErr == filer_pb.ErrNotFound {
return nil, fuse.ENOENT
}
entry := cachedEntry.ToProtoEntry()
if entry == nil {
if localEntry == nil {
// glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath)
entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath)
entry, err := filer_pb.GetEntry(dir.wfs, fullFilePath)
if err != nil {
glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
}
localEntry = filer.FromPbEntry(string(dirPath), entry)
} else {
glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath)
}
if entry != nil {
if entry.IsDirectory {
node = dir.newDirectory(fullFilePath, entry)
if localEntry != nil {
if localEntry.IsDirectory() {
node = dir.newDirectory(fullFilePath)
} else {
node = dir.newFile(req.Name, entry)
node = dir.newFile(req.Name)
}
// resp.EntryValid = time.Second
// resp.Attr.Inode = fullFilePath.AsInode()
resp.Attr.Inode = fullFilePath.AsInode()
resp.Attr.Valid = time.Second
resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
resp.Attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode)
resp.Attr.Gid = entry.Attributes.Gid
resp.Attr.Uid = entry.Attributes.Uid
if entry.HardLinkCounter > 0 {
resp.Attr.Nlink = uint32(entry.HardLinkCounter)
resp.Attr.Size = localEntry.FileSize
resp.Attr.Mtime = localEntry.Attr.Mtime
resp.Attr.Crtime = localEntry.Attr.Crtime
resp.Attr.Mode = localEntry.Attr.Mode
resp.Attr.Gid = localEntry.Attr.Gid
resp.Attr.Uid = localEntry.Attr.Uid
if localEntry.HardLinkCounter > 0 {
resp.Attr.Nlink = uint32(localEntry.HardLinkCounter)
}
return node, nil
@@ -320,15 +353,14 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
dirPath := util.FullPath(dir.FullPath())
glog.V(4).Infof("dir ReadDirAll %s", dirPath)
processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
if entry.IsDirectory {
dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir}
processEachEntryFn := func(entry *filer.Entry, isLast bool) {
if entry.IsDirectory() {
dirent := fuse.Dirent{Name: entry.Name(), Type: fuse.DT_Dir, Inode: dirPath.Child(entry.Name()).AsInode()}
ret = append(ret, dirent)
} else {
dirent := fuse.Dirent{Name: entry.Name, Type: findFileType(uint16(entry.Attributes.FileMode))}
dirent := fuse.Dirent{Name: entry.Name(), Type: findFileType(uint16(entry.Attr.Mode)), Inode: dirPath.Child(entry.Name()).AsInode()}
ret = append(ret, dirent)
}
return nil
}
if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil {
@@ -336,7 +368,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
return nil, fuse.EIO
}
listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool {
processEachEntryFn(entry.ToProtoEntry(), false)
processEachEntryFn(entry, false)
return true
})
if listErr != nil {
@@ -368,11 +400,6 @@ func findFileType(mode uint16) fuse.DirentType {
func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
if dir.wfs.option.ReadOnly {
return fuse.EPERM
}
if !req.Dir {
return dir.removeOneFile(req)
}
@@ -383,40 +410,32 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
filePath := util.NewFullPath(dir.FullPath(), req.Name)
dirFullPath := dir.FullPath()
filePath := util.NewFullPath(dirFullPath, req.Name)
entry, err := filer_pb.GetEntry(dir.wfs, filePath)
if err != nil {
return err
}
if entry == nil {
return nil
}
// first, ensure the filer store can correctly delete
glog.V(3).Infof("remove file: %v", req)
isDeleteData := entry.HardLinkCounter <= 1
err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature})
isDeleteData := entry != nil && entry.HardLinkCounter <= 1
err = filer_pb.Remove(dir.wfs, dirFullPath, req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature})
if err != nil {
glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
glog.V(3).Infof("not found remove file %s: %v", filePath, err)
return fuse.ENOENT
}
// then, delete meta cache and fsNode cache
dir.wfs.metaCache.DeleteEntry(context.Background(), filePath)
// clear entry inside the file
fsNode := dir.wfs.fsNodeCache.GetFsNode(filePath)
dir.wfs.fsNodeCache.DeleteFsNode(filePath)
if fsNode != nil {
if file, ok := fsNode.(*File); ok {
file.clearEntry()
}
if err = dir.wfs.metaCache.DeleteEntry(context.Background(), filePath); err != nil {
glog.V(3).Infof("local DeleteEntry %s: %v", filePath, err)
return fuse.ESTALE
}
// remove current file handle if any
dir.wfs.handlesLock.Lock()
defer dir.wfs.handlesLock.Unlock()
inodeId := util.NewFullPath(dir.FullPath(), req.Name).AsInode()
inodeId := filePath.AsInode()
delete(dir.wfs.handles, inodeId)
return nil
@@ -425,20 +444,20 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
dirFullPath := dir.FullPath()
glog.V(3).Infof("remove directory entry: %v", req)
ignoreRecursiveErr := true // ignore recursion error since the OS should manage it
err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, ignoreRecursiveErr, false, []int32{dir.wfs.signature})
err := filer_pb.Remove(dir.wfs, dirFullPath, req.Name, true, true, ignoreRecursiveErr, false, []int32{dir.wfs.signature})
if err != nil {
glog.V(0).Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
glog.V(0).Infof("remove %s/%s: %v", dirFullPath, req.Name, err)
if strings.Contains(err.Error(), "non-empty") {
return fuse.EEXIST
}
return fuse.ENOENT
}
t := util.NewFullPath(dir.FullPath(), req.Name)
t := util.NewFullPath(dirFullPath, req.Name)
dir.wfs.metaCache.DeleteEntry(context.Background(), t)
dir.wfs.fsNodeCache.DeleteFsNode(t)
return nil
@@ -446,73 +465,64 @@ func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
if dir.wfs.option.ReadOnly {
return fuse.EPERM
}
glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req)
if err := dir.maybeLoadEntry(); err != nil {
entry, err := dir.maybeLoadEntry()
if err != nil {
return err
}
if req.Valid.Mode() {
dir.entry.Attributes.FileMode = uint32(req.Mode)
entry.Attributes.FileMode = uint32(req.Mode)
}
if req.Valid.Uid() {
dir.entry.Attributes.Uid = req.Uid
entry.Attributes.Uid = req.Uid
}
if req.Valid.Gid() {
dir.entry.Attributes.Gid = req.Gid
entry.Attributes.Gid = req.Gid
}
if req.Valid.Mtime() {
dir.entry.Attributes.Mtime = req.Mtime.Unix()
entry.Attributes.Mtime = req.Mtime.Unix()
}
return dir.saveEntry()
return dir.saveEntry(entry)
}
func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
if dir.wfs.option.ReadOnly {
return fuse.EPERM
}
glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name)
if err := dir.maybeLoadEntry(); err != nil {
entry, err := dir.maybeLoadEntry()
if err != nil {
return err
}
if err := setxattr(dir.entry, req); err != nil {
if err := setxattr(entry, req); err != nil {
return err
}
return dir.saveEntry()
return dir.saveEntry(entry)
}
func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
if dir.wfs.option.ReadOnly {
return fuse.EPERM
}
glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name)
if err := dir.maybeLoadEntry(); err != nil {
entry, err := dir.maybeLoadEntry()
if err != nil {
return err
}
if err := removexattr(dir.entry, req); err != nil {
if err := removexattr(entry, req); err != nil {
return err
}
return dir.saveEntry()
return dir.saveEntry(entry)
}
@@ -520,11 +530,12 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
glog.V(4).Infof("dir Listxattr %s", dir.FullPath())
if err := dir.maybeLoadEntry(); err != nil {
entry, err := dir.maybeLoadEntry()
if err != nil {
return err
}
if err := listxattr(dir.entry, req, resp); err != nil {
if err := listxattr(entry, req, resp); err != nil {
return err
}
@@ -534,34 +545,25 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
func (dir *Dir) Forget() {
glog.V(4).Infof("Forget dir %s", dir.FullPath())
dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath()))
}
func (dir *Dir) maybeLoadEntry() error {
if dir.entry == nil {
parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName()
entry, err := dir.wfs.maybeLoadEntry(parentDirPath, name)
if err != nil {
return err
}
dir.entry = entry
}
return nil
func (dir *Dir) maybeLoadEntry() (*filer_pb.Entry, error) {
parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName()
return dir.wfs.maybeLoadEntry(parentDirPath, name)
}
func (dir *Dir) saveEntry() error {
func (dir *Dir) saveEntry(entry *filer_pb.Entry) error {
parentDir, name := util.FullPath(dir.FullPath()).DirAndName()
return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
dir.wfs.mapPbIdFromLocalToFiler(dir.entry)
defer dir.wfs.mapPbIdFromFilerToLocal(dir.entry)
dir.wfs.mapPbIdFromLocalToFiler(entry)
defer dir.wfs.mapPbIdFromFilerToLocal(entry)
request := &filer_pb.UpdateEntryRequest{
Directory: parentDir,
Entry: dir.entry,
Entry: entry,
Signatures: []int32{dir.wfs.signature},
}
@@ -572,7 +574,10 @@ func (dir *Dir) saveEntry() error {
return fuse.EIO
}
dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
if err := dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil {
glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
return fuse.ESTALE
}
return nil
})
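Note: the common thread in this file is that directory and file nodes now answer fs.NodeIdentifier with an id derived from their full path, which is what lets the fsNodeCache lookups disappear. The identity rule, stated as a sketch (AsInode is the existing path-to-inode helper):

    // Sketch of the rule behind Dir.Id and File.Id above.
    func nodeId(fullPath util.FullPath, isRoot bool) uint64 {
    	if isRoot {
    		return 1 // the mount root is always inode 1
    	}
    	return fullPath.AsInode() // stable id derived from the path
    }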

21
weed/filesys/dir_link.go

@@ -24,10 +24,6 @@ const (
func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (fs.Node, error) {
if dir.wfs.option.ReadOnly {
return nil, fuse.EPERM
}
oldFile, ok := old.(*File)
if !ok {
glog.Errorf("old node is not a file: %+v", old)
@@ -35,11 +31,11 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
if _, err := oldFile.maybeLoadEntry(ctx); err != nil {
oldEntry, err := oldFile.maybeLoadEntry(ctx)
if err != nil {
return nil, err
}
oldEntry := oldFile.getEntry()
if oldEntry == nil {
return nil, fuse.EIO
}
@@ -72,7 +68,7 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
}
// apply changes to the filer, and also apply to local metaCache
err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
dir.wfs.mapPbIdFromLocalToFiler(request.Entry)
defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
@@ -97,11 +93,8 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
}
// create new file node
newNode := dir.newFile(req.NewName, request.Entry)
newNode := dir.newFile(req.NewName)
newFile := newNode.(*File)
if _, err := newFile.maybeLoadEntry(ctx); err != nil {
return nil, err
}
return newFile, err
@@ -109,10 +102,6 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
if dir.wfs.option.ReadOnly {
return nil, fuse.EPERM
}
glog.V(4).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
request := &filer_pb.CreateEntryRequest{
@@ -147,7 +136,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
return nil
})
symlink := dir.newFile(req.NewName, request.Entry)
symlink := dir.newFile(req.NewName)
return symlink, err

21
weed/filesys/dir_rename.go

@@ -13,10 +13,6 @@ import (
func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error {
if dir.wfs.option.ReadOnly {
return fuse.EPERM
}
newDir := newDirectory.(*Dir)
newPath := util.NewFullPath(newDir.FullPath(), req.NewName)
@@ -68,19 +64,28 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
return fuse.EIO
}
// fmt.Printf("rename path: %v => %v\n", oldPath, newPath)
dir.wfs.fsNodeCache.Move(oldPath, newPath)
oldFsNode := NodeWithId(oldPath.AsInode())
newFsNode := NodeWithId(newPath.AsInode())
dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) {
if file, ok := internalNode.(*File); ok {
glog.V(4).Infof("internal node %s", file.Name)
file.Name = req.NewName
file.id = uint64(newFsNode)
}
})
// change file handle
dir.wfs.handlesLock.Lock()
defer dir.wfs.handlesLock.Unlock()
inodeId := oldPath.AsInode()
existingHandle, found := dir.wfs.handles[inodeId]
glog.V(4).Infof("has open filehandle %s: %v", oldPath, found)
if !found || existingHandle == nil {
return err
return nil
}
glog.V(4).Infof("opened filehandle %s => %s", oldPath, newPath)
delete(dir.wfs.handles, inodeId)
dir.wfs.handles[newPath.AsInode()] = existingHandle
return err
return nil
}
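Note: where fsNodeCache.Move used to do the work, Rename now performs two bookkeeping steps; condensed from the hunk above (same fields, no new API):

    oldId, newId := oldPath.AsInode(), newPath.AsInode()
    // 1) renumber the live kernel node from the old inode to the new one
    dir.wfs.Server.InvalidateInternalNode(NodeWithId(oldId), NodeWithId(newId), func(n fs.Node) {
    	if f, ok := n.(*File); ok {
    		f.Name, f.id = req.NewName, newId
    	}
    })
    // 2) re-key any open file handle under the handles lock
    dir.wfs.handlesLock.Lock()
    if fh, ok := dir.wfs.handles[oldId]; ok && fh != nil {
    	delete(dir.wfs.handles, oldId)
    	dir.wfs.handles[newId] = fh
    }
    dir.wfs.handlesLock.Unlock()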

3
weed/filesys/dirty_page.go

@@ -13,6 +13,7 @@ import (
type ContinuousDirtyPages struct {
intervals *ContinuousIntervals
f *File
fh *FileHandle
writeWaitGroup sync.WaitGroup
chunkAddLock sync.Mutex
lastErr error
@@ -94,7 +95,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
defer pages.writeWaitGroup.Done()
reader = io.LimitReader(reader, size)
chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath(), pages.fh.writeOnly)(reader, pages.f.Name, offset)
if err != nil {
glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
pages.lastErr = err

104
weed/filesys/file.go

@@ -2,10 +2,8 @@ package filesys
import (
"context"
"io"
"os"
"sort"
"sync"
"time"
"github.com/seaweedfs/fuse"
@@ -20,6 +18,7 @@ import (
const blockSize = 512
var _ = fs.Node(&File{})
var _ = fs.NodeIdentifier(&File{})
var _ = fs.NodeOpener(&File{})
var _ = fs.NodeFsyncer(&File{})
var _ = fs.NodeSetattrer(&File{})
@@ -30,37 +29,37 @@ var _ = fs.NodeListxattrer(&File{})
var _ = fs.NodeForgetter(&File{})
type File struct {
Name string
dir *Dir
wfs *WFS
entry *filer_pb.Entry
entryLock sync.RWMutex
entryViewCache []filer.VisibleInterval
isOpen int
reader io.ReaderAt
dirtyMetadata bool
Name string
dir *Dir
wfs *WFS
entry *filer_pb.Entry
isOpen int
dirtyMetadata bool
id uint64
}
func (file *File) fullpath() util.FullPath {
return util.NewFullPath(file.dir.FullPath(), file.Name)
}
func (file *File) Id() uint64 {
return file.id
}
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
glog.V(4).Infof("file Attr %s, open:%v existing:%v", file.fullpath(), file.isOpen, attr)
entry := file.getEntry()
if file.isOpen <= 0 || entry == nil {
if entry, err = file.maybeLoadEntry(ctx); err != nil {
return err
}
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
return err
}
if entry == nil {
return fuse.ENOENT
}
// attr.Inode = file.fullpath().AsInode()
attr.Inode = file.Id()
attr.Valid = time.Second
attr.Mode = os.FileMode(entry.Attributes.FileMode)
attr.Size = filer.FileSize(entry)
@@ -98,7 +97,7 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid, req.Flags&fuse.OpenWriteOnly > 0)
resp.Handle = fuse.HandleID(handle.handle)
@@ -110,10 +109,6 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
if file.wfs.option.ReadOnly {
return fuse.EPERM
}
glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req)
entry, err := file.maybeLoadEntry(ctx)
@@ -122,7 +117,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
}
if file.isOpen > 0 {
file.wfs.handlesLock.Lock()
fileHandle := file.wfs.handles[file.fullpath().AsInode()]
fileHandle := file.wfs.handles[file.Id()]
file.wfs.handlesLock.Unlock()
if fileHandle != nil {
@@ -154,8 +149,6 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
}
}
entry.Chunks = chunks
file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(file.wfs.LookupFn(), chunks)
file.setReader(nil)
}
entry.Attributes.FileSize = req.Size
file.dirtyMetadata = true
@@ -204,10 +197,6 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
if file.wfs.option.ReadOnly {
return fuse.EPERM
}
glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)
entry, err := file.maybeLoadEntry(ctx)
@@ -225,10 +214,6 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
if file.wfs.option.ReadOnly {
return fuse.EPERM
}
glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)
entry, err := file.maybeLoadEntry(ctx)
@@ -272,16 +257,20 @@ func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
func (file *File) Forget() {
t := util.NewFullPath(file.dir.FullPath(), file.Name)
glog.V(4).Infof("Forget file %s", t)
file.wfs.fsNodeCache.DeleteFsNode(t)
file.wfs.ReleaseHandle(t, 0)
file.setReader(nil)
file.wfs.ReleaseHandle(t, fuse.HandleID(t.AsInode()))
}
func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, err error) {
entry = file.getEntry()
if file.isOpen > 0 {
return entry, nil
file.wfs.handlesLock.Lock()
handle, found := file.wfs.handles[file.Id()]
file.wfs.handlesLock.Unlock()
entry = file.entry
if found {
glog.V(4).Infof("maybeLoadEntry found opened file %s/%s", file.dir.FullPath(), file.Name)
entry = handle.f.entry
}
if entry != nil {
if len(entry.HardLinkId) == 0 {
// only always reload hard link
@@ -294,7 +283,7 @@ func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, er
return entry, err
}
if entry != nil {
file.setEntry(entry)
// file.entry = entry
} else {
glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
}
@@ -336,44 +325,11 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
return lessThan(chunks[i], chunks[j])
})
// add to entry view cache
for _, chunk := range chunks {
file.entryViewCache = filer.MergeIntoVisibles(file.entryViewCache, chunk)
}
file.setReader(nil)
glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(entry.Chunks), len(chunks))
entry.Chunks = append(entry.Chunks, newChunks...)
}
func (file *File) setReader(reader io.ReaderAt) {
r := file.reader
if r != nil {
if closer, ok := r.(io.Closer); ok {
closer.Close()
}
}
file.reader = reader
}
func (file *File) setEntry(entry *filer_pb.Entry) {
file.entryLock.Lock()
defer file.entryLock.Unlock()
file.entry = entry
file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(file.wfs.LookupFn(), entry.Chunks)
file.setReader(nil)
}
func (file *File) clearEntry() {
file.entryLock.Lock()
defer file.entryLock.Unlock()
file.entry = nil
file.entryViewCache = nil
file.setReader(nil)
}
func (file *File) saveEntry(entry *filer_pb.Entry) error {
return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -400,7 +356,5 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error {
}
func (file *File) getEntry() *filer_pb.Entry {
file.entryLock.RLock()
defer file.entryLock.RUnlock()
return file.entry
}

49
weed/filesys/filehandle.go

@@ -20,9 +20,11 @@ import (
type FileHandle struct {
// cache file has been written to
dirtyPages *ContinuousDirtyPages
contentType string
handle uint64
dirtyPages *ContinuousDirtyPages
entryViewCache []filer.VisibleInterval
reader io.ReaderAt
contentType string
handle uint64
sync.Mutex
f *File
@@ -30,7 +32,7 @@ type FileHandle struct {
NodeId fuse.NodeID // file or directory the request is about
Uid uint32 // user ID of process making request
Gid uint32 // group ID of process making request
writeOnly bool
}
func newFileHandle(file *File, uid, gid uint32) *FileHandle {
@@ -40,6 +42,7 @@ func newFileHandle(file *File, uid, gid uint32) *FileHandle {
Uid: uid,
Gid: gid,
}
fh.dirtyPages.fh = fh
entry := fh.f.getEntry()
if entry != nil {
entry.Attributes.FileSize = filer.FileSize(entry)
@@ -125,20 +128,20 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
}
var chunkResolveErr error
if fh.f.entryViewCache == nil {
fh.f.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks)
if fh.entryViewCache == nil {
fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks)
if chunkResolveErr != nil {
return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
}
fh.f.setReader(nil)
fh.reader = nil
}
reader := fh.f.reader
reader := fh.reader
if reader == nil {
chunkViews := filer.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64)
chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, math.MaxInt64)
reader = filer.NewChunkReaderAtFromClient(fh.f.wfs.LookupFn(), chunkViews, fh.f.wfs.chunkCache, fileSize)
}
fh.f.setReader(reader)
fh.reader = reader
totalRead, err := reader.ReadAt(buff, offset)
@@ -154,10 +157,6 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
// Write to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
if fh.f.wfs.option.ReadOnly {
return fuse.EPERM
}
fh.Lock()
defer fh.Unlock()
@@ -195,25 +194,27 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
glog.V(4).Infof("Release %v fh %d", fh.f.fullpath(), fh.handle)
glog.V(4).Infof("Release %v fh %d open=%d", fh.f.fullpath(), fh.handle, fh.f.isOpen)
fh.Lock()
defer fh.Unlock()
fh.f.isOpen--
if fh.f.isOpen <= 0 {
fh.f.entry = nil
fh.entryViewCache = nil
fh.reader = nil
fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
}
if fh.f.isOpen < 0 {
glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
fh.f.isOpen = 0
return nil
}
if fh.f.isOpen == 1 {
fh.f.isOpen--
fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
fh.f.setReader(nil)
}
return nil
}
@@ -289,7 +290,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.Chunks)
chunks, _ := filer.CompactFileChunks(fh.f.wfs.LookupFn(), nonManifestChunks)
chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks)
chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath(), fh.writeOnly), chunks)
if manifestErr != nil {
// not good, but should be ok
glog.V(0).Infof("MaybeManifestize: %v", manifestErr)

4
weed/filesys/meta_cache/meta_cache.go

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"os"
"strings"
"sync"
"github.com/chrislusf/seaweedfs/weed/filer"
@@ -31,9 +30,6 @@ func NewMetaCache(dbFolder string, baseDir util.FullPath, uidGidMapper *UidGidMa
visitedBoundary: bounded_tree.NewBoundedTree(baseDir),
uidGidMapper: uidGidMapper,
invalidateFunc: func(fullpath util.FullPath) {
if baseDir != "/" && strings.HasPrefix(string(fullpath), string(baseDir)) {
fullpath = fullpath[len(baseDir):]
}
invalidateFunc(fullpath)
},
}

8
weed/filesys/meta_cache/meta_cache_init.go

@@ -17,9 +17,9 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
glog.V(4).Infof("ReadDirAllEntries %s ...", path)
util.Retry("ReadDirAllEntries", func() error {
err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
entry := filer.FromPbEntry(string(dirPath), pbEntry)
if IsHiddenSystemEntry(string(dirPath), entry.Name()) {
err = filer_pb.ReadDirAllEntries(client, path, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
entry := filer.FromPbEntry(string(path), pbEntry)
if IsHiddenSystemEntry(string(path), entry.Name()) {
return nil
}
if err := mc.doInsertEntry(context.Background(), entry); err != nil {
@@ -35,7 +35,7 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
})
if err != nil {
err = fmt.Errorf("list %s: %v", dirPath, err)
err = fmt.Errorf("list %s: %v", path, err)
}
return

70
weed/filesys/wfs.go

@@ -41,9 +41,7 @@ type Option struct {
CacheDir string
CacheSizeMB int64
DataCenter string
EntryCacheTtl time.Duration
Umask os.FileMode
ReadOnly bool
MountUid uint32
MountGid uint32
@@ -105,24 +103,19 @@ func NewSeaweedFileSystem(option *Option) *WFS {
}
wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), util.FullPath(option.FilerMountRootPath), option.UidGidMapper, func(filePath util.FullPath) {
fsNode := wfs.fsNodeCache.GetFsNode(filePath)
if fsNode != nil {
if file, ok := fsNode.(*File); ok {
if err := wfs.Server.InvalidateNodeData(file); err != nil {
glog.V(4).Infof("InvalidateNodeData %s : %v", filePath, err)
}
file.clearEntry()
}
fsNode := NodeWithId(filePath.AsInode())
if err := wfs.Server.InvalidateNodeData(fsNode); err != nil {
glog.V(4).Infof("InvalidateNodeData %s : %v", filePath, err)
}
dir, name := filePath.DirAndName()
parent := wfs.root
if dir != "/" {
parent = wfs.fsNodeCache.GetFsNode(util.FullPath(dir))
parent := NodeWithId(util.FullPath(dir).AsInode())
if dir == option.FilerMountRootPath {
parent = NodeWithId(1)
}
if parent != nil {
if err := wfs.Server.InvalidateEntry(parent, name); err != nil {
glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err)
}
if err := wfs.Server.InvalidateEntry(parent, name); err != nil {
glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err)
}
})
startTime := time.Now()
@@ -131,8 +124,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
wfs.metaCache.Shutdown()
})
entry, _ := filer_pb.GetEntry(wfs, util.FullPath(wfs.option.FilerMountRootPath))
wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs, entry: entry}
wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs, id: 1}
wfs.fsNodeCache = newFsCache(wfs.root)
if wfs.option.ConcurrentWriters > 0 {
@@ -146,30 +138,37 @@ func (wfs *WFS) Root() (fs.Node, error) {
return wfs.root, nil
}
func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32, writeOnly bool) (fileHandle *FileHandle) {
fullpath := file.fullpath()
glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
inodeId := file.Id()
inodeId := file.fullpath().AsInode()
if file.isOpen > 0 {
existingHandle, found := wfs.handles[inodeId]
if found && existingHandle != nil {
file.isOpen++
return existingHandle
wfs.handlesLock.Lock()
existingHandle, found := wfs.handles[inodeId]
wfs.handlesLock.Unlock()
if found && existingHandle != nil {
existingHandle.f.isOpen++
if existingHandle.writeOnly {
existingHandle.writeOnly = writeOnly
}
glog.V(4).Infof("Acquired Handle %s open %d", fullpath, existingHandle.f.isOpen)
return existingHandle
}
entry, _ := file.maybeLoadEntry(context.Background())
file.entry = entry
fileHandle = newFileHandle(file, uid, gid)
file.maybeLoadEntry(context.Background())
fileHandle.writeOnly = writeOnly
file.isOpen++
wfs.handlesLock.Lock()
wfs.handles[inodeId] = fileHandle
wfs.handlesLock.Unlock()
fileHandle.handle = inodeId
glog.V(4).Infof("Acquired new Handle %s open %d", fullpath, file.isOpen)
return
}
@@ -177,9 +176,9 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
glog.V(4).Infof("ReleaseHandle %s id %d current handles length %d", fullpath, handleId, len(wfs.handles))
delete(wfs.handles, fullpath.AsInode())
delete(wfs.handles, uint64(handleId))
return
}
@@ -269,3 +268,12 @@ func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType {
return filer.LookupFn(wfs)
}
type NodeWithId uint64
func (n NodeWithId) Id() uint64 {
return uint64(n)
}
func (n NodeWithId) Attr(ctx context.Context, attr *fuse.Attr) error {
return nil
}
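Note: one subtlety in AcquireHandle above — re-opening an already-open file can only widen a write-only handle, never narrow a readable one. The rule as a tiny helper (a restatement for clarity, not code in the diff):

    // mergeWriteOnly restates the widening rule in AcquireHandle.
    func mergeWriteOnly(existing, requested bool) bool {
    	if existing {
    		return requested // write-only may widen if the new open wants reads
    	}
    	return false // an already-readable handle stays readable
    }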

59
weed/filesys/wfs_write.go

@@ -13,42 +13,43 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
)
func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFunctionType {
func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath, writeOnly bool) filer.SaveDataAsChunkFunctionType {
return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) {
var fileId, host string
var auth security.EncodedJwt
if err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
return util.Retry("assignVolume", func() error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: wfs.option.Replication,
Collection: wfs.option.Collection,
TtlSec: wfs.option.TtlSec,
DiskType: string(wfs.option.DiskType),
DataCenter: wfs.option.DataCenter,
Path: string(fullPath),
}
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: wfs.option.Replication,
Collection: wfs.option.Collection,
TtlSec: wfs.option.TtlSec,
DiskType: string(wfs.option.DiskType),
DataCenter: wfs.option.DataCenter,
Path: string(fullPath),
}
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
}
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
}
fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth)
loc := &filer_pb.Location{
Url: resp.Url,
PublicUrl: resp.PublicUrl,
}
host = wfs.AdjustedUrl(loc)
collection, replication = resp.Collection, resp.Replication
fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth)
loc := &filer_pb.Location{
Url: resp.Url,
PublicUrl: resp.PublicUrl,
}
host = wfs.AdjustedUrl(loc)
collection, replication = resp.Collection, resp.Replication
return nil
return nil
})
}); err != nil {
return nil, "", "", fmt.Errorf("filerGrpcAddress assign volume: %v", err)
}
@@ -67,7 +68,9 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error)
}
wfs.chunkCache.SetChunk(fileId, data)
if !writeOnly {
wfs.chunkCache.SetChunk(fileId, data)
}
chunk = uploadResult.ToPbFileChunk(fileId, offset)
return chunk, collection, replication, nil

2
weed/glog/glog.go

@@ -398,7 +398,7 @@ type flushSyncWriter interface {
func init() {
flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", true, "log to standard error as well as files")
flag.Var(&logging.verbosity, "v", "log level for V logs")
flag.Var(&logging.verbosity, "v", "log levels [0|1|2|3|4], default to 0")
flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")

105
weed/iamapi/iamapi_handlers.go

@@ -0,0 +1,105 @@
package iamapi
import (
"bytes"
"encoding/xml"
"fmt"
"strconv"
"net/http"
"net/url"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/aws/aws-sdk-go/service/iam"
)
type mimeType string
const (
mimeNone mimeType = ""
mimeXML mimeType = "application/xml"
)
func setCommonHeaders(w http.ResponseWriter) {
w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano()))
w.Header().Set("Accept-Ranges", "bytes")
}
// Encodes the response headers into XML format.
func encodeResponse(response interface{}) []byte {
var bytesBuffer bytes.Buffer
bytesBuffer.WriteString(xml.Header)
e := xml.NewEncoder(&bytesBuffer)
e.Encode(response)
return bytesBuffer.Bytes()
}
// If none of the http routes match respond with MethodNotAllowed
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI)
writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL)
}
func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) {
apiError := s3err.GetAPIError(errorCode)
errorResponse := getRESTErrorResponse(apiError, reqURL.Path)
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
func writeIamErrorResponse(w http.ResponseWriter, err error, object string, value string, msg error) {
errCode := err.Error()
errorResp := ErrorResponse{}
errorResp.Error.Type = "Sender"
errorResp.Error.Code = &errCode
if msg != nil {
errMsg := msg.Error()
errorResp.Error.Message = &errMsg
}
glog.Errorf("Response %+v", err)
switch errCode {
case iam.ErrCodeNoSuchEntityException:
msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value)
errorResp.Error.Message = &msg
writeResponse(w, http.StatusNotFound, encodeResponse(errorResp), mimeXML)
case iam.ErrCodeServiceFailureException:
writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML)
default:
writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML)
}
}
func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse {
return s3err.RESTErrorResponse{
Code: err.Code,
Message: err.Description,
Resource: resource,
RequestID: fmt.Sprintf("%d", time.Now().UnixNano()),
}
}
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
setCommonHeaders(w)
if response != nil {
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
}
if mType != mimeNone {
w.Header().Set("Content-Type", string(mType))
}
w.WriteHeader(statusCode)
if response != nil {
glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response))
_, err := w.Write(response)
if err != nil {
glog.V(0).Infof("write err: %v", err)
}
w.(http.Flusher).Flush()
}
}
func writeSuccessResponseXML(w http.ResponseWriter, response []byte) {
writeResponse(w, http.StatusOK, response, mimeXML)
}

453
weed/iamapi/iamapi_management_handlers.go

@@ -0,0 +1,453 @@
package iamapi
import (
"crypto/sha1"
"encoding/json"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"math/rand"
"net/http"
"net/url"
"reflect"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/service/iam"
)
const (
charsetUpper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
charset = charsetUpper + "abcdefghijklmnopqrstuvwxyz/"
policyDocumentVersion = "2012-10-17"
StatementActionAdmin = "*"
StatementActionWrite = "Put*"
StatementActionRead = "Get*"
StatementActionList = "List*"
StatementActionTagging = "Tagging*"
)
var (
seededRand *rand.Rand = rand.New(
rand.NewSource(time.Now().UnixNano()))
policyDocuments = map[string]*PolicyDocument{}
policyLock = sync.RWMutex{}
)
func MapToStatementAction(action string) string {
switch action {
case StatementActionAdmin:
return s3_constants.ACTION_ADMIN
case StatementActionWrite:
return s3_constants.ACTION_WRITE
case StatementActionRead:
return s3_constants.ACTION_READ
case StatementActionList:
return s3_constants.ACTION_LIST
case StatementActionTagging:
return s3_constants.ACTION_TAGGING
default:
return ""
}
}
func MapToIdentitiesAction(action string) string {
switch action {
case s3_constants.ACTION_ADMIN:
return StatementActionAdmin
case s3_constants.ACTION_WRITE:
return StatementActionWrite
case s3_constants.ACTION_READ:
return StatementActionRead
case s3_constants.ACTION_LIST:
return StatementActionList
case s3_constants.ACTION_TAGGING:
return StatementActionTagging
default:
return ""
}
}
type Statement struct {
Effect string `json:"Effect"`
Action []string `json:"Action"`
Resource []string `json:"Resource"`
}
type Policies struct {
Policies map[string]PolicyDocument `json:"policies"`
}
type PolicyDocument struct {
Version string `json:"Version"`
Statement []*Statement `json:"Statement"`
}
func (p PolicyDocument) String() string {
b, _ := json.Marshal(p)
return string(b)
}
func Hash(s *string) string {
h := sha1.New()
h.Write([]byte(*s))
return fmt.Sprintf("%x", h.Sum(nil))
}
func StringWithCharset(length int, charset string) string {
b := make([]byte, length)
for i := range b {
b[i] = charset[seededRand.Intn(len(charset))]
}
return string(b)
}
func (iama *IamApiServer) ListUsers(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListUsersResponse) {
for _, ident := range s3cfg.Identities {
resp.ListUsersResult.Users = append(resp.ListUsersResult.Users, &iam.User{UserName: &ident.Name})
}
return resp
}
func (iama *IamApiServer) ListAccessKeys(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListAccessKeysResponse) {
status := iam.StatusTypeActive
userName := values.Get("UserName")
for _, ident := range s3cfg.Identities {
if userName != "" && userName != ident.Name {
continue
}
for _, cred := range ident.Credentials {
resp.ListAccessKeysResult.AccessKeyMetadata = append(resp.ListAccessKeysResult.AccessKeyMetadata,
&iam.AccessKeyMetadata{UserName: &ident.Name, AccessKeyId: &cred.AccessKey, Status: &status},
)
}
}
return resp
}
func (iama *IamApiServer) CreateUser(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateUserResponse) {
userName := values.Get("UserName")
resp.CreateUserResult.User.UserName = &userName
s3cfg.Identities = append(s3cfg.Identities, &iam_pb.Identity{Name: userName})
return resp
}
func (iama *IamApiServer) DeleteUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp DeleteUserResponse, err error) {
for i, ident := range s3cfg.Identities {
if userName == ident.Name {
s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...)
return resp, nil
}
}
return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
}
func (iama *IamApiServer) GetUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp GetUserResponse, err error) {
for _, ident := range s3cfg.Identities {
if userName == ident.Name {
resp.GetUserResult.User = iam.User{UserName: &ident.Name}
return resp, nil
}
}
return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
}
func GetPolicyDocument(policy *string) (policyDocument PolicyDocument, err error) {
if err = json.Unmarshal([]byte(*policy), &policyDocument); err != nil {
return PolicyDocument{}, err
}
return policyDocument, err
}
func (iama *IamApiServer) CreatePolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreatePolicyResponse, err error) {
policyName := values.Get("PolicyName")
policyDocumentString := values.Get("PolicyDocument")
policyDocument, err := GetPolicyDocument(&policyDocumentString)
if err != nil {
return CreatePolicyResponse{}, err
}
policyId := Hash(&policyDocumentString)
arn := fmt.Sprintf("arn:aws:iam:::policy/%s", policyName)
resp.CreatePolicyResult.Policy.PolicyName = &policyName
resp.CreatePolicyResult.Policy.Arn = &arn
resp.CreatePolicyResult.Policy.PolicyId = &policyId
policies := Policies{}
policyLock.Lock()
defer policyLock.Unlock()
if err = iama.s3ApiConfig.GetPolicies(&policies); err != nil {
return resp, err
}
policies.Policies[policyName] = policyDocument
if err = iama.s3ApiConfig.PutPolicies(&policies); err != nil {
return resp, err
}
return resp, nil
}
func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) {
userName := values.Get("UserName")
policyName := values.Get("PolicyName")
policyDocumentString := values.Get("PolicyDocument")
policyDocument, err := GetPolicyDocument(&policyDocumentString)
if err != nil {
return PutUserPolicyResponse{}, err
}
policyDocuments[policyName] = &policyDocument
actions := GetActions(&policyDocument)
for _, ident := range s3cfg.Identities {
if userName == ident.Name {
for _, action := range actions {
ident.Actions = append(ident.Actions, action)
}
break
}
}
return resp, nil
}
func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp GetUserPolicyResponse, err error) {
userName := values.Get("UserName")
policyName := values.Get("PolicyName")
for _, ident := range s3cfg.Identities {
if userName != ident.Name {
continue
}
resp.GetUserPolicyResult.UserName = userName
resp.GetUserPolicyResult.PolicyName = policyName
if len(ident.Actions) == 0 {
return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
}
policyDocument := PolicyDocument{Version: policyDocumentVersion}
statements := make(map[string][]string)
for _, action := range ident.Actions {
// parse "Read:EXAMPLE-BUCKET"
act := strings.Split(action, ":")
resource := "*"
if len(act) == 2 {
resource = fmt.Sprintf("arn:aws:s3:::%s/*", act[1])
}
statements[resource] = append(statements[resource],
fmt.Sprintf("s3:%s", MapToIdentitiesAction(act[0])),
)
}
for resource, actions := range statements {
isEqAction := false
for i, statement := range policyDocument.Statement {
if reflect.DeepEqual(statement.Action, actions) {
policyDocument.Statement[i].Resource = append(
policyDocument.Statement[i].Resource, resource)
isEqAction = true
break
}
}
if isEqAction {
continue
}
policyDocumentStatement := Statement{
Effect: "Allow",
Action: actions,
}
policyDocumentStatement.Resource = append(policyDocumentStatement.Resource, resource)
policyDocument.Statement = append(policyDocument.Statement, &policyDocumentStatement)
}
resp.GetUserPolicyResult.PolicyDocument = policyDocument.String()
return resp, nil
}
return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
}
func (iama *IamApiServer) DeleteUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) {
userName := values.Get("UserName")
for i, ident := range s3cfg.Identities {
if ident.Name == userName {
s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...)
return resp, nil
}
}
return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
}
func GetActions(policy *PolicyDocument) (actions []string) {
for _, statement := range policy.Statement {
if statement.Effect != "Allow" {
continue
}
for _, resource := range statement.Resource {
// Parse "arn:aws:s3:::my-bucket/shared/*"
res := strings.Split(resource, ":")
if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" {
glog.Infof("not match resource: %s", res)
continue
}
for _, action := range statement.Action {
// Parse "s3:Get*"
act := strings.Split(action, ":")
if len(act) != 2 || act[0] != "s3" {
glog.Infof("not match action: %s", act)
continue
}
statementAction := MapToStatementAction(act[1])
if res[5] == "*" {
actions = append(actions, statementAction)
continue
}
// Parse my-bucket/shared/*
path := strings.Split(res[5], "/")
if len(path) != 2 || path[1] != "*" {
glog.Infof("not match bucket: %s", path)
continue
}
actions = append(actions, fmt.Sprintf("%s:%s", statementAction, path[0]))
}
}
}
return actions
}
func (iama *IamApiServer) CreateAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateAccessKeyResponse) {
userName := values.Get("UserName")
status := iam.StatusTypeActive
accessKeyId := StringWithCharset(21, charsetUpper)
secretAccessKey := StringWithCharset(42, charset)
resp.CreateAccessKeyResult.AccessKey.AccessKeyId = &accessKeyId
resp.CreateAccessKeyResult.AccessKey.SecretAccessKey = &secretAccessKey
resp.CreateAccessKeyResult.AccessKey.UserName = &userName
resp.CreateAccessKeyResult.AccessKey.Status = &status
changed := false
for _, ident := range s3cfg.Identities {
if userName == ident.Name {
ident.Credentials = append(ident.Credentials,
&iam_pb.Credential{AccessKey: accessKeyId, SecretKey: secretAccessKey})
changed = true
break
}
}
if !changed {
s3cfg.Identities = append(s3cfg.Identities,
&iam_pb.Identity{Name: userName,
Credentials: []*iam_pb.Credential{
{
AccessKey: accessKeyId,
SecretKey: secretAccessKey,
},
},
},
)
}
return resp
}
func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp DeleteAccessKeyResponse) {
userName := values.Get("UserName")
accessKeyId := values.Get("AccessKeyId")
for _, ident := range s3cfg.Identities {
if userName == ident.Name {
for i, cred := range ident.Credentials {
if cred.AccessKey == accessKeyId {
ident.Credentials = append(ident.Credentials[:i], ident.Credentials[i+1:]...)
break
}
}
break
}
}
return resp
}
func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
if err := r.ParseForm(); err != nil {
writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
return
}
values := r.PostForm
var s3cfgLock sync.RWMutex
s3cfgLock.RLock()
s3cfg := &iam_pb.S3ApiConfiguration{}
if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil {
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
s3cfgLock.RUnlock()
glog.V(4).Infof("DoActions: %+v", values)
var response interface{}
var err error
changed := true
switch r.Form.Get("Action") {
case "ListUsers":
response = iama.ListUsers(s3cfg, values)
changed = false
case "ListAccessKeys":
response = iama.ListAccessKeys(s3cfg, values)
changed = false
case "CreateUser":
response = iama.CreateUser(s3cfg, values)
case "GetUser":
userName := values.Get("UserName")
response, err = iama.GetUser(s3cfg, userName)
if err != nil {
writeIamErrorResponse(w, err, "user", userName, nil)
return
}
changed = false
case "DeleteUser":
userName := values.Get("UserName")
response, err = iama.DeleteUser(s3cfg, userName)
if err != nil {
writeIamErrorResponse(w, err, "user", userName, nil)
return
}
case "CreateAccessKey":
response = iama.CreateAccessKey(s3cfg, values)
case "DeleteAccessKey":
response = iama.DeleteAccessKey(s3cfg, values)
case "CreatePolicy":
response, err = iama.CreatePolicy(s3cfg, values)
if err != nil {
glog.Errorf("CreatePolicy: %+v", err)
writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
return
}
case "PutUserPolicy":
response, err = iama.PutUserPolicy(s3cfg, values)
if err != nil {
glog.Errorf("PutUserPolicy: %+v", err)
writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
return
}
case "GetUserPolicy":
response, err = iama.GetUserPolicy(s3cfg, values)
if err != nil {
writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
return
}
changed = false
case "DeleteUserPolicy":
if response, err = iama.DeleteUserPolicy(s3cfg, values); err != nil {
writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
}
default:
errNotImplemented := s3err.GetAPIError(s3err.ErrNotImplemented)
errorResponse := ErrorResponse{}
errorResponse.Error.Code = &errNotImplemented.Code
errorResponse.Error.Message = &errNotImplemented.Description
writeResponse(w, errNotImplemented.HTTPStatusCode, encodeResponse(errorResponse), mimeXML)
return
}
if changed {
s3cfgLock.Lock()
err := iama.s3ApiConfig.PutS3ApiConfiguration(s3cfg)
s3cfgLock.Unlock()
if err != nil {
writeIamErrorResponse(w, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err)
return
}
}
writeSuccessResponseXML(w, encodeResponse(response))
}

103
weed/iamapi/iamapi_response.go

@ -0,0 +1,103 @@
package iamapi
import (
"encoding/xml"
"fmt"
"time"
"github.com/aws/aws-sdk-go/service/iam"
)
type CommonResponse struct {
ResponseMetadata struct {
RequestId string `xml:"RequestId"`
} `xml:"ResponseMetadata"`
}
type ListUsersResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListUsersResponse"`
ListUsersResult struct {
Users []*iam.User `xml:"Users>member"`
IsTruncated bool `xml:"IsTruncated"`
} `xml:"ListUsersResult"`
}
type ListAccessKeysResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListAccessKeysResponse"`
ListAccessKeysResult struct {
AccessKeyMetadata []*iam.AccessKeyMetadata `xml:"AccessKeyMetadata>member"`
IsTruncated bool `xml:"IsTruncated"`
} `xml:"ListAccessKeysResult"`
}
type DeleteAccessKeyResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteAccessKeyResponse"`
}
type CreatePolicyResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreatePolicyResponse"`
CreatePolicyResult struct {
Policy iam.Policy `xml:"Policy"`
} `xml:"CreatePolicyResult"`
}
type CreateUserResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateUserResponse"`
CreateUserResult struct {
User iam.User `xml:"User"`
} `xml:"CreateUserResult"`
}
type DeleteUserResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteUserResponse"`
}
type GetUserResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserResponse"`
GetUserResult struct {
User iam.User `xml:"User"`
} `xml:"GetUserResult"`
}
type CreateAccessKeyResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateAccessKeyResponse"`
CreateAccessKeyResult struct {
AccessKey iam.AccessKey `xml:"AccessKey"`
} `xml:"CreateAccessKeyResult"`
}
type PutUserPolicyResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ PutUserPolicyResponse"`
}
type GetUserPolicyResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserPolicyResponse"`
GetUserPolicyResult struct {
UserName string `xml:"UserName"`
PolicyName string `xml:"PolicyName"`
PolicyDocument string `xml:"PolicyDocument"`
} `xml:"GetUserPolicyResult"`
}
type ErrorResponse struct {
CommonResponse
XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ErrorResponse"`
Error struct {
iam.ErrorDetails
Type string `xml:"Type"`
} `xml:"Error"`
}
func (r *CommonResponse) SetRequestId() {
r.ResponseMetadata.RequestId = fmt.Sprintf("%d", time.Now().UnixNano())
}
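Because each response embeds CommonResponse and an XMLName field carrying the IAM namespace, encoding/xml produces AWS-shaped documents directly. A runnable sketch with simplified fields (no aws-sdk types):

package main

import (
	"encoding/xml"
	"fmt"
)

type CommonResponse struct {
	ResponseMetadata struct {
		RequestId string `xml:"RequestId"`
	} `xml:"ResponseMetadata"`
}

type DeleteUserResponse struct {
	CommonResponse
	XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteUserResponse"`
}

func main() {
	resp := DeleteUserResponse{}
	resp.ResponseMetadata.RequestId = "1614584212000000000"
	out, _ := xml.MarshalIndent(resp, "", "  ")
	fmt.Println(string(out))
	// <DeleteUserResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
	//   <ResponseMetadata>
	//     <RequestId>1614584212000000000</RequestId>
	//   </ResponseMetadata>
	// </DeleteUserResponse>
}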

149
weed/iamapi/iamapi_server.go

@ -0,0 +1,149 @@
package iamapi
// https://docs.aws.amazon.com/cli/latest/reference/iam/list-roles.html
import (
"bytes"
"encoding/json"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
"github.com/chrislusf/seaweedfs/weed/s3api"
. "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"github.com/gorilla/mux"
"google.golang.org/grpc"
"net/http"
"strings"
)
type IamS3ApiConfig interface {
GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error)
PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error)
GetPolicies(policies *Policies) (err error)
PutPolicies(policies *Policies) (err error)
}
type IamS3ApiConfigure struct {
option *IamServerOption
masterClient *wdclient.MasterClient
}
type IamServerOption struct {
Masters string
Filer string
Port int
FilerGrpcAddress string
GrpcDialOption grpc.DialOption
}
type IamApiServer struct {
s3ApiConfig IamS3ApiConfig
iam *s3api.IdentityAccessManagement
}
var s3ApiConfigure IamS3ApiConfig
func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) {
s3ApiConfigure = IamS3ApiConfigure{
option: option,
masterClient: wdclient.NewMasterClient(option.GrpcDialOption, pb.AdminShellClient, "", 0, "", strings.Split(option.Masters, ",")),
}
s3Option := s3api.S3ApiServerOption{Filer: option.Filer}
iamApiServer = &IamApiServer{
s3ApiConfig: s3ApiConfigure,
iam: s3api.NewIdentityAccessManagement(&s3Option),
}
iamApiServer.registerRouter(router)
return iamApiServer, nil
}
func (iama *IamApiServer) registerRouter(router *mux.Router) {
// API Router
apiRouter := router.PathPrefix("/").Subrouter()
// ListBuckets
// apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST"))
apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN))
//
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
}
func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
var buf bytes.Buffer
err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamIdentityFile, &buf); err != nil {
return err
}
return nil
})
if err != nil {
return err
}
if buf.Len() > 0 {
if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), s3cfg); err != nil {
return err
}
}
return nil
}
func (iam IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
buf := bytes.Buffer{}
if err := filer.S3ConfigurationToText(&buf, s3cfg); err != nil {
return fmt.Errorf("S3ConfigurationToText: %s", err)
}
return pb.WithGrpcFilerClient(
iam.option.FilerGrpcAddress,
iam.option.GrpcDialOption,
func(client filer_pb.SeaweedFilerClient) error {
if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile, buf.Bytes()); err != nil {
return err
}
return nil
},
)
}
func (iam IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) {
var buf bytes.Buffer
err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamPoliciesFile, &buf); err != nil {
return err
}
return nil
})
if err != nil {
return err
}
if buf.Len() == 0 {
policies.Policies = make(map[string]PolicyDocument)
return nil
}
if err := json.Unmarshal(buf.Bytes(), policies); err != nil {
return err
}
return nil
}
func (iam IamS3ApiConfigure) PutPolicies(policies *Policies) (err error) {
var b []byte
if b, err = json.Marshal(policies); err != nil {
return err
}
return pb.WithGrpcFilerClient(
iam.option.FilerGrpcAddress,
iam.option.GrpcDialOption,
func(client filer_pb.SeaweedFilerClient) error {
if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamPoliciesFile, b); err != nil {
return err
}
return nil
},
)
}

181
weed/iamapi/iamapi_test.go

@ -0,0 +1,181 @@
package iamapi
import (
"encoding/xml"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
"github.com/gorilla/mux"
"github.com/jinzhu/copier"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
)
var GetS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error)
var PutS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error)
var GetPolicies func(policies *Policies) (err error)
var PutPolicies func(policies *Policies) (err error)
var s3config = iam_pb.S3ApiConfiguration{}
var policiesFile = Policies{Policies: make(map[string]PolicyDocument)}
var ias = IamApiServer{s3ApiConfig: iamS3ApiConfigureMock{}}
type iamS3ApiConfigureMock struct{}
func (iam iamS3ApiConfigureMock) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
_ = copier.Copy(&s3cfg.Identities, &s3config.Identities)
return nil
}
func (iam iamS3ApiConfigureMock) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
_ = copier.Copy(&s3config.Identities, &s3cfg.Identities)
return nil
}
func (iam iamS3ApiConfigureMock) GetPolicies(policies *Policies) (err error) {
_ = copier.Copy(&policies, &policiesFile)
return nil
}
func (iam iamS3ApiConfigureMock) PutPolicies(policies *Policies) (err error) {
_ = copier.Copy(&policiesFile, &policies)
return nil
}
func TestCreateUser(t *testing.T) {
userName := aws.String("Test")
params := &iam.CreateUserInput{UserName: userName}
req, _ := iam.New(session.New()).CreateUserRequest(params)
_ = req.Build()
out := CreateUserResponse{}
response, err := executeRequest(req.HTTPRequest, out)
assert.Equal(t, nil, err)
assert.Equal(t, http.StatusOK, response.Code)
//assert.Equal(t, out.XMLName, "lol")
}
func TestListUsers(t *testing.T) {
params := &iam.ListUsersInput{}
req, _ := iam.New(session.New()).ListUsersRequest(params)
_ = req.Build()
out := ListUsersResponse{}
response, err := executeRequest(req.HTTPRequest, out)
assert.Equal(t, nil, err)
assert.Equal(t, http.StatusOK, response.Code)
}
func TestListAccessKeys(t *testing.T) {
svc := iam.New(session.New())
params := &iam.ListAccessKeysInput{}
req, _ := svc.ListAccessKeysRequest(params)
_ = req.Build()
out := ListAccessKeysResponse{}
response, err := executeRequest(req.HTTPRequest, out)
assert.Equal(t, nil, err)
assert.Equal(t, http.StatusOK, response.Code)
}
func TestGetUser(t *testing.T) {
userName := aws.String("Test")
params := &iam.GetUserInput{UserName: userName}
req, _ := iam.New(session.New()).GetUserRequest(params)
_ = req.Build()
out := GetUserResponse{}
response, err := executeRequest(req.HTTPRequest, out)
assert.Equal(t, nil, err)
assert.Equal(t, http.StatusOK, response.Code)
}
// TODO: flatten statements
func TestCreatePolicy(t *testing.T) {
params := &iam.CreatePolicyInput{
PolicyName: aws.String("S3-read-only-example-bucket"),
PolicyDocument: aws.String(`
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:Get*",
"s3:List*"
],
"Resource": [
"arn:aws:s3:::EXAMPLE-BUCKET",
"arn:aws:s3:::EXAMPLE-BUCKET/*"
]
}
]
}`),
}
req, _ := iam.New(session.New()).CreatePolicyRequest(params)
_ = req.Build()
out := CreatePolicyResponse{}
response, err := executeRequest(req.HTTPRequest, out)
assert.Equal(t, nil, err)
assert.Equal(t, http.StatusOK, response.Code)
}
func TestPutUserPolicy(t *testing.T) {
userName := aws.String("Test")
params := &iam.PutUserPolicyInput{
UserName: userName,
PolicyName: aws.String("S3-read-only-example-bucket"),
PolicyDocument: aws.String(
`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:Get*",
"s3:List*"
],
"Resource": [
"arn:aws:s3:::EXAMPLE-BUCKET",
"arn:aws:s3:::EXAMPLE-BUCKET/*"
]
}
]
}`),
}
req, _ := iam.New(session.New()).PutUserPolicyRequest(params)
_ = req.Build()
out := PutUserPolicyResponse{}
response, err := executeRequest(req.HTTPRequest, out)
assert.Equal(t, nil, err)
assert.Equal(t, http.StatusOK, response.Code)
}
func TestGetUserPolicy(t *testing.T) {
userName := aws.String("Test")
params := &iam.GetUserPolicyInput{UserName: userName, PolicyName: aws.String("S3-read-only-example-bucket")}
req, _ := iam.New(session.New()).GetUserPolicyRequest(params)
_ = req.Build()
out := GetUserPolicyResponse{}
response, err := executeRequest(req.HTTPRequest, out)
assert.Equal(t, nil, err)
assert.Equal(t, http.StatusOK, response.Code)
}
func TestDeleteUser(t *testing.T) {
userName := aws.String("Test")
params := &iam.DeleteUserInput{UserName: userName}
req, _ := iam.New(session.New()).DeleteUserRequest(params)
_ = req.Build()
out := DeleteUserResponse{}
response, err := executeRequest(req.HTTPRequest, out)
assert.Equal(t, nil, err)
assert.Equal(t, http.StatusOK, response.Code)
}
func executeRequest(req *http.Request, v interface{}) (*httptest.ResponseRecorder, error) {
rr := httptest.NewRecorder()
apiRouter := mux.NewRouter().SkipClean(true)
apiRouter.Path("/").Methods("POST").HandlerFunc(ias.DoActions)
apiRouter.ServeHTTP(rr, req)
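// Note: v arrives by value, so unmarshalling into &v fills a copy held by the
// interface; callers that need the decoded body should pass a pointer instead.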
return rr, xml.Unmarshal(rr.Body.Bytes(), &v)
}

45
weed/messaging/broker/broker_append.go

@ -3,6 +3,7 @@ package broker
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/security"
"io"
"github.com/chrislusf/seaweedfs/weed/glog"
@ -10,7 +11,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -53,26 +53,33 @@ func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConf
// assign a volume location
if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: topicConfig.Replication,
Collection: topicConfig.Collection,
}
assignErr := util.Retry("assignVolume", func() error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: topicConfig.Replication,
Collection: topicConfig.Collection,
}
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
}
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
}
assignResult.Auth = security.EncodedJwt(resp.Auth)
assignResult.Fid = resp.FileId
assignResult.Url = resp.Url
assignResult.PublicUrl = resp.PublicUrl
assignResult.Count = uint64(resp.Count)
assignResult.Auth = security.EncodedJwt(resp.Auth)
assignResult.Fid = resp.FileId
assignResult.Url = resp.Url
assignResult.PublicUrl = resp.PublicUrl
assignResult.Count = uint64(resp.Count)
return nil
})
if assignErr != nil {
return assignErr
}
return nil
}); err != nil {
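The change above rebuilds the AssignVolumeRequest inside a util.Retry closure so every attempt issues a fresh request. A generic, self-contained sketch of that wrap-the-whole-operation pattern (the real util.Retry's backoff and retriable-error test may differ):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry runs job up to attempts times with a fixed pause between tries;
// util.Retry in this repo is similar in spirit but not identical.
func retry(name string, attempts int, pause time.Duration, job func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = job(); err == nil {
			return nil
		}
		fmt.Printf("%s attempt %d failed: %v\n", name, i+1, err)
		time.Sleep(pause)
	}
	return err
}

func main() {
	calls := 0
	err := retry("assignVolume", 3, 10*time.Millisecond, func() error {
		calls++
		if calls < 2 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(err, calls) // <nil> 2
}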

1
weed/operation/assign_file_id.go

@ -86,6 +86,7 @@ func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest
continue
}
break
}
return ret, lastError

11
weed/operation/upload_content.go

@ -11,7 +11,6 @@ import (
"net/http"
"net/textproto"
"path/filepath"
"runtime/debug"
"strings"
"time"
@ -40,7 +39,7 @@ func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *fi
Offset: offset,
Size: uint64(uploadResult.Size),
Mtime: time.Now().UnixNano(),
ETag: uploadResult.ETag,
ETag: uploadResult.ContentMd5,
CipherKey: uploadResult.CipherKey,
IsCompressed: uploadResult.Gzip > 0,
Fid: fid,
@ -235,8 +234,12 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
// print("+")
resp, post_err := HttpClient.Do(req)
if post_err != nil {
glog.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
debug.PrintStack()
if strings.Contains(post_err.Error(), "connection reset by peer") ||
strings.Contains(post_err.Error(), "use of closed network connection") {
resp, post_err = HttpClient.Do(req)
}
}
if post_err != nil {
return nil, fmt.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
}
// print("-")

1
weed/pb/master.proto

@ -288,6 +288,7 @@ message LeaseAdminTokenRequest {
int64 previous_token = 1;
int64 previous_lock_time = 2;
string lock_name = 3;
string client_name = 4;
}
message LeaseAdminTokenResponse {
int64 token = 1;

206
weed/pb/master_pb/master.pb.go

@ -2468,6 +2468,7 @@ type LeaseAdminTokenRequest struct {
PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"`
PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"`
LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"`
ClientName string `protobuf:"bytes,4,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"`
}
func (x *LeaseAdminTokenRequest) Reset() {
@ -2523,6 +2524,13 @@ func (x *LeaseAdminTokenRequest) GetLockName() string {
return ""
}
func (x *LeaseAdminTokenRequest) GetClientName() string {
if x != nil {
return x.ClientName
}
return ""
}
type LeaseAdminTokenResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -3268,7 +3276,7 @@ var file_master_proto_rawDesc = []byte{
0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f,
0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x8a,
0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0xab,
0x01, 0x0a, 0x16, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b,
0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65,
0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
@ -3277,103 +3285,105 @@ var file_master_proto_rawDesc = []byte{
0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72,
0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b,
0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x17, 0x4c,
0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x0a,
0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76, 0x69,
0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c,
0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x5f,
0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76,
0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09,
0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x6c,
0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xca, 0x09, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77, 0x65,
0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62,
0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51, 0x0a,
0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1f,
0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43,
0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01,
0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f,
0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f,
0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18, 0x2e,
0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69,
0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53,
0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61,
0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70,
0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, 0x6f,
0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x22,
0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43,
0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70,
0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70,
0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73,
0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56,
0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
0x51, 0x0a, 0x0c, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12,
0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75,
0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75,
0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x6d,
0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74,
0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65,
0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e,
0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61,
0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64,
0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54,
0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d, 0x61,
0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d,
0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69,
0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54,
0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61,
0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41,
0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77,
0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x61,
0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x17,
0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a,
0x0a, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
0x03, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18,
0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65,
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76,
0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b,
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65,
0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a,
0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65,
0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xca, 0x09, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77,
0x65, 0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x72, 0x74,
0x62, 0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62,
0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73,
0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51,
0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12,
0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70,
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x28, 0x01, 0x30,
0x01, 0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18,
0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67,
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74,
0x69, 0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74,
0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x4c, 0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62,
0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73,
0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43,
0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12,
0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c,
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f,
0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f,
0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61,
0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63,
0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x12, 0x51, 0x0a, 0x0c, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63,
0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63,
0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72,
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e,
0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73,
0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74,
0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72,
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24,
0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d,
0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41,
0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e,
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d,
0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64,
0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d,
0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e,
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d,
0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65,
0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61,
0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d,
0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

45
weed/replication/sink/filersink/fetch_write.go

@ -71,29 +71,30 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
var auth security.EncodedJwt
if err := fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
return util.Retry("assignVolume", func() error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: fs.replication,
Collection: fs.collection,
TtlSec: fs.ttlSec,
DataCenter: fs.dataCenter,
DiskType: fs.diskType,
Path: path,
}
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
}
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: fs.replication,
Collection: fs.collection,
TtlSec: fs.ttlSec,
DataCenter: fs.dataCenter,
DiskType: fs.diskType,
Path: path,
}
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
}
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
return nil
return nil
})
}); err != nil {
return "", fmt.Errorf("filerGrpcAddress assign volume: %v", err)
}

28
weed/s3api/auth_credentials.go

@ -3,14 +3,14 @@ package s3api
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"io/ioutil"
"net/http"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"io/ioutil"
"net/http"
"strings"
)
type Action string
@ -255,11 +255,21 @@ func (identity *Identity) canDo(action Action, bucket string) bool {
limitedByBucket := string(action) + ":" + bucket
adminLimitedByBucket := s3_constants.ACTION_ADMIN + ":" + bucket
for _, a := range identity.Actions {
if string(a) == limitedByBucket {
return true
}
if string(a) == adminLimitedByBucket {
return true
act := string(a)
if strings.HasSuffix(act, "*") {
if strings.HasPrefix(limitedByBucket, act[:len(act)-1]) {
return true
}
if strings.HasPrefix(adminLimitedByBucket, act[:len(act)-1]) {
return true
}
} else {
if act == limitedByBucket {
return true
}
if act == adminLimitedByBucket {
return true
}
}
}
return false
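A trimmed, runnable sketch of the wildcard check introduced above: an identity action ending in "*" is treated as a prefix match against the "Action:bucket" string. The real method also handles unscoped and admin actions before this loop, which the sketch omits.

package main

import (
	"fmt"
	"strings"
)

func canDo(identityActions []string, action, bucket string) bool {
	limited := action + ":" + bucket
	adminLimited := "Admin:" + bucket
	for _, act := range identityActions {
		if strings.HasSuffix(act, "*") {
			prefix := act[:len(act)-1]
			if strings.HasPrefix(limited, prefix) || strings.HasPrefix(adminLimited, prefix) {
				return true
			}
		} else if act == limited || act == adminLimited {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(canDo([]string{"Read:my-bucket*"}, "Read", "my-bucket-logs")) // true
	fmt.Println(canDo([]string{"Read:other"}, "Read", "my-bucket"))           // false
}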

30
weed/s3api/auth_signature_v4.go

@ -24,6 +24,7 @@ import (
"crypto/subtle"
"encoding/hex"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"io/ioutil"
"net/http"
"net/url"
"regexp"
@ -132,6 +133,17 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r
// Query string.
queryStr := req.URL.Query().Encode()
// Get hashed Payload
if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil {
buf, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
b, _ := ioutil.ReadAll(bytes.NewBuffer(buf))
if len(b) != 0 {
bodyHash := sha256.Sum256(b)
hashedPayload = hex.EncodeToString(bodyHash[:])
}
}
// Get canonical request.
canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method)
@ -139,7 +151,10 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r
stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope())
// Get hmac signing key.
signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region)
signingKey := getSigningKey(cred.SecretKey,
signV4Values.Credential.scope.date,
signV4Values.Credential.scope.region,
signV4Values.Credential.scope.service)
// Calculate signature.
newSignature := getSignature(signingKey, stringToSign)
@ -310,7 +325,7 @@ func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http.
}
// Get signing key.
signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region)
signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region, credHeader.scope.service)
// Get signature.
newSignature := getSignature(signingKey, formValues.Get("Policy"))
@ -427,7 +442,10 @@ func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload s
presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope())
// Get hmac presigned signing key.
presignedSigningKey := getSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region)
presignedSigningKey := getSigningKey(cred.SecretKey,
pSignValues.Credential.scope.date,
pSignValues.Credential.scope.region,
pSignValues.Credential.scope.service)
// Get new signature.
newSignature := getSignature(presignedSigningKey, presignedStringToSign)
@ -655,11 +673,11 @@ func sumHMAC(key []byte, data []byte) []byte {
}
// getSigningKey hmac seed to calculate final signature.
func getSigningKey(secretKey string, t time.Time, region string) []byte {
func getSigningKey(secretKey string, t time.Time, region string, service string) []byte {
date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd)))
regionBytes := sumHMAC(date, []byte(region))
service := sumHMAC(regionBytes, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request"))
serviceBytes := sumHMAC(regionBytes, []byte(service))
signingKey := sumHMAC(serviceBytes, []byte("aws4_request"))
return signingKey
}
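The derivation is the standard four-step AWS SigV4 key schedule, with the service segment ("s3", "iam", ...) now supplied by the caller instead of hard-coded. A self-contained illustration with example-only inputs:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

func sumHMAC(key []byte, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

// getSigningKey mirrors the function above: date -> region -> service -> "aws4_request".
func getSigningKey(secretKey string, t time.Time, region string, service string) []byte {
	date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format("20060102")))
	regionBytes := sumHMAC(date, []byte(region))
	serviceBytes := sumHMAC(regionBytes, []byte(service))
	return sumHMAC(serviceBytes, []byte("aws4_request"))
}

func main() {
	t, _ := time.Parse("20060102", "20210201")
	fmt.Println(hex.EncodeToString(getSigningKey("secret", t, "us-east-1", "iam")))
}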

2
weed/s3api/auto_signature_v4_test.go

@ -370,7 +370,7 @@ func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires i
queryStr := strings.Replace(query.Encode(), "+", "%20", -1)
canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method)
stringToSign := getStringToSign(canonicalRequest, date, scope)
signingKey := getSigningKey(secretAccessKey, date, region)
signingKey := getSigningKey(secretAccessKey, date, region, "s3")
signature := getSignature(signingKey, stringToSign)
req.URL.RawQuery = query.Encode()

4
weed/s3api/chunked_reader_v4.go

@ -45,7 +45,7 @@ func getChunkSignature(secretKey string, seedSignature string, region string, da
hashedChunk
// Get hmac signing key.
signingKey := getSigningKey(secretKey, date, region)
signingKey := getSigningKey(secretKey, date, region, "s3")
// Calculate signature.
newSignature := getSignature(signingKey, stringToSign)
@ -117,7 +117,7 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr
stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope())
// Get hmac signing key.
signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region)
signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, "s3")
// Calculate signature.
newSignature := getSignature(signingKey, stringToSign)

8
weed/s3api/s3api_object_handlers.go

@ -311,7 +311,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
}
defer util.CloseResponse(resp)
if resp.ContentLength == -1 || resp.StatusCode == 404 {
if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 {
if r.Method != "DELETE" {
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
return
@ -326,11 +326,7 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
for k, v := range proxyResponse.Header {
w.Header()[k] = v
}
if proxyResponse.Header.Get("Content-Range") != "" && proxyResponse.StatusCode == 200 {
w.WriteHeader(http.StatusPartialContent)
} else {
w.WriteHeader(proxyResponse.StatusCode)
}
w.WriteHeader(proxyResponse.StatusCode)
io.Copy(w, proxyResponse.Body)
}

15
weed/s3api/s3api_objects_list_handlers.go

@ -63,6 +63,14 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
if len(response.Contents) == 0 {
if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
writeErrorResponse(w, s3err.ErrNoSuchBucket, r.URL)
return
}
}
responseV2 := &ListBucketResultV2{
XMLName: response.XMLName,
Name: response.Name,
@ -106,6 +114,13 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
return
}
if len(response.Contents) == 0 {
if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
writeErrorResponse(w, s3err.ErrNoSuchBucket, r.URL)
return
}
}
writeSuccessResponseXML(w, encodeResponse(response))
}

8
weed/server/common.go

@ -234,12 +234,12 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file
}
}
func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64, httpStatusCode int) error) {
rangeReq := r.Header.Get("Range")
if rangeReq == "" {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
if err := writeFn(w, 0, totalSize); err != nil {
if err := writeFn(w, 0, totalSize, 0); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@ -279,7 +279,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
w.Header().Set("Content-Range", ra.contentRange(totalSize))
err = writeFn(w, ra.start, ra.length)
err = writeFn(w, ra.start, ra.length, http.StatusPartialContent)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@ -307,7 +307,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
pw.CloseWithError(e)
return
}
if e = writeFn(part, ra.start, ra.length); e != nil {
if e = writeFn(part, ra.start, ra.length, 0); e != nil {
pw.CloseWithError(e)
return
}
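The new writeFn contract threads the status code into the callback: processRangeRequest sets Content-Length and Content-Range first, then the callback writes the status (when nonzero) before streaming bytes, since headers are frozen once WriteHeader runs. A minimal sketch of that ordering (hypothetical handler and data, not the real call sites):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

func handler(w http.ResponseWriter, r *http.Request) {
	data := "hello, range requests"
	writeFn := func(out io.Writer, offset, size int64, httpStatusCode int) error {
		if httpStatusCode != 0 {
			w.WriteHeader(httpStatusCode) // headers set before this point are sent
		}
		_, err := io.Copy(out, strings.NewReader(data[offset:offset+size]))
		return err
	}
	w.Header().Set("Content-Range", fmt.Sprintf("bytes 0-4/%d", len(data)))
	_ = writeFn(w, 0, 5, http.StatusPartialContent)
}

func main() {
	rec := httptest.NewRecorder()
	handler(rec, httptest.NewRequest("GET", "/", nil))
	fmt.Println(rec.Code, rec.Body.String()) // 206 hello
}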

2
weed/server/filer_grpc_server.go

@ -63,7 +63,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
var listErr error
for limit > 0 {
var hasEntries bool
lastFileName, listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", func(entry *filer.Entry) bool {
lastFileName, listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", "", func(entry *filer.Entry) bool {
hasEntries = true
if err = stream.Send(&filer_pb.ListEntriesResponse{
Entry: &filer_pb.Entry{

2
weed/server/filer_grpc_server_rename.go

@ -74,7 +74,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
includeLastFile := false
for {
entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "")
entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "", "")
if err != nil {
return err
}

12
weed/server/filer_server_handlers_read.go

@ -79,7 +79,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat))
if r.Header.Get("If-Modified-Since") != "" {
if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil {
if t.After(entry.Attr.Mtime) {
if !t.Before(entry.Attr.Mtime) {
w.WriteHeader(http.StatusNotModified)
return
}
@ -131,9 +131,6 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
if r.Method == "HEAD" {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, true)
})
return
}
@ -153,7 +150,10 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
}
processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
if httpStatusCode != 0 {
w.WriteHeader(httpStatusCode)
}
if offset+size <= int64(len(entry.Content)) {
_, err := writer.Write(entry.Content[offset : offset+size])
if err != nil {
@ -161,7 +161,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
return err
}
return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, false)
return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
})
}
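A tiny check of the revised If-Modified-Since comparison: !t.Before(mtime) also answers 304 when the header time equals the entry mtime exactly, which the old t.After(mtime) missed (http.TimeFormat has second granularity, so exact equality is the common case):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	mtime := time.Date(2021, 2, 1, 10, 0, 0, 0, time.UTC)
	header := mtime.Format(http.TimeFormat)
	t, _ := time.Parse(http.TimeFormat, header)
	fmt.Println(t.After(mtime), !t.Before(mtime)) // false true -> old code re-sent the body
}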

3
weed/server/filer_server_handlers_read_dir.go

@ -35,8 +35,9 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
lastFileName := r.FormValue("lastFileName")
namePattern := r.FormValue("namePattern")
namePatternExclude := r.FormValue("namePatternExclude")
entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern)
entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern, namePatternExclude)
if err != nil {
glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err)

20
weed/server/filer_server_handlers_tagging.go

@ -78,11 +78,27 @@ func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Reque
existingEntry.Extended = make(map[string][]byte)
}
// parse out tags to be deleted
toDelete := strings.Split(r.URL.Query().Get("tagging"), ",")
deletions := make(map[string]struct{})
for _, deletion := range toDelete {
deletions[deletion] = struct{}{}
}
// delete all tags or specific tags
hasDeletion := false
for header, _ := range existingEntry.Extended {
if strings.HasPrefix(header, needle.PairNamePrefix) {
delete(existingEntry.Extended, header)
hasDeletion = true
if len(deletions) == 0 {
delete(existingEntry.Extended, header)
hasDeletion = true
} else {
tag := header[len(needle.PairNamePrefix):]
if _, found := deletions[tag]; found {
delete(existingEntry.Extended, header)
hasDeletion = true
}
}
}
}
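A minimal sketch of the deletion-set logic above: an empty "tagging" query value clears every prefixed pair, otherwise only the listed tags are removed. Note that strings.Split on an empty string yields one empty element, so this sketch guards the empty case explicitly; the map is simplified to string values, and the prefix is an assumed stand-in for needle.PairNamePrefix.

package main

import (
	"fmt"
	"strings"
)

const pairNamePrefix = "Seaweed-" // assumed; mirrors needle.PairNamePrefix

func deleteTags(extended map[string]string, tagging string) {
	deletions := make(map[string]struct{})
	if tagging != "" {
		for _, d := range strings.Split(tagging, ",") {
			deletions[d] = struct{}{}
		}
	}
	for header := range extended {
		if !strings.HasPrefix(header, pairNamePrefix) {
			continue
		}
		if len(deletions) == 0 { // no list given: delete all tags
			delete(extended, header)
			continue
		}
		if _, found := deletions[header[len(pairNamePrefix):]]; found {
			delete(extended, header)
		}
	}
}

func main() {
	m := map[string]string{"Seaweed-color": "red", "Seaweed-size": "L"}
	deleteTags(m, "color")
	fmt.Println(m) // map[Seaweed-size:L]
}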

8
weed/server/filer_server_handlers_write_autochunk.go

@ -142,6 +142,14 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
if fileName != "" {
path += fileName
}
} else {
if fileName != "" {
if possibleDirEntry, findDirErr := fs.filer.FindEntry(ctx, util.FullPath(path)); findDirErr == nil {
if possibleDirEntry.IsDirectory() {
path += "/" + fileName
}
}
}
}
var entry *filer.Entry

169
weed/server/filer_server_handlers_write_upload.go

@ -1,14 +1,13 @@
package weed_server
import (
"bytes"
"crypto/md5"
"hash"
"io"
"io/ioutil"
"net/http"
"runtime"
"strings"
"sync"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
@ -20,136 +19,82 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
)
var (
limitedUploadProcessor = util.NewLimitedOutOfOrderProcessor(int32(runtime.NumCPU()))
)
func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, dataSize int64, err error, smallContent []byte) {
func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) {
var fileChunks []*filer_pb.FileChunk
md5Hash = md5.New()
md5Hash := md5.New()
var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
// save small content directly
if !isAppend(r) && ((0 < contentLength && contentLength < fs.option.SaveToFilerLimit) || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && contentLength < 4*1024) {
smallContent, err = ioutil.ReadAll(partReader)
dataSize = int64(len(smallContent))
return
}
chunkOffset := int64(0)
var smallContent []byte
resultsChan := make(chan *ChunkCreationResult, 2)
for {
limitedReader := io.LimitReader(partReader, int64(chunkSize))
var waitForAllData sync.WaitGroup
waitForAllData.Add(1)
go func() {
// process upload results
defer waitForAllData.Done()
for result := range resultsChan {
if result.err != nil {
err = result.err
continue
}
// Save to chunk manifest structure
fileChunks = append(fileChunks, result.chunk)
data, err := ioutil.ReadAll(limitedReader)
if err != nil {
return nil, nil, 0, err, nil
}
}()
var lock sync.Mutex
readOffset := int64(0)
var wg sync.WaitGroup
for err == nil {
wg.Add(1)
request := func() {
defer wg.Done()
var localOffset int64
// read from the input
lock.Lock()
localOffset = readOffset
limitedReader := io.LimitReader(partReader, int64(chunkSize))
data, readErr := ioutil.ReadAll(limitedReader)
readOffset += int64(len(data))
lock.Unlock()
// handle read errors
if readErr != nil {
if readErr != io.EOF {
if err == nil {
err = readErr
}
resultsChan <- &ChunkCreationResult{
err: readErr,
}
}
return
if chunkOffset == 0 && !isAppend(r) {
if len(data) < int(fs.option.SaveToFilerLimit) || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 {
smallContent = data
chunkOffset += int64(len(data))
break
}
if len(data) == 0 {
readErr = io.EOF
return
}
dataReader := util.NewBytesReader(data)
// retry to assign a different file id
var fileId, urlLocation string
var auth security.EncodedJwt
var assignErr, uploadErr error
var uploadResult *operation.UploadResult
for i := 0; i < 3; i++ {
// assign one file id for one chunk
fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
if assignErr != nil {
return nil, nil, 0, assignErr, nil
}
// upload
dataReader := util.NewBytesReader(data)
fileId, uploadResult, uploadErr := fs.doCreateChunk(w, r, so, dataReader, fileName, contentType)
// upload the chunk to the volume server
uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
if uploadErr != nil {
if err == nil {
err = uploadErr
}
resultsChan <- &ChunkCreationResult{
err: uploadErr,
}
return
time.Sleep(251 * time.Millisecond)
continue
}
break
}
if uploadErr != nil {
return nil, nil, 0, uploadErr, nil
}
glog.V(4).Infof("uploaded %s to %s [%d,%d)", fileName, fileId, localOffset, localOffset+int64(uploadResult.Size))
// send back uploaded file chunk
resultsChan <- &ChunkCreationResult{
chunk: uploadResult.ToPbFileChunk(fileId, localOffset),
// if last chunk exhausted the reader exactly at the border
if uploadResult.Size == 0 {
break
}
if chunkOffset == 0 {
uploadedMd5 := util.Base64Md5ToBytes(uploadResult.ContentMd5)
readedMd5 := md5Hash.Sum(nil)
if !bytes.Equal(uploadedMd5, readedMd5) {
glog.Errorf("md5 %x does not match %x uploaded chunk %s to the volume server", readedMd5, uploadedMd5, uploadResult.Name)
}
}
limitedUploadProcessor.Execute(request)
}
go func() {
wg.Wait()
close(resultsChan)
}()
waitForAllData.Wait()
// Save to chunk manifest structure
fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset))
return fileChunks, md5Hash, readOffset, err, nil
}
type ChunkCreationResult struct {
chunk *filer_pb.FileChunk
err error
}
glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size))
func (fs *FilerServer) doCreateChunk(w http.ResponseWriter, r *http.Request, so *operation.StorageOption, dataReader *util.BytesReader, fileName string, contentType string) (string, *operation.UploadResult, error) {
// retry to assign a different file id
var fileId, urlLocation string
var auth security.EncodedJwt
var assignErr, uploadErr error
var uploadResult *operation.UploadResult
for i := 0; i < 3; i++ {
// assign one file id for one chunk
fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
if assignErr != nil {
return "", nil, assignErr
}
// reset variables for the next chunk
chunkOffset = chunkOffset + int64(uploadResult.Size)
// upload the chunk to the volume server
uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
if uploadErr != nil {
time.Sleep(251 * time.Millisecond)
continue
// if last chunk was not at full chunk size, but already exhausted the reader
if int64(uploadResult.Size) < int64(chunkSize) {
break
}
break
}
return fileId, uploadResult, uploadErr
return fileChunks, md5Hash, chunkOffset, nil, smallContent
}
func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) {
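A minimal sketch of the chunking pattern in the hunk above: the input stream is teed through an MD5 hasher while being cut into fixed-size pieces, and the loop stops on an empty or short read. readerToChunks and its uploadChunk callback are illustrative stand-ins for the assign-and-upload steps in this diff, not SeaweedFS APIs.

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

func readerToChunks(reader io.Reader, chunkSize int64, uploadChunk func(data []byte, offset int64) error) (int64, []byte, error) {
	md5Hash := md5.New()
	partReader := io.TeeReader(reader, md5Hash) // hash everything that is read
	var offset int64
	for {
		data, err := io.ReadAll(io.LimitReader(partReader, chunkSize))
		if err != nil {
			return offset, nil, err
		}
		if len(data) == 0 {
			break // reader exhausted exactly at a chunk border
		}
		if err := uploadChunk(data, offset); err != nil {
			return offset, nil, err
		}
		offset += int64(len(data))
		if int64(len(data)) < chunkSize {
			break // last, partial chunk
		}
	}
	return offset, md5Hash.Sum(nil), nil
}

func main() {
	total, sum, err := readerToChunks(strings.NewReader("hello chunked world"), 8, func(data []byte, offset int64) error {
		fmt.Printf("chunk [%d,%d)\n", offset, offset+int64(len(data)))
		return nil
	})
	fmt.Printf("%d bytes, md5 %x, err %v\n", total, sum, err)
}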

2
weed/server/filer_ui/breadcrumb.go

@@ -1,4 +1,4 @@
package master_ui
package filer_ui
import (
"strings"

2
weed/server/filer_ui/templates.go

@@ -1,4 +1,4 @@
package master_ui
package filer_ui
import (
"github.com/dustin/go-humanize"

106
weed/server/gateway_server.go

@@ -0,0 +1,106 @@
package weed_server
import (
"github.com/chrislusf/seaweedfs/weed/operation"
"google.golang.org/grpc"
"math/rand"
"net/http"
"github.com/chrislusf/seaweedfs/weed/util"
_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7"
_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"
_ "github.com/chrislusf/seaweedfs/weed/filer/hbase"
_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2"
_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3"
_ "github.com/chrislusf/seaweedfs/weed/filer/mongodb"
_ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
_ "github.com/chrislusf/seaweedfs/weed/filer/mysql2"
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres2"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
"github.com/chrislusf/seaweedfs/weed/glog"
_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"
_ "github.com/chrislusf/seaweedfs/weed/notification/gocdk_pub_sub"
_ "github.com/chrislusf/seaweedfs/weed/notification/google_pub_sub"
_ "github.com/chrislusf/seaweedfs/weed/notification/kafka"
_ "github.com/chrislusf/seaweedfs/weed/notification/log"
"github.com/chrislusf/seaweedfs/weed/security"
)
type GatewayOption struct {
Masters []string
Filers []string
MaxMB int
IsSecure bool
}
type GatewayServer struct {
option *GatewayOption
secret security.SigningKey
grpcDialOption grpc.DialOption
}
func NewGatewayServer(defaultMux *http.ServeMux, option *GatewayOption) (fs *GatewayServer, err error) {
fs = &GatewayServer{
option: option,
grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"),
}
if len(option.Masters) == 0 {
glog.Fatal("master list is required!")
}
defaultMux.HandleFunc("/blobs/", fs.blobsHandler)
defaultMux.HandleFunc("/files/", fs.filesHandler)
defaultMux.HandleFunc("/topics/", fs.topicsHandler)
return fs, nil
}
func (fs *GatewayServer) getMaster() string {
randMaster := rand.Intn(len(fs.option.Masters))
return fs.option.Masters[randMaster]
}
func (fs *GatewayServer) blobsHandler(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "DELETE":
chunkId := r.URL.Path[len("/blobs/"):]
fullUrl, err := operation.LookupFileId(fs.getMaster, chunkId)
if err != nil {
writeJsonError(w, r, http.StatusNotFound, err)
return
}
var jwtAuthorization security.EncodedJwt
if fs.option.IsSecure {
jwtAuthorization = operation.LookupJwt(fs.getMaster(), chunkId)
}
body, statusCode, err := util.DeleteProxied(fullUrl, string(jwtAuthorization))
if err != nil {
writeJsonError(w, r, http.StatusNotFound, err)
return
}
w.WriteHeader(statusCode)
w.Write(body)
case "POST":
submitForClientHandler(w, r, fs.getMaster, fs.grpcDialOption)
}
}
func (fs *GatewayServer) filesHandler(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "DELETE":
case "POST":
}
}
func (fs *GatewayServer) topicsHandler(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "POST":
}
}
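A small sketch of how a client might exercise the new gateway's blob endpoints. The address and the file id are illustrative assumptions, not values from this diff; only the /blobs/ routes registered above are taken from it.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// POST /blobs/ submits content through submitForClientHandler.
	resp, err := http.Post("http://localhost:19333/blobs/", "application/octet-stream", strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Println("submit status:", resp.Status)
	resp.Body.Close()

	// DELETE /blobs/<fileId> looks up the volume location and proxies the delete.
	req, _ := http.NewRequest(http.MethodDelete, "http://localhost:19333/blobs/3,01637037d6", nil)
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	fmt.Println("delete status:", resp.Status)
	resp.Body.Close()
}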

21
weed/server/master_grpc_server_admin.go

@@ -3,6 +3,7 @@ package weed_server
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"math/rand"
"sync"
"time"
@@ -60,6 +61,7 @@ const (
type AdminLock struct {
accessSecret int64
accessLockTime time.Time
lastClient string
}
type AdminLocks struct {
@@ -73,14 +75,15 @@ func NewAdminLocks() *AdminLocks {
}
}
func (locks *AdminLocks) isLocked(lockName string) bool {
func (locks *AdminLocks) isLocked(lockName string) (clientName string, isLocked bool) {
locks.RLock()
defer locks.RUnlock()
adminLock, found := locks.locks[lockName]
if !found {
return false
return "", false
}
return adminLock.accessLockTime.Add(LockDuration).After(time.Now())
glog.V(4).Infof("isLocked %v", adminLock.lastClient)
return adminLock.lastClient, adminLock.accessLockTime.Add(LockDuration).After(time.Now())
}
func (locks *AdminLocks) isValidToken(lockName string, ts time.Time, token int64) bool {
@@ -93,12 +96,13 @@ func (locks *AdminLocks) isValidToken(lockName string, ts time.Time, token int64
return adminLock.accessLockTime.Equal(ts) && adminLock.accessSecret == token
}
func (locks *AdminLocks) generateToken(lockName string) (ts time.Time, token int64) {
func (locks *AdminLocks) generateToken(lockName string, clientName string) (ts time.Time, token int64) {
locks.Lock()
defer locks.Unlock()
lock := &AdminLock{
accessSecret: rand.Int63(),
accessLockTime: time.Now(),
lastClient: clientName,
}
locks.locks[lockName] = lock
return lock.accessLockTime, lock.accessSecret
@@ -113,18 +117,19 @@ func (locks *AdminLocks) deleteLock(lockName string) {
func (ms *MasterServer) LeaseAdminToken(ctx context.Context, req *master_pb.LeaseAdminTokenRequest) (*master_pb.LeaseAdminTokenResponse, error) {
resp := &master_pb.LeaseAdminTokenResponse{}
if ms.adminLocks.isLocked(req.LockName) {
if lastClient, isLocked := ms.adminLocks.isLocked(req.LockName); isLocked {
glog.V(4).Infof("LeaseAdminToken %v", lastClient)
if req.PreviousToken != 0 && ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) {
// for renew
ts, token := ms.adminLocks.generateToken(req.LockName)
ts, token := ms.adminLocks.generateToken(req.LockName, req.ClientName)
resp.Token, resp.LockTsNs = token, ts.UnixNano()
return resp, nil
}
// refuse since still locked
return resp, fmt.Errorf("already locked")
return resp, fmt.Errorf("already locked by " + lastClient)
}
// for fresh lease request
ts, token := ms.adminLocks.generateToken(req.LockName)
ts, token := ms.adminLocks.generateToken(req.LockName, req.ClientName)
resp.Token, resp.LockTsNs = token, ts.UnixNano()
return resp, nil
}
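A self-contained sketch of the lease semantics introduced above: a lock is held for a fixed duration, remembers its last client for error reporting, and can only be renewed by presenting the previous token. All names here are local to the sketch, not the SeaweedFS API.

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

const lockDuration = 10 * time.Second

type lease struct {
	secret   int64
	lockTime time.Time
	client   string
}

type leaser struct {
	sync.Mutex
	leases map[string]*lease
}

func (l *leaser) acquire(name, client string, prevToken int64) (int64, error) {
	l.Lock()
	defer l.Unlock()
	if cur, ok := l.leases[name]; ok && cur.lockTime.Add(lockDuration).After(time.Now()) {
		if prevToken != cur.secret {
			// still locked, and the caller holds no valid token
			return 0, fmt.Errorf("already locked by %s", cur.client)
		}
		// valid previous token: fall through and renew
	}
	next := &lease{secret: rand.Int63(), lockTime: time.Now(), client: client}
	l.leases[name] = next
	return next.secret, nil
}

func main() {
	l := &leaser{leases: map[string]*lease{}}
	t1, _ := l.acquire("admin", "shell-1", 0)
	if _, err := l.acquire("admin", "shell-2", 0); err != nil {
		fmt.Println(err) // already locked by shell-1
	}
	t2, err := l.acquire("admin", "shell-1", t1) // renew with previous token
	fmt.Println(t2 != t1, err)
}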

102
weed/server/master_grpc_server_volume.go

@@ -4,15 +4,68 @@ import (
"context"
"fmt"
"github.com/chrislusf/raft"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"reflect"
"sync"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/topology"
)
func (ms *MasterServer) ProcessGrowRequest() {
go func() {
filter := sync.Map{}
for {
req, ok := <-ms.vgCh
if !ok {
break
}
if !ms.Topo.IsLeader() {
//discard buffered requests
time.Sleep(time.Second * 1)
continue
}
// filter out identical requests being processed
found := false
filter.Range(func(k, v interface{}) bool {
if reflect.DeepEqual(k, req) {
found = true
}
return !found
})
// not atomic but it's okay
if !found && ms.shouldVolumeGrow(req.Option) {
filter.Store(req, nil)
// we have lock called inside vg
go func() {
glog.V(1).Infoln("starting automatic volume grow")
start := time.Now()
_, err := ms.vg.AutomaticGrowByType(req.Option, ms.grpcDialOption, ms.Topo, req.Count)
glog.V(1).Infoln("finished automatic volume grow, cost ", time.Now().Sub(start))
if req.ErrCh != nil {
req.ErrCh <- err
close(req.ErrCh)
}
filter.Delete(req)
}()
} else {
glog.V(4).Infoln("discard volume grow request")
}
}
}()
}
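A reduced sketch of the request-deduplication pattern in ProcessGrowRequest above: identical in-flight requests are filtered with a sync.Map plus reflect.DeepEqual, and callers that need the outcome pass an error channel. The growRequest type and the comparison on Collection are illustrative simplifications.

package main

import (
	"fmt"
	"reflect"
	"sync"
	"time"
)

type growRequest struct {
	Collection string
	Count      int
	ErrCh      chan error
}

func process(ch chan *growRequest, grow func(*growRequest) error) {
	inflight := sync.Map{}
	for req := range ch {
		// skip if an identical request is already being processed
		duplicate := false
		inflight.Range(func(k, _ interface{}) bool {
			if reflect.DeepEqual(k.(*growRequest).Collection, req.Collection) {
				duplicate = true
			}
			return !duplicate
		})
		if duplicate {
			continue
		}
		inflight.Store(req, nil)
		go func(req *growRequest) {
			err := grow(req)
			if req.ErrCh != nil {
				req.ErrCh <- err
				close(req.ErrCh)
			}
			inflight.Delete(req)
		}(req)
	}
}

func main() {
	ch := make(chan *growRequest, 64)
	go process(ch, func(r *growRequest) error {
		time.Sleep(10 * time.Millisecond) // simulate a grow attempt
		return nil
	})
	errCh := make(chan error, 1)
	ch <- &growRequest{Collection: "images", Count: 2, ErrCh: errCh}
	fmt.Println("grow result:", <-errCh) // blocks until the grow attempt finishes
}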
func (ms *MasterServer) LookupVolume(ctx context.Context, req *master_pb.LookupVolumeRequest) (*master_pb.LookupVolumeResponse, error) {
if !ms.Topo.IsLeader() {
@@ -68,38 +121,45 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
ReplicaPlacement: replicaPlacement,
Ttl: ttl,
DiskType: diskType,
Prealloacte: ms.preallocateSize,
Preallocate: ms.preallocateSize,
DataCenter: req.DataCenter,
Rack: req.Rack,
DataNode: req.DataNode,
MemoryMapMaxSizeMb: req.MemoryMapMaxSizeMb,
}
if !ms.Topo.HasWritableVolume(option) {
if ms.shouldVolumeGrow(option) {
if ms.Topo.AvailableSpaceFor(option) <= 0 {
return nil, fmt.Errorf("no free volumes left for " + option.String())
}
ms.vgLock.Lock()
if !ms.Topo.HasWritableVolume(option) {
if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo, int(req.WritableVolumeCount)); err != nil {
ms.vgLock.Unlock()
return nil, fmt.Errorf("Cannot grow volume group! %v", err)
}
ms.vgCh <- &topology.VolumeGrowRequest{
Option: option,
Count: int(req.WritableVolumeCount),
}
ms.vgLock.Unlock()
}
fid, count, dn, err := ms.Topo.PickForWrite(req.Count, option)
if err != nil {
return nil, fmt.Errorf("%v", err)
}
return &master_pb.AssignResponse{
Fid: fid,
Url: dn.Url(),
PublicUrl: dn.PublicUrl,
Count: count,
Auth: string(security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)),
}, nil
var (
lastErr error
maxTimeout = time.Second * 10
startTime = time.Now()
)
for time.Now().Sub(startTime) < maxTimeout {
fid, count, dn, err := ms.Topo.PickForWrite(req.Count, option)
if err == nil {
return &master_pb.AssignResponse{
Fid: fid,
Url: dn.Url(),
PublicUrl: dn.PublicUrl,
Count: count,
Auth: string(security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)),
}, nil
}
//glog.V(4).Infoln("waiting for volume growing...")
lastErr = err
time.Sleep(200 * time.Millisecond)
}
return nil, lastErr
}
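A minimal sketch of the new wait-for-growth behavior in Assign: after queuing a grow request, the handler keeps retrying the pick until one succeeds or a deadline passes, returning the last error. The pick function is a hypothetical stand-in for Topo.PickForWrite.

package main

import (
	"errors"
	"fmt"
	"time"
)

func assignWithRetry(pick func() (string, error)) (string, error) {
	var lastErr error
	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		if fid, err := pick(); err == nil {
			return fid, nil
		} else {
			lastErr = err
		}
		time.Sleep(200 * time.Millisecond) // give volume growth time to finish
	}
	return "", lastErr
}

func main() {
	attempts := 0
	fid, err := assignWithRetry(func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errors.New("no writable volumes")
		}
		return "3,01637037d6", nil // hypothetical file id
	})
	fmt.Println(fid, err)
}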
func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.StatisticsRequest) (*master_pb.StatisticsResponse, error) {

22
weed/server/master_server.go

@@ -51,9 +51,9 @@ type MasterServer struct {
preallocateSize int64
Topo *topology.Topology
vg *topology.VolumeGrowth
vgLock sync.Mutex
Topo *topology.Topology
vg *topology.VolumeGrowth
vgCh chan *topology.VolumeGrowRequest
boundedLeaderChan chan int
@@ -82,6 +82,12 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
v.SetDefault("master.replication.treat_replication_as_minimums", false)
replicationAsMin := v.GetBool("master.replication.treat_replication_as_minimums")
v.SetDefault("master.volume_growth.copy_1", 7)
v.SetDefault("master.volume_growth.copy_2", 6)
v.SetDefault("master.volume_growth.copy_3", 3)
v.SetDefault("master.volume_growth.copy_other", 1)
v.SetDefault("master.volume_growth.threshold", 0.9)
var preallocateSize int64
if option.VolumePreallocate {
preallocateSize = int64(option.VolumeSizeLimitMB) * (1 << 20)
@@ -91,6 +97,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
ms := &MasterServer{
option: option,
preallocateSize: preallocateSize,
vgCh: make(chan *topology.VolumeGrowRequest, 1 << 6),
clientChans: make(map[string]chan *master_pb.VolumeLocation),
grpcDialOption: grpcDialOption,
MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, "", peers),
@@ -128,7 +135,14 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
r.HandleFunc("/{fileId}", ms.redirectHandler)
}
ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOption, ms.option.GarbageThreshold, ms.preallocateSize)
ms.Topo.StartRefreshWritableVolumes(
ms.grpcDialOption,
ms.option.GarbageThreshold,
v.GetFloat64("master.volume_growth.threshold"),
ms.preallocateSize,
)
ms.ProcessGrowRequest()
ms.startAdminScripts()

20
weed/server/master_server_handlers.go

@@ -10,6 +10,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/topology"
)
func (ms *MasterServer) lookupVolumeId(vids []string, collection string) (volumeLocations map[string]operation.LookupResult) {
@@ -111,19 +112,20 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
return
}
if !ms.Topo.HasWritableVolume(option) {
if ms.shouldVolumeGrow(option) {
if ms.Topo.AvailableSpaceFor(option) <= 0 {
writeJsonQuiet(w, r, http.StatusNotFound, operation.AssignResult{Error: "No free volumes left for " + option.String()})
return
}
ms.vgLock.Lock()
defer ms.vgLock.Unlock()
if !ms.Topo.HasWritableVolume(option) {
if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo, writableVolumeCount); err != nil {
writeJsonError(w, r, http.StatusInternalServerError,
fmt.Errorf("Cannot grow volume group! %v", err))
return
}
errCh := make(chan error, 1)
ms.vgCh <- &topology.VolumeGrowRequest{
Option: option,
Count: writableVolumeCount,
ErrCh: errCh,
}
if err := <- errCh; err != nil {
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("cannot grow volume group! %v", err))
return
}
}
fid, count, dn, err := ms.Topo.PickForWrite(requestedCount, option)

10
weed/server/master_server_handlers_admin.go

@@ -3,7 +3,6 @@ package weed_server
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"math/rand"
"net/http"
"strconv"
@@ -14,6 +13,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/topology"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -136,9 +136,11 @@ func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *
}
}
func (ms *MasterServer) HasWritableVolume(option *topology.VolumeGrowOption) bool {
func (ms *MasterServer) shouldVolumeGrow(option *topology.VolumeGrowOption) bool {
vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)
return vl.GetActiveVolumeCount(option) > 0
active, high := vl.GetActiveVolumeCount(option)
//glog.V(0).Infof("active volume: %d, high usage volume: %d\n", active, high)
return active <= high
}
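A sketch of the renamed grow decision above, under the assumption that the second value from GetActiveVolumeCount counts volumes whose usage exceeds master.volume_growth.threshold (0.9 by default): growth triggers once every active volume is also a high-usage volume.

package main

import "fmt"

func shouldGrow(volumeUsage []float64, threshold float64) bool {
	active, high := 0, 0
	for _, u := range volumeUsage {
		active++
		if u >= threshold {
			high++
		}
	}
	return active <= high
}

func main() {
	fmt.Println(shouldGrow([]float64{0.95, 0.5}, 0.9))  // false: one volume still has room
	fmt.Println(shouldGrow([]float64{0.95, 0.92}, 0.9)) // true: all active volumes are near full
}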
func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGrowOption, error) {
@@ -172,7 +174,7 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr
ReplicaPlacement: replicaPlacement,
Ttl: ttl,
DiskType: diskType,
Prealloacte: preallocate,
Preallocate: preallocate,
DataCenter: r.FormValue("dataCenter"),
Rack: r.FormValue("rack"),
DataNode: r.FormValue("dataNode"),

12
weed/server/master_server_handlers_ui.go

@@ -14,17 +14,19 @@ func (ms *MasterServer) uiStatusHandler(w http.ResponseWriter, r *http.Request)
infos := make(map[string]interface{})
infos["Up Time"] = time.Now().Sub(startTime).String()
args := struct {
Version string
Topology interface{}
RaftServer raft.Server
Stats map[string]interface{}
Counters *stats.ServerStats
Version string
Topology interface{}
RaftServer raft.Server
Stats map[string]interface{}
Counters *stats.ServerStats
VolumeSizeLimitMB uint
}{
util.Version(),
ms.Topo.ToMap(),
ms.Topo.RaftServer,
infos,
serverStats,
ms.option.VolumeSizeLimitMB,
}
ui.StatusTpl.Execute(w, args)
}

10
weed/server/master_ui/templates.go

@@ -22,8 +22,12 @@ var StatusTpl = template.Must(template.New("status").Parse(`<!DOCTYPE html>
<div class="row">
<div class="col-sm-6">
<h2>Cluster status</h2>
<table class="table">
<table class="table table-condensed table-striped">
<tbody>
<tr>
<th>Volume Size Limit</th>
<td>{{ .VolumeSizeLimitMB }}MB</td>
</tr>
<tr>
<th>Free</th>
<td>{{ .Topology.Free }}</td>
@@ -38,8 +42,8 @@ var StatusTpl = template.Must(template.New("status").Parse(`<!DOCTYPE html>
<td><a href="http://{{ .Leader }}">{{ .Leader }}</a></td>
</tr>
<tr>
<td class="col-sm-2 field-label"><label>Other Masters:</label></td>
<td class="col-sm-10"><ul class="list-unstyled">
<th>Other Masters</th>
<td class="col-sm-5"><ul class="list-unstyled">
{{ range $k, $p := .Peers }}
<li><a href="http://{{ $p.Name }}/ui/index.html">{{ $p.Name }}</a></li>
{{ end }}

9
weed/server/volume_grpc_vacuum.go

@@ -44,19 +44,14 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv
resp := &volume_server_pb.VacuumVolumeCommitResponse{}
err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId))
readOnly, err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId))
if err != nil {
glog.Errorf("commit volume %d: %v", req.VolumeId, err)
} else {
glog.V(1).Infof("commit volume %d", req.VolumeId)
}
if err == nil {
if vs.store.GetVolume(needle.VolumeId(req.VolumeId)).IsReadOnly() {
resp.IsReadOnly = true
}
}
resp.IsReadOnly = readOnly
return resp, err
}

4
weed/server/volume_server.go

@@ -43,7 +43,7 @@ type VolumeServer struct {
func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
port int, publicUrl string,
folders []string, maxCounts []int, minFreeSpacePercents []float32, diskTypes []types.DiskType,
folders []string, maxCounts []int, minFreeSpaces []util.MinFreeSpace, diskTypes []types.DiskType,
idxFolder string,
needleMapKind storage.NeedleMapKind,
masterNodes []string, pulseSeconds int,
@@ -85,7 +85,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
vs.checkWithMaster()
vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpacePercents, idxFolder, vs.needleMapKind, diskTypes)
vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpaces, idxFolder, vs.needleMapKind, diskTypes)
vs.guard = security.NewGuard(whiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec)
handleStaticResources(adminMux)

7
weed/server/volume_server_handlers_read.go

@@ -27,7 +27,7 @@ var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) {
glog.V(9).Info(r.Method + " " + r.URL.Path + " " + r.Header.Get("Range"))
// println(r.Method + " " + r.URL.Path)
stats.VolumeServerRequestCounter.WithLabelValues("get").Inc()
start := time.Now()
@@ -261,10 +261,13 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re
return nil
}
processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
if _, e = rs.Seek(offset, 0); e != nil {
return e
}
if httpStatusCode != 0 {
w.WriteHeader(httpStatusCode)
}
_, e = io.CopyN(writer, rs, size)
return e
})
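A sketch of the updated callback contract above: the range machinery now passes the HTTP status to write (e.g. 206 for partial content) so the seek-and-copy callback can emit it just before streaming the bytes. writeRange is a local illustration, not the function in this diff.

package main

import (
	"fmt"
	"io"
	"net/http/httptest"
	"strings"
)

func writeRange(rs io.ReadSeeker, w io.Writer, setStatus func(int), offset, size int64, httpStatusCode int) error {
	if _, err := rs.Seek(offset, io.SeekStart); err != nil {
		return err
	}
	if httpStatusCode != 0 {
		setStatus(httpStatusCode) // e.g. http.StatusPartialContent
	}
	_, err := io.CopyN(w, rs, size)
	return err
}

func main() {
	rec := httptest.NewRecorder()
	err := writeRange(strings.NewReader("hello range request"), rec, rec.WriteHeader, 6, 5, 206)
	fmt.Println(rec.Code, rec.Body.String(), err) // 206 range <nil>
}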

3
weed/server/volume_server_handlers_write.go

@@ -13,7 +13,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/topology"
"github.com/chrislusf/seaweedfs/weed/util"
)
func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
@@ -68,7 +67,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
ret.Name = string(reqNeedle.Name)
}
ret.Size = uint32(originalSize)
ret.ETag = fmt.Sprintf("%x", util.Base64Md5ToBytes(contentMd5))
ret.ETag = reqNeedle.Etag()
ret.Mime = string(reqNeedle.Mime)
setEtag(w, ret.ETag)
w.Header().Set("Content-MD5", contentMd5)

2
weed/server/volume_server_ui/templates.go

@@ -1,4 +1,4 @@
package master_ui
package volume_server_ui
import (
"fmt"

41
weed/server/webdav_server.go

@@ -380,25 +380,32 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64
ctx := context.Background()
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: f.fs.option.Replication,
Collection: f.fs.option.Collection,
DiskType: f.fs.option.DiskType,
Path: name,
}
assignErr := util.Retry("assignVolume", func() error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: f.fs.option.Replication,
Collection: f.fs.option.Collection,
DiskType: f.fs.option.DiskType,
Path: name,
}
resp, err := client.AssignVolume(ctx, request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
}
resp, err := client.AssignVolume(ctx, request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
}
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
f.collection, f.replication = resp.Collection, resp.Replication
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
f.collection, f.replication = resp.Collection, resp.Replication
return nil
})
if assignErr != nil {
return assignErr
}
return nil
}); flushErr != nil {
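The WebDAV change above moves volume assignment inside a retry wrapper. A minimal sketch of the same shape, with a local retry helper standing in for util.Retry (assumed here to re-run the job until it returns nil or attempts are exhausted):

package main

import (
	"errors"
	"fmt"
	"time"
)

func retry(name string, attempts int, job func() error) (err error) {
	for i := 0; i < attempts; i++ {
		if err = job(); err == nil {
			return nil
		}
		fmt.Printf("%s failed (attempt %d): %v\n", name, i+1, err)
		time.Sleep(100 * time.Millisecond)
	}
	return err
}

func main() {
	calls := 0
	err := retry("assignVolume", 3, func() error {
		calls++
		if calls < 2 {
			return errors.New("no writable volume")
		}
		return nil
	})
	fmt.Println("assigned after", calls, "calls:", err)
}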

47
weed/shell/command_ec_encode.go

@@ -63,6 +63,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
collection := encodeCommand.String("collection", "", "the collection name")
fullPercentage := encodeCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
quietPeriod := encodeCommand.Duration("quietFor", time.Hour, "select volumes with no writes for this period")
parallelCopy := encodeCommand.Bool("parallelCopy", true, "copy shards in parallel")
if err = encodeCommand.Parse(args); err != nil {
return nil
}
@@ -71,7 +72,7 @@
// volumeId is provided
if vid != 0 {
return doEcEncode(commandEnv, *collection, vid)
return doEcEncode(commandEnv, *collection, vid, *parallelCopy)
}
// apply to all volumes in the collection
@@ -81,7 +82,7 @@ }
}
fmt.Printf("ec encode volumes: %v\n", volumeIds)
for _, vid := range volumeIds {
if err = doEcEncode(commandEnv, *collection, vid); err != nil {
if err = doEcEncode(commandEnv, *collection, vid, *parallelCopy); err != nil {
return err
}
}
@@ -89,7 +90,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return nil
}
func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) {
func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId, parallelCopy bool) (err error) {
// find volume location
locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
if !found {
@@ -111,7 +112,7 @@ func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId)
}
// balance the ec shards to current cluster
err = spreadEcShards(commandEnv, vid, collection, locations)
err = spreadEcShards(commandEnv, vid, collection, locations, parallelCopy)
if err != nil {
return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err)
}
@@ -157,7 +158,7 @@ func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId,
}
func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) {
func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location, parallelCopy bool) (err error) {
allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "")
if err != nil {
@@ -176,7 +177,7 @@ func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection
allocatedEcIds := balancedEcDistribution(allocatedDataNodes)
// ask the data nodes to copy from the source volume server
copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0])
copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0], parallelCopy)
if err != nil {
return err
}
@@ -206,30 +207,36 @@ func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection
}
func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) {
func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location, parallelCopy bool) (actuallyCopied []uint32, err error) {
fmt.Printf("parallelCopyEcShardsFromSource %d %s\n", volumeId, existingLocation.Url)
// parallelize
shardIdChan := make(chan []uint32, len(targetServers))
var wg sync.WaitGroup
shardIdChan := make(chan []uint32, len(targetServers))
copyFunc := func(server *EcNode, allocatedEcShardIds []uint32) {
defer wg.Done()
copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
allocatedEcShardIds, volumeId, collection, existingLocation.Url)
if copyErr != nil {
err = copyErr
} else {
shardIdChan <- copiedShardIds
server.addEcVolumeShards(volumeId, collection, copiedShardIds)
}
}
// maybe parallelize
for i, server := range targetServers {
if len(allocatedEcIds[i]) <= 0 {
continue
}
wg.Add(1)
go func(server *EcNode, allocatedEcShardIds []uint32) {
defer wg.Done()
copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
allocatedEcShardIds, volumeId, collection, existingLocation.Url)
if copyErr != nil {
err = copyErr
} else {
shardIdChan <- copiedShardIds
server.addEcVolumeShards(volumeId, collection, copiedShardIds)
}
}(server, allocatedEcIds[i])
if parallelCopy {
go copyFunc(server, allocatedEcIds[i])
} else {
copyFunc(server, allocatedEcIds[i])
}
}
wg.Wait()
close(shardIdChan)
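A condensed sketch of the -parallelCopy switch introduced above: the per-server work is captured in one closure, and the flag only decides whether it runs in a goroutine or inline. copyShards and its arguments are illustrative.

package main

import (
	"fmt"
	"sync"
	"time"
)

func copyShards(servers []string, parallel bool, copyOne func(server string)) {
	var wg sync.WaitGroup
	for _, server := range servers {
		wg.Add(1)
		work := func(server string) {
			defer wg.Done()
			copyOne(server)
		}
		if parallel {
			go work(server) // fan out
		} else {
			work(server) // one server at a time
		}
	}
	wg.Wait()
}

func main() {
	start := time.Now()
	copyShards([]string{"vol1:8080", "vol2:8080"}, true, func(server string) {
		time.Sleep(50 * time.Millisecond) // simulate a shard copy
		fmt.Println("copied shards to", server)
	})
	fmt.Println("took", time.Since(start))
}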

Some files were not shown because too many files changed in this diff
