
Merge pull request #23 from chrislusf/master

sync
hilimd committed 4 years ago (via GitHub)
commit 411e49f964
74 changed files:

1. .github/workflows/go.yml (2)
2. README.md (21)
3. docker/entrypoint.sh (23)
4. go.mod (4)
5. go.sum (4)
6. k8s/seaweedfs/Chart.yaml (2)
7. k8s/seaweedfs/values.yaml (2)
8. other/java/client/pom.xml (2)
9. other/java/client/pom.xml.deploy (2)
10. other/java/client/pom_debug.xml (2)
11. other/java/hdfs2/dependency-reduced-pom.xml (2)
12. other/java/hdfs2/pom.xml (2)
13. other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java (7)
14. other/java/hdfs3/dependency-reduced-pom.xml (2)
15. other/java/hdfs3/pom.xml (2)
16. other/metrics/grafana_seaweedfs.json (1856)
17. test/s3/basic/object_tagging_test.go (82)
18. weed/command/benchmark.go (7)
19. weed/command/filer.go (22)
20. weed/command/master.go (34)
21. weed/command/mount_std.go (27)
22. weed/command/s3.go (14)
23. weed/command/scaffold.go (2)
24. weed/command/server.go (7)
25. weed/command/volume.go (5)
26. weed/filer/filechunk_manifest.go (30)
27. weed/filer/filechunks.go (12)
28. weed/filer/reader_at.go (112)
29. weed/filer/stream.go (40)
30. weed/filesys/dir.go (9)
31. weed/filesys/dir_link.go (6)
32. weed/filesys/wfs.go (5)
33. weed/operation/submit.go (3)
34. weed/pb/master.proto (2)
35. weed/pb/master_pb/master.pb.go (238)
36. weed/replication/repl_util/replication_utli.go (40)
37. weed/replication/sink/azuresink/azure_sink.go (21)
38. weed/replication/sink/b2sink/b2_sink.go (28)
39. weed/replication/sink/gcssink/gcs_sink.go (22)
40. weed/replication/sink/s3sink/s3_write.go (11)
41. weed/replication/source/filer_source.go (19)
42. weed/s3api/auth_credentials.go (2)
43. weed/s3api/filer_util_tags.go (104)
44. weed/s3api/s3api_object_tagging_handlers.go (117)
45. weed/s3api/s3api_server.go (17)
46. weed/s3api/s3err/s3api_errors.go (6)
47. weed/s3api/stats.go (27)
48. weed/s3api/tags.go (38)
49. weed/s3api/tags_test.go (50)
50. weed/server/filer_grpc_server.go (11)
51. weed/server/filer_server.go (33)
52. weed/server/filer_server_handlers_write_autochunk.go (1)
53. weed/server/master_grpc_server.go (17)
54. weed/server/master_grpc_server_volume.go (13)
55. weed/server/master_server.go (5)
56. weed/server/raft_server.go (123)
57. weed/server/raft_server_handlers.go (4)
58. weed/server/volume_grpc_client_to_master.go (2)
59. weed/server/volume_server_handlers_read.go (4)
60. weed/server/volume_server_handlers_write.go (3)
61. weed/server/webdav_server.go (2)
62. weed/stats/metrics.go (2)
63. weed/storage/backend/volume_create_linux.go (2)
64. weed/storage/disk_location.go (3)
65. weed/storage/store.go (4)
66. weed/storage/store_ec.go (4)
67. weed/storage/volume_read_write.go (3)
68. weed/util/chunk_cache/chunk_cache.go (32)
69. weed/util/chunk_cache/chunk_cache_on_disk_test.go (51)
70. weed/util/chunk_cache/on_disk_cache_layer.go (8)
71. weed/util/constants.go (2)
72. weed/util/fullpath.go (2)
73. weed/wdclient/masterclient.go (30)
74. weed/wdclient/vid_map.go (51)

.github/workflows/go.yml (2 changes)

@@ -34,4 +34,4 @@ jobs:
 run: cd weed; go build -v .
 - name: Test
-run: cd weed; go test -v .
+run: cd weed; go test -v ./...

README.md (21 changes)

@@ -349,6 +349,8 @@ Most other distributed file systems seem more complicated than necessary.
 SeaweedFS is meant to be fast and simple, in both setup and operation. If you do not understand how it works when you reach here, we've failed! Please raise an issue with any questions or update this file with clarifications.
+SeaweedFS is constantly moving forward. Same with other systems. These comparisons can be outdated quickly. Please help to keep them updated.
 [Back to TOC](#table-of-contents)
 ### Compared to HDFS ###
@@ -370,13 +372,14 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
 * SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Mongodb, Redis, Elastic Search, MySql, Postgres, MemSql, TiDB, CockroachDB, Etcd etc, and is easy to customized.
 * SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.
-| System | File Meta | File Content Read| POSIX | REST API | Optimized for small files |
+| System | File Metadata | File Content Read| POSIX | REST API | Optimized for large number of small files |
 | ------------- | ------------------------------- | ---------------- | ------ | -------- | ------------------------- |
 | SeaweedFS | lookup volume id, cacheable | O(1) disk seek | | Yes | Yes |
 | SeaweedFS Filer| Linearly Scalable, Customizable | O(1) disk seek | FUSE | Yes | Yes |
 | GlusterFS | hashing | | FUSE, NFS | | |
 | Ceph | hashing + rules | | FUSE | Yes | |
 | MooseFS | in memory | | FUSE | | No |
+| MinIO | separate meta file for each file | | | Yes | No |
 [Back to TOC](#table-of-contents)
@@ -418,6 +421,22 @@ SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Red
 [Back to TOC](#table-of-contents)
+### Compared to MinIO ###
+MinIO follows AWS S3 closely and is ideal for testing for S3 API. It has good UI, policies, versionings, etc. SeaweedFS is trying to catch up here. It is also possible to put MinIO as a gateway in front of SeaweedFS later.
+MinIO metadata are in simple files. Each file write will incur meta file writes.
+MinIO does not have optimization for large number of small files.
+MinIO has multiple disk IO to read one file. SeaweedFS has O(1) disk reads.
+MinIO has full-time erasure coding. SeaweedFS uses replication on hot data for faster speed and optionally applies erasure coding on warm data.
+MinIO does not have POSIX-like API support.
+MinIO has specific requirements on storage layout. It is not flexible to adjust capacity. In SeaweedFS, just start one volume server pointing to the master. That's all.
 ## Dev Plan ##
 More tools and documentation, on how to maintain and scale the system. For example, how to move volumes, automatically balancing data, how to grow volumes, how to check system status, etc.

docker/entrypoint.sh (23 changes)

@@ -1,5 +1,24 @@
 #!/bin/sh
+isArgPassed() {
+  arg="$1"
+  argWithEqualSign="$1="
+  shift
+  while [ $# -gt 0 ]; do
+    passedArg="$1"
+    shift
+    case $passedArg in
+      $arg)
+        return 0
+        ;;
+      $argWithEqualSign*)
+        return 0
+        ;;
+    esac
+  done
+  return 1
+}
+
 case "$1" in
 'master')
@@ -9,7 +28,7 @@ case "$1" in
 'volume')
 ARGS="-dir=/data -max=0"
-if [[ $@ == *"-max="* ]]; then
+if isArgPassed "-max" "$@"; then
 ARGS="-dir=/data"
 fi
 exec /usr/bin/weed $@ $ARGS
@@ -17,7 +36,7 @@ case "$1" in
 'server')
 ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024"
-if [[ $@ == *"-volume.max="* ]]; then
+if isArgPassed "-volume.max" "$@"; then
 ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024"
 fi
 exec /usr/bin/weed $@ $ARGS

go.mod (4 changes)

@@ -11,7 +11,7 @@ require (
 github.com/aws/aws-sdk-go v1.33.5
 github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
 github.com/cespare/xxhash v1.1.0
-github.com/chrislusf/raft v1.0.1
+github.com/chrislusf/raft v1.0.2-0.20201002174524-b13c3bfdb011
 github.com/coreos/go-semver v0.3.0 // indirect
 github.com/dgrijalva/jwt-go v3.2.0+incompatible
 github.com/disintegration/imaging v1.6.2
@@ -27,6 +27,7 @@ require (
 github.com/go-sql-driver/mysql v1.5.0
 github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
 github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6
 github.com/golang/protobuf v1.4.2
 github.com/google/btree v1.0.0
 github.com/google/uuid v1.1.1
@@ -78,6 +79,7 @@ require (
 gocloud.dev/pubsub/rabbitpubsub v0.16.0
 golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2
+golang.org/x/sync v0.0.0-20200930132711-30421366ff76 // indirect
 golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
 golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5
 google.golang.org/api v0.9.0

go.sum (4 changes)

@@ -69,6 +69,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/chrislusf/raft v1.0.1 h1:Wa4ffkmkysW7cX3T/gMC/Mk3PhnOXhsqOVwQJcMndhw=
 github.com/chrislusf/raft v1.0.1/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
+github.com/chrislusf/raft v1.0.2-0.20201002174524-b13c3bfdb011 h1:vN1GvfLgDg8kIPCdhuVKAjlYpxG1B86jiKejB6MC/Q0=
+github.com/chrislusf/raft v1.0.2-0.20201002174524-b13c3bfdb011/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
@@ -613,6 +615,8 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0=
+golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

k8s/seaweedfs/Chart.yaml (2 changes)

@@ -1,4 +1,4 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-version: 2.00
+version: 2.03

k8s/seaweedfs/values.yaml (2 changes)

@@ -4,7 +4,7 @@ global:
 registry: ""
 repository: ""
 imageName: chrislusf/seaweedfs
-imageTag: "2.00"
+imageTag: "2.03"
 imagePullPolicy: IfNotPresent
 imagePullSecrets: imagepullsecret
 restartPolicy: Always

other/java/client/pom.xml (2 changes)

@@ -5,7 +5,7 @@
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-client</artifactId>
-<version>1.4.7</version>
+<version>1.4.8</version>
 <parent>
 <groupId>org.sonatype.oss</groupId>

other/java/client/pom.xml.deploy (2 changes)

@@ -5,7 +5,7 @@
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-client</artifactId>
-<version>1.4.7</version>
+<version>1.4.8</version>
 <parent>
 <groupId>org.sonatype.oss</groupId>

other/java/client/pom_debug.xml (2 changes)

@@ -5,7 +5,7 @@
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-client</artifactId>
-<version>1.4.7</version>
+<version>1.4.8</version>
 <parent>
 <groupId>org.sonatype.oss</groupId>

other/java/hdfs2/dependency-reduced-pom.xml (2 changes)

@@ -301,7 +301,7 @@
 </snapshotRepository>
 </distributionManagement>
 <properties>
-<seaweedfs.client.version>1.4.7</seaweedfs.client.version>
+<seaweedfs.client.version>1.4.8</seaweedfs.client.version>
 <hadoop.version>2.9.2</hadoop.version>
 </properties>
 </project>

other/java/hdfs2/pom.xml (2 changes)

@@ -5,7 +5,7 @@
 <modelVersion>4.0.0</modelVersion>
 <properties>
-<seaweedfs.client.version>1.4.7</seaweedfs.client.version>
+<seaweedfs.client.version>1.4.8</seaweedfs.client.version>
 <hadoop.version>2.9.2</hadoop.version>
 </properties>

other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java (7 changes)

@@ -8,14 +8,10 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import seaweedfs.client.FilerClient;
-import seaweedfs.client.FilerGrpcClient;
-import seaweedfs.client.FilerProto;
-import seaweedfs.client.SeaweedRead;
+import seaweedfs.client.*;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -202,6 +198,7 @@ public class SeaweedFileSystemStore {
 .clearGroupName()
 .addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
 );
+SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
 }
 return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication);

other/java/hdfs3/dependency-reduced-pom.xml (2 changes)

@@ -309,7 +309,7 @@
 </snapshotRepository>
 </distributionManagement>
 <properties>
-<seaweedfs.client.version>1.4.7</seaweedfs.client.version>
+<seaweedfs.client.version>1.4.8</seaweedfs.client.version>
 <hadoop.version>3.1.1</hadoop.version>
 </properties>
 </project>

other/java/hdfs3/pom.xml (2 changes)

@@ -5,7 +5,7 @@
 <modelVersion>4.0.0</modelVersion>
 <properties>
-<seaweedfs.client.version>1.4.7</seaweedfs.client.version>
+<seaweedfs.client.version>1.4.8</seaweedfs.client.version>
 <hadoop.version>3.1.1</hadoop.version>
 </properties>

other/metrics/grafana_seaweedfs.json (1856 changes)

File diff suppressed because it is too large

test/s3/basic/object_tagging_test.go (82 changes)

@@ -0,0 +1,82 @@
+package basic
+
+import (
+    "fmt"
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/service/s3"
+    "testing"
+)
+
+func TestObjectTagging(t *testing.T) {
+
+    input := &s3.PutObjectInput{
+        Bucket: aws.String("theBucket"),
+        Key:    aws.String("testDir/testObject"),
+    }
+
+    svc.PutObject(input)
+
+    printTags()
+
+    setTags()
+
+    printTags()
+
+    clearTags()
+
+    printTags()
+}
+
+func printTags() {
+    response, err := svc.GetObjectTagging(
+        &s3.GetObjectTaggingInput{
+            Bucket: aws.String("theBucket"),
+            Key:    aws.String("testDir/testObject"),
+        })
+    fmt.Println("printTags")
+    if err != nil {
+        fmt.Println(err.Error())
+    }
+    fmt.Println(response.TagSet)
+}
+
+func setTags() {
+    response, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+        Bucket: aws.String("theBucket"),
+        Key:    aws.String("testDir/testObject"),
+        Tagging: &s3.Tagging{
+            TagSet: []*s3.Tag{
+                {
+                    Key:   aws.String("kye2"),
+                    Value: aws.String("value2"),
+                },
+            },
+        },
+    })
+    fmt.Println("setTags")
+    if err != nil {
+        fmt.Println(err.Error())
+    }
+    fmt.Println(response.String())
+}
+
+func clearTags() {
+    response, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
+        Bucket: aws.String("theBucket"),
+        Key:    aws.String("testDir/testObject"),
+    })
+    fmt.Println("clearTags")
+    if err != nil {
+        fmt.Println(err.Error())
+    }
+    fmt.Println(response.String())
+}
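These tests rely on a package-level svc S3 client defined elsewhere in the basic package. A minimal sketch of such a setup, assuming a local "weed s3" gateway on port 8333 and using the static credentials from the sample config below; the endpoint, region, and keys are placeholder assumptions:

package basic

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

// svc is the shared client used by the tests above; the endpoint and keys
// are hypothetical values for a locally running SeaweedFS S3 gateway.
var svc = s3.New(session.Must(session.NewSession(&aws.Config{
    Region:           aws.String("us-east-1"),
    Endpoint:         aws.String("http://localhost:8333"),
    Credentials:      credentials.NewStaticCredentials("some_access_key1", "some_secret_key1", ""),
    S3ForcePathStyle: aws.Bool(true), // address buckets by path, not by virtual host
})))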

weed/command/benchmark.go (7 changes)

@@ -282,14 +282,19 @@ func readFiles(fileIdLineChan chan string, s *stat) {
 start := time.Now()
 var bytesRead int
 var err error
-url, err := b.masterClient.LookupFileId(fid)
+urls, err := b.masterClient.LookupFileId(fid)
 if err != nil {
 s.failed++
 println("!!!! ", fid, " location not found!!!!!")
 continue
 }
 var bytes []byte
+for _, url := range urls {
 bytes, err = util.Get(url)
+if err == nil {
+break
+}
+}
 bytesRead = len(bytes)
 if err == nil {
 s.completed++

weed/command/filer.go (22 changes)

@@ -1,6 +1,7 @@
 package command
 import (
+"fmt"
 "net/http"
 "strconv"
 "strings"
@@ -19,6 +20,8 @@ import (
 var (
 f FilerOptions
+filerStartS3 *bool
+filerS3Options S3Options
 )
 type FilerOptions struct {
@@ -51,7 +54,7 @@ func init() {
 f.bindIp = cmdFiler.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
 f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
 f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public")
-f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified")
+f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
 f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
 f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
 f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size")
@@ -60,6 +63,14 @@ func init() {
 f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
 f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
 f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+
+// start s3 on filer
+filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
+filerS3Options.port = cmdFiler.Flag.Int("s3.port", 8333, "s3 server http listen port")
+filerS3Options.domainName = cmdFiler.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}")
+filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file")
+filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
+filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file")
 }
 var cmdFiler = &Command{
@@ -89,6 +100,15 @@ func runFiler(cmd *Command, args []string) bool {
 go stats_collect.StartMetricsServer(*f.metricsHttpPort)
+if *filerStartS3 {
+filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
+filerS3Options.filer = &filerAddress
+go func() {
+time.Sleep(2 * time.Second)
+filerS3Options.startS3Server()
+}()
+}
+
 f.startFiler()
 return true

weed/command/master.go (34 changes)

@@ -1,15 +1,16 @@
 package command
 import (
-"github.com/chrislusf/raft/protobuf"
-"github.com/gorilla/mux"
-"google.golang.org/grpc/reflection"
 "net/http"
 "os"
 "runtime"
+"sort"
 "strconv"
 "strings"
+"github.com/chrislusf/raft/protobuf"
+"github.com/gorilla/mux"
+"google.golang.org/grpc/reflection"
+"time"
 "github.com/chrislusf/seaweedfs/weed/util/grace"
@@ -41,6 +42,7 @@ type MasterOptions struct {
 disableHttp *bool
 metricsAddress *string
 metricsIntervalSec *int
+raftResumeState *bool
 }
 func init() {
@@ -59,6 +61,7 @@ func init() {
 m.disableHttp = cmdMaster.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
 m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address <host>:<port>")
 m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
+m.raftResumeState = cmdMaster.Flag.Bool("resumeState", false, "resume previous state on start master server")
 }
 var cmdMaster = &Command{
@@ -118,10 +121,10 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
 glog.Fatalf("Master startup error: %v", e)
 }
 // start raftServer
-raftServer := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"),
-peers, myMasterAddress, util.ResolvePath(*masterOption.metaFolder), ms.Topo, 5)
+raftServer, err := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"),
+peers, myMasterAddress, util.ResolvePath(*masterOption.metaFolder), ms.Topo, 5, *masterOption.raftResumeState)
 if raftServer == nil {
-glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717", *masterOption.metaFolder)
+glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err)
 }
 ms.SetRaftServer(raftServer)
 r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
@@ -139,6 +142,15 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
 glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
 go grpcS.Serve(grpcL)
+go func() {
+time.Sleep(1500 * time.Millisecond)
+if ms.Topo.RaftServer.Leader() == "" && ms.Topo.RaftServer.IsLogEmpty() && isTheFirstOne(myMasterAddress, peers) {
+if ms.MasterClient.FindLeader(myMasterAddress) == "" {
+raftServer.DoJoinCommand()
+}
+}
+}()
 go ms.MasterClient.KeepConnectedToMaster()
 // start http server
@@ -172,6 +184,14 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
 return
 }
+func isTheFirstOne(self string, peers []string) bool {
+sort.Strings(peers)
+if len(peers) <= 0 {
+return true
+}
+return self == peers[0]
+}
 func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption {
 return &weed_server.MasterOption{
 Host: *m.ip,
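The isTheFirstOne check above is what keeps several empty masters from each bootstrapping their own raft cluster: only the lexicographically smallest peer self-joins. A small standalone illustration with made-up peer addresses:

package main

import (
    "fmt"
    "sort"
)

// same logic as in weed/command/master.go: the smallest address bootstraps
func isTheFirstOne(self string, peers []string) bool {
    sort.Strings(peers)
    if len(peers) <= 0 {
        return true
    }
    return self == peers[0]
}

func main() {
    peers := []string{"master2:9333", "master1:9333", "master3:9333"}
    fmt.Println(isTheFirstOne("master1:9333", peers)) // true: seeds the cluster
    fmt.Println(isTheFirstOne("master2:9333", peers)) // false: waits and joins the leader
}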

weed/command/mount_std.go (27 changes)

@@ -7,6 +7,7 @@ import (
 "fmt"
 "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
 "os"
+"os/user"
 "path"
 "runtime"
 "strconv"
@@ -92,6 +93,29 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 }
 fileInfo, err := os.Stat(dir)
+uid, gid := uint32(0), uint32(0)
+mountMode := os.ModeDir | 0777
+if err == nil {
+mountMode = os.ModeDir | fileInfo.Mode()
+uid, gid = util.GetFileUidGid(fileInfo)
+fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode())
+} else {
+fmt.Printf("can not stat %s\n", dir)
+return false
+}
+
+if uid == 0 {
+if u, err := user.Current(); err == nil {
+if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil {
+uid = uint32(parsedId)
+}
+if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil {
+gid = uint32(parsedId)
+}
+fmt.Printf("current uid=%d gid=%d\n", uid, gid)
+}
+}
+
 // mapping uid, gid
 uidGidMapper, err := meta_cache.NewUidGidMapper(*option.uidMap, *option.gidMap)
 if err != nil {
@@ -150,6 +174,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 CacheSizeMB: *option.cacheSizeMB,
 DataCenter: *option.dataCenter,
 EntryCacheTtl: 3 * time.Second,
+MountUid: uid,
+MountGid: gid,
+MountMode: mountMode,
 MountCtime: fileInfo.ModTime(),
 MountMtime: time.Now(),
 Umask: umask,

weed/command/s3.go (14 changes)

@@ -54,7 +54,13 @@ var cmdS3 = &Command{
 {
 "identities": [
 {
-"name": "some_name",
+"name": "anonymous",
+"actions": [
+"Read"
+]
+},
+{
+"name": "some_admin_user",
 "credentials": [
 {
 "accessKey": "some_access_key1",
@@ -64,6 +70,8 @@ var cmdS3 = &Command{
 "actions": [
 "Admin",
 "Read",
+"List",
+"Tagging",
 "Write"
 ]
 },
@@ -89,6 +97,8 @@ var cmdS3 = &Command{
 ],
 "actions": [
 "Read",
+"List",
+"Tagging",
 "Write"
 ]
 },
@@ -102,6 +112,8 @@ var cmdS3 = &Command{
 ],
 "actions": [
 "Read:bucket1",
+"List:bucket1",
+"Tagging:bucket1",
 "Write:bucket1"
 ]
 }

weed/command/scaffold.go (2 changes)

@@ -140,6 +140,8 @@ keyspace="seaweedfs"
 hosts=[
 "localhost:9042",
 ]
+username=""
+password=""
 [redis2]
 enabled = false

weed/command/server.go (7 changes)

@@ -81,11 +81,12 @@ func init() {
 masterOptions.garbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
 masterOptions.metricsAddress = cmdServer.Flag.String("metrics.address", "", "Prometheus gateway address")
 masterOptions.metricsIntervalSec = cmdServer.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
+masterOptions.raftResumeState = cmdServer.Flag.Bool("resumeState", false, "resume previous state on start master server")
 filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
 filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
 filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
-filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.")
+filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
 filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
 filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit")
 filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
@@ -165,10 +166,6 @@ func runServer(cmd *Command, args []string) bool {
 s3Options.filer = &filerAddress
 msgBrokerOptions.filer = &filerAddress
-if *filerOptions.defaultReplicaPlacement == "" {
-*filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication
-}
-
 runtime.GOMAXPROCS(runtime.NumCPU())
 go stats_collect.StartMetricsServer(*serverMetricsHttpPort)

weed/command/volume.go (5 changes)

@@ -138,6 +138,11 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 glog.Fatalf("The max specified in -max not a valid number %s", maxString)
 }
 }
+if len(v.folderMaxLimits) == 1 && len(v.folders) > 1 {
+for i := 0; i < len(v.folders)-1; i++ {
+v.folderMaxLimits = append(v.folderMaxLimits, v.folderMaxLimits[0])
+}
+}
 if len(v.folders) != len(v.folderMaxLimits) {
 glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
 }

weed/filer/filechunk_manifest.go (30 changes)

@@ -5,6 +5,7 @@ import (
 "fmt"
 "io"
 "math"
+"time"
 "github.com/golang/protobuf/proto"
@@ -84,21 +85,40 @@ func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *fil
 // TODO fetch from cache for weed mount?
 func fetchChunk(lookupFileIdFn LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
-urlString, err := lookupFileIdFn(fileId)
+urlStrings, err := lookupFileIdFn(fileId)
 if err != nil {
 glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
 return nil, err
 }
+return retriedFetchChunkData(urlStrings, cipherKey, isGzipped, true, 0, 0)
+}
+
+func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) ([]byte, error) {
+
+var err error
 var buffer bytes.Buffer
-err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, true, 0, 0, func(data []byte) {
+for waitTime := time.Second; waitTime < 10*time.Second; waitTime += waitTime / 2 {
+for _, urlString := range urlStrings {
+err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
 buffer.Write(data)
 })
 if err != nil {
-glog.V(0).Infof("read %s failed, err: %v", fileId, err)
-return nil, err
+glog.V(0).Infof("read %s failed, err: %v", urlString, err)
+buffer.Reset()
+} else {
+break
+}
+}
+if err != nil {
+glog.V(0).Infof("sleep for %v before retrying reading", waitTime)
+time.Sleep(waitTime)
+} else {
+break
+}
 }
-return buffer.Bytes(), nil
+return buffer.Bytes(), err
 }
 func MaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk) (chunks []*filer_pb.FileChunk, err error) {
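For reference, the retry loop in retriedFetchChunkData tries every replica URL per round, then sleeps with a wait that grows by half each round until it would reach 10 seconds. A tiny standalone sketch of the resulting schedule:

package main

import (
    "fmt"
    "time"
)

func main() {
    // mirrors the loop header in retriedFetchChunkData above
    for waitTime := time.Second; waitTime < 10*time.Second; waitTime += waitTime / 2 {
        fmt.Println(waitTime) // 1s, 1.5s, 2.25s, 3.375s, 5.0625s, 7.59375s
    }
    // six rounds, roughly 20.8s of total sleep before the last error is returned
}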

weed/filer/filechunks.go (12 changes)

@@ -1,13 +1,15 @@
 package filer
 import (
+"bytes"
+"encoding/hex"
 "fmt"
-"hash/fnv"
 "math"
 "sort"
 "sync"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+"github.com/chrislusf/seaweedfs/weed/util"
 )
 func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
@@ -42,12 +44,12 @@ func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
 if len(chunks) == 1 {
 return chunks[0].ETag
 }
-h := fnv.New32a()
+md5_digests := [][]byte{}
 for _, c := range chunks {
-h.Write([]byte(c.ETag))
+md5_decoded, _ := hex.DecodeString(c.ETag)
+md5_digests = append(md5_digests, md5_decoded)
 }
-return fmt.Sprintf("%x", h.Sum32())
+return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
 }
 func CompactFileChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
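The new ETag format follows the S3 multipart convention: the MD5 of the concatenated per-chunk MD5 digests, suffixed with the chunk count, replacing the previous FNV hash. A self-contained sketch using the standard library in place of util.Md5 (the chunk ETags are made-up values):

package main

import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
)

func main() {
    // hypothetical per-chunk ETags, each the hex MD5 of one chunk's bytes
    chunkETags := []string{
        "9e107d9d372bb6826bd81d3542a419d6",
        "e4d909c290d0fb1ca068ffaddf22cbd0",
    }
    var joined []byte
    for _, etag := range chunkETags {
        digest, _ := hex.DecodeString(etag)
        joined = append(joined, digest...)
    }
    sum := md5.Sum(joined)
    // prints "<32 hex chars>-2", the same shape S3 reports for multipart uploads
    fmt.Printf("%x-%d\n", sum, len(chunkETags))
}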

weed/filer/reader_at.go (112 changes)

@@ -3,33 +3,43 @@ package filer
 import (
 "context"
 "fmt"
-"io"
-"sync"
 "github.com/chrislusf/seaweedfs/weed/glog"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
 "github.com/chrislusf/seaweedfs/weed/wdclient"
+"github.com/golang/groupcache/singleflight"
+"io"
+"math/rand"
+"sync"
 )
 type ChunkReadAt struct {
 masterClient *wdclient.MasterClient
 chunkViews []*ChunkView
-lookupFileId func(fileId string) (targetUrl string, err error)
+lookupFileId LookupFileIdFunctionType
 readerLock sync.Mutex
 fileSize int64
+fetchGroup singleflight.Group
+lastChunkFileId string
+lastChunkData []byte
 chunkCache chunk_cache.ChunkCache
 }
 // var _ = io.ReaderAt(&ChunkReadAt{})
-type LookupFileIdFunctionType func(fileId string) (targetUrl string, err error)
+type LookupFileIdFunctionType func(fileId string) (targetUrls []string, err error)
 func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
-return func(fileId string) (targetUrl string, err error) {
-err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+vidCache := make(map[string]*filer_pb.Locations)
+return func(fileId string) (targetUrls []string, err error) {
 vid := VolumeId(fileId)
+locations, found := vidCache[vid]
+if !found {
+// println("looking up volume", vid)
+err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
 VolumeIds: []string{vid},
 })
@@ -37,18 +47,28 @@ func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
 return err
 }
-locations := resp.LocationsMap[vid]
+locations = resp.LocationsMap[vid]
 if locations == nil || len(locations.Locations) == 0 {
 glog.V(0).Infof("failed to locate %s", fileId)
 return fmt.Errorf("failed to locate %s", fileId)
 }
-volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url)
-targetUrl = fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId)
+vidCache[vid] = locations
 return nil
 })
+}
+for _, loc := range locations.Locations {
+volumeServerAddress := filerClient.AdjustedUrl(loc.Url)
+targetUrl := fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId)
+targetUrls = append(targetUrls, targetUrl)
+}
+for i := len(targetUrls) - 1; i > 0; i-- {
+j := rand.Intn(i + 1)
+targetUrls[i], targetUrls[j] = targetUrls[j], targetUrls[i]
+}
 return
 }
 }
@@ -76,10 +96,16 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 var buffer []byte
 startOffset, remaining := offset, int64(len(p))
+var nextChunk *ChunkView
 for i, chunk := range c.chunkViews {
 if remaining <= 0 {
 break
 }
+if i+1 < len(c.chunkViews) {
+nextChunk = c.chunkViews[i+1]
+} else {
+nextChunk = nil
+}
 if startOffset < chunk.LogicOffset {
 gap := int(chunk.LogicOffset - startOffset)
 glog.V(4).Infof("zero [%d,%d)", startOffset, startOffset+int64(gap))
@@ -95,7 +121,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 continue
 }
 glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
-buffer, err = c.readFromWholeChunkData(chunk)
+buffer, err = c.readFromWholeChunkData(chunk, nextChunk)
 if err != nil {
 glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
 return
@@ -123,27 +149,63 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 }
-func (c *ChunkReadAt) readFromWholeChunkData(chunkView *ChunkView) (chunkData []byte, err error) {
+func (c *ChunkReadAt) readFromWholeChunkData(chunkView *ChunkView, nextChunkViews ...*ChunkView) (chunkData []byte, err error) {
+if c.lastChunkFileId == chunkView.FileId {
+return c.lastChunkData, nil
+}
+v, doErr := c.readOneWholeChunk(chunkView)
+if doErr != nil {
+return nil, doErr
+}
+chunkData = v.([]byte)
+c.lastChunkData = chunkData
+c.lastChunkFileId = chunkView.FileId
+for _, nextChunkView := range nextChunkViews {
+if c.chunkCache != nil && nextChunkView != nil {
+go c.readOneWholeChunk(nextChunkView)
+}
+}
+return
+}
+func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, error) {
+var err error
+return c.fetchGroup.Do(chunkView.FileId, func() (interface{}, error) {
 glog.V(4).Infof("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize)
-chunkData = c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
-if chunkData != nil {
-glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(chunkData)))
+data := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
+if data != nil {
+glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data)))
 } else {
-glog.V(4).Infof("doFetchFullChunkData %s", chunkView.FileId)
-chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
+var err error
+data, err = c.doFetchFullChunkData(chunkView)
 if err != nil {
-return
+return data, err
 }
-c.chunkCache.SetChunk(chunkView.FileId, chunkData)
+c.chunkCache.SetChunk(chunkView.FileId, data)
 }
-return
+return data, err
 })
 }
-func (c *ChunkReadAt) doFetchFullChunkData(fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
-return fetchChunk(c.lookupFileId, fileId, cipherKey, isGzipped)
+func (c *ChunkReadAt) doFetchFullChunkData(chunkView *ChunkView) ([]byte, error) {
+glog.V(4).Infof("+ doFetchFullChunkData %s", chunkView.FileId)
+data, err := fetchChunk(c.lookupFileId, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
+glog.V(4).Infof("- doFetchFullChunkData %s", chunkView.FileId)
+return data, err
 }
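The new fetchGroup field uses the singleflight package from the github.com/golang/groupcache dependency added in go.mod: Do collapses concurrent calls that share a key (here, the chunk file id) into a single execution, so parallel readers of the same chunk trigger only one fetch. A minimal sketch of that behavior, independent of SeaweedFS:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"

    "github.com/golang/groupcache/singleflight"
)

func main() {
    var g singleflight.Group
    var fetches int64
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            // "3,0000017" stands in for a chunk file id; overlapping callers share one execution
            v, _ := g.Do("3,0000017", func() (interface{}, error) {
                atomic.AddInt64(&fetches, 1)
                return []byte("chunk data"), nil
            })
            _ = v.([]byte)
        }()
    }
    wg.Wait()
    fmt.Println("fetches executed:", atomic.LoadInt64(&fetches)) // usually 1 when the calls overlap
}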

weed/filer/stream.go (40 changes)

@@ -17,28 +17,28 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
 // fmt.Printf("start to stream content for chunks: %+v\n", chunks)
 chunkViews := ViewFromChunks(masterClient.LookupFileId, chunks, offset, size)
-fileId2Url := make(map[string]string)
+fileId2Url := make(map[string][]string)
 for _, chunkView := range chunkViews {
-urlString, err := masterClient.LookupFileId(chunkView.FileId)
+urlStrings, err := masterClient.LookupFileId(chunkView.FileId)
 if err != nil {
 glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 return err
 }
-fileId2Url[chunkView.FileId] = urlString
+fileId2Url[chunkView.FileId] = urlStrings
 }
 for _, chunkView := range chunkViews {
-urlString := fileId2Url[chunkView.FileId]
-err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
-w.Write(data)
-})
-if err != nil {
-glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+urlStrings := fileId2Url[chunkView.FileId]
+data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
+if err != nil {
 return err
 }
+w.Write(data)
 }
 return nil
@@ -51,25 +51,24 @@ func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk)
 buffer := bytes.Buffer{}
-lookupFileIdFn := func(fileId string) (targetUrl string, err error) {
+lookupFileIdFn := func(fileId string) (targetUrls []string, err error) {
 return masterClient.LookupFileId(fileId)
 }
 chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64)
 for _, chunkView := range chunkViews {
-urlString, err := lookupFileIdFn(chunkView.FileId)
+urlStrings, err := lookupFileIdFn(chunkView.FileId)
 if err != nil {
 glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 return nil, err
 }
-err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
-buffer.Write(data)
-})
+data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
 if err != nil {
-glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
 return nil, err
 }
+buffer.Write(data)
 }
 return buffer.Bytes(), nil
 }
@@ -89,7 +88,7 @@ var _ = io.ReadSeeker(&ChunkStreamReader{})
 func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
-lookupFileIdFn := func(fileId string) (targetUrl string, err error) {
+lookupFileIdFn := func(fileId string) (targetUrl []string, err error) {
 return masterClient.LookupFileId(fileId)
 }
@@ -169,17 +168,24 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
 }
 func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
-urlString, err := c.lookupFileId(chunkView.FileId)
+urlStrings, err := c.lookupFileId(chunkView.FileId)
 if err != nil {
 glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 return err
 }
 var buffer bytes.Buffer
+for _, urlString := range urlStrings {
 err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
 buffer.Write(data)
 })
 if err != nil {
 glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+buffer.Reset()
+} else {
+break
+}
+}
+if err != nil {
 return err
 }
 c.buffer = buffer.Bytes()

weed/filesys/dir.go (9 changes)

@@ -82,9 +82,9 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f
 func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
 attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode()
 attr.Valid = time.Hour
-attr.Uid = dir.entry.Attributes.Uid
-attr.Gid = dir.entry.Attributes.Gid
-attr.Mode = os.FileMode(dir.entry.Attributes.FileMode)
+attr.Uid = dir.wfs.option.MountUid
+attr.Gid = dir.wfs.option.MountGid
+attr.Mode = dir.wfs.option.MountMode
 attr.Crtime = dir.wfs.option.MountCtime
 attr.Ctime = dir.wfs.option.MountCtime
 attr.Mtime = dir.wfs.option.MountMtime
@@ -354,7 +354,8 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
 func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
 glog.V(3).Infof("remove directory entry: %v", req)
-err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, false, false, []int32{dir.wfs.signature})
+ignoreRecursiveErr := true // ignore recursion error since the OS should manage it
+err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, ignoreRecursiveErr, false, []int32{dir.wfs.signature})
 if err != nil {
 glog.V(0).Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
 if strings.Contains(err.Error(), "non-empty") {

weed/filesys/dir_link.go (6 changes)

@@ -18,6 +18,10 @@ var _ = fs.NodeLinker(&Dir{})
 var _ = fs.NodeSymlinker(&Dir{})
 var _ = fs.NodeReadlinker(&File{})
+const (
+HARD_LINK_MARKER = '\x01'
+)
 func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (fs.Node, error) {
 oldFile, ok := old.(*File)
@@ -33,7 +37,7 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
 // update old file to hardlink mode
 if len(oldFile.entry.HardLinkId) == 0 {
-oldFile.entry.HardLinkId = util.RandomBytes(16)
+oldFile.entry.HardLinkId = append(util.RandomBytes(16), HARD_LINK_MARKER)
 oldFile.entry.HardLinkCounter = 1
 }
 oldFile.entry.HardLinkCounter++

weed/filesys/wfs.go (5 changes)

@@ -37,6 +37,9 @@ type Option struct {
 EntryCacheTtl time.Duration
 Umask os.FileMode
+MountUid uint32
+MountGid uint32
+MountMode os.FileMode
 MountCtime time.Time
 MountMtime time.Time
@@ -86,7 +89,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 cacheDir := path.Join(option.CacheDir, cacheUniqueId)
 if option.CacheSizeMB > 0 {
 os.MkdirAll(cacheDir, os.FileMode(0777)&^option.Umask)
-wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB)
+wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
 }
 wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), option.UidGidMapper)

weed/operation/submit.go (3 changes)

@@ -170,6 +170,9 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
 }
 }
 fileUrl := "http://" + ret.Url + "/" + id
+if usePublicUrl {
+fileUrl = "http://" + ret.PublicUrl + "/" + id
+}
 count, e := upload_one_chunk(
 baseName+"-"+strconv.FormatInt(i+1, 10),
 io.LimitReader(fi.Reader, chunkSize),

weed/pb/master.proto (2 changes)

@@ -274,6 +274,8 @@ message GetMasterConfigurationResponse {
 string metrics_address = 1;
 uint32 metrics_interval_seconds = 2;
 repeated StorageBackend storage_backends = 3;
+string default_replication = 4;
+string leader = 5;
 }
 message ListMasterClientsRequest {
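With the two new fields, a client can read the master's default replication and current leader from the existing GetMasterConfiguration RPC; this is how the filer can fall back to the master setting when its own -defaultReplicaPlacement flag is left empty. A minimal client sketch against the generated master_pb API (the address and insecure dial option are assumptions; 19333 follows the SeaweedFS convention of HTTP port 9333 plus 10000):

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    "google.golang.org/grpc"
)

func main() {
    conn, err := grpc.Dial("localhost:19333", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := master_pb.NewSeaweedClient(conn)
    resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("default replication:", resp.GetDefaultReplication())
    fmt.Println("leader:", resp.GetLeader())
}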

238
weed/pb/master_pb/master.pb.go

@@ -2279,6 +2279,8 @@ type GetMasterConfigurationResponse struct {
MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"`
MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"`
StorageBackends []*StorageBackend `protobuf:"bytes,3,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"`
DefaultReplication string `protobuf:"bytes,4,opt,name=default_replication,json=defaultReplication,proto3" json:"default_replication,omitempty"`
Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"`
}
func (x *GetMasterConfigurationResponse) Reset() {
@@ -2334,6 +2336,20 @@ func (x *GetMasterConfigurationResponse) GetStorageBackends() []*StorageBackend
return nil
}
func (x *GetMasterConfigurationResponse) GetDefaultReplication() string {
if x != nil {
return x.DefaultReplication
}
return ""
}
func (x *GetMasterConfigurationResponse) GetLeader() string {
if x != nil {
return x.Leader
}
return ""
}
type ListMasterClientsRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -3197,7 +3213,7 @@ var file_master_proto_rawDesc = []byte{
(regenerated raw descriptor bytes omitted: the serialized master.proto descriptor embedded in master.pb.go is rebuilt so that GetMasterConfigurationResponse additionally encodes the new default_replication and leader fields)
}
var (

40
weed/replication/repl_util/replication_utli.go

@@ -0,0 +1,40 @@
package repl_util
import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
)
func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.FilerSource, writeFunc func(data []byte) error) error {
for _, chunk := range chunkViews {
fileUrls, err := filerSource.LookupFileId(chunk.FileId)
if err != nil {
return err
}
var writeErr error
for _, fileUrl := range fileUrls {
err = util.ReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
writeErr = writeFunc(data)
})
if err != nil {
glog.V(1).Infof("read from %s: %v", fileUrl, err)
} else if writeErr != nil {
glog.V(1).Infof("copy from %s: %v", fileUrl, writeErr)
} else {
break
}
}
if err != nil {
return err
}
}
return nil
}
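CopyFromChunkViews centralizes the per-chunk copy loop that each replication sink previously carried: it looks up every replica URL for a chunk and moves on to the next replica if one read fails. A usage sketch under assumed setup, mirroring how the sinks below invoke it:

package example

import (
    "io"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/replication/repl_util"
    "github.com/chrislusf/seaweedfs/weed/replication/source"
)

// copyToWriter streams every chunk of an entry from the filer source into an
// arbitrary destination; the sink only supplies the write callback.
func copyToWriter(chunkViews []*filer.ChunkView, filerSource *source.FilerSource, dst io.Writer) error {
    writeFunc := func(data []byte) error {
        _, writeErr := dst.Write(data)
        return writeErr
    }
    return repl_util.CopyFromChunkViews(chunkViews, filerSource, writeFunc)
}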

21
weed/replication/sink/azuresink/azure_sink.go

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/replication/repl_util"
"net/url"
"strings"
@@ -107,25 +108,13 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []
return err
}
for _, chunk := range chunkViews {
fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return err
}
var writeErr error
readErr := util.ReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
_, writeErr = appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
})
if readErr != nil {
return readErr
}
if writeErr != nil {
writeFunc := func(data []byte) error {
_, writeErr := appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
return writeErr
}
if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil {
return err
}
return nil

28
weed/replication/sink/b2sink/b2_sink.go

@@ -2,6 +2,7 @@ package B2Sink
import (
"context"
"github.com/chrislusf/seaweedfs/weed/replication/repl_util"
"strings"
"github.com/chrislusf/seaweedfs/weed/filer"
@@ -95,31 +96,18 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int
targetObject := bucket.Object(key)
writer := targetObject.NewWriter(context.Background())
for _, chunk := range chunkViews {
fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return err
}
var writeErr error
readErr := util.ReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
_, err := writer.Write(data)
if err != nil {
writeErr = err
}
})
if readErr != nil {
return readErr
}
if writeErr != nil {
writeFunc := func(data []byte) error {
_, writeErr := writer.Write(data)
return writeErr
}
defer writer.Close()
if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil {
return err
}
return writer.Close()
return nil
}

22
weed/replication/sink/gcssink/gcs_sink.go

@@ -3,6 +3,7 @@ package gcssink
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/replication/repl_util"
"os"
"cloud.google.com/go/storage"
@@ -93,25 +94,14 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []in
chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
defer wc.Close()
for _, chunk := range chunkViews {
fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return err
}
err = util.ReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
wc.Write(data)
})
if err != nil {
return err
}
writeFunc := func(data []byte) error {
_, writeErr := wc.Write(data)
return writeErr
}
if err := wc.Close(); err != nil {
if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil {
return err
}

11
weed/replication/sink/s3sink/s3_write.go

@@ -157,11 +157,18 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
}
}
func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, error) {
fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId)
fileUrls, err := s3sink.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return nil, err
}
buf := make([]byte, chunk.Size)
util.ReadUrl(fileUrl, nil, false, false, chunk.Offset, int(chunk.Size), buf)
for _, fileUrl := range fileUrls {
_, err = util.ReadUrl(fileUrl, nil, false, false, chunk.Offset, int(chunk.Size), buf)
if err != nil {
glog.V(1).Infof("read from %s: %v", fileUrl, err)
} else {
break
}
}
return bytes.NewReader(buf), nil
}

19
weed/replication/source/filer_source.go

@@ -41,7 +41,7 @@ func (fs *FilerSource) DoInitialize(grpcAddress string, dir string) (err error)
return nil
}
func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) {
func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error) {
vid2Locations := make(map[string]*filer_pb.Locations)
@@ -64,29 +64,38 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) {
if err != nil {
glog.V(1).Infof("LookupFileId volume id %s: %v", vid, err)
return "", fmt.Errorf("LookupFileId volume id %s: %v", vid, err)
return nil, fmt.Errorf("LookupFileId volume id %s: %v", vid, err)
}
locations := vid2Locations[vid]
if locations == nil || len(locations.Locations) == 0 {
glog.V(1).Infof("LookupFileId locate volume id %s: %v", vid, err)
return "", fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err)
return nil, fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err)
}
fileUrl = fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, part)
for _, loc := range locations.Locations {
fileUrls = append(fileUrls, fmt.Sprintf("http://%s/%s", loc.Url, part))
}
return
}
func (fs *FilerSource) ReadPart(part string) (filename string, header http.Header, resp *http.Response, err error) {
fileUrl, err := fs.LookupFileId(part)
fileUrls, err := fs.LookupFileId(part)
if err != nil {
return "", nil, nil, err
}
for _, fileUrl := range fileUrls {
filename, header, resp, err = util.DownloadFile(fileUrl)
if err != nil {
glog.V(1).Infof("fail to read from %s: %v", fileUrl, err)
} else {
break
}
}
return filename, header, resp, err
}
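LookupFileId now returns one URL per replica rather than only the first location, so every consumer can fail over. The retry pattern as a hypothetical standalone helper (readFromReplicas is an illustrative name; the ReadUrlAsStream arguments follow the calls above):

package example

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/util"
)

// readFromReplicas tries each replica URL in order and stops at the first
// successful read; only if all replicas fail does the error surface.
func readFromReplicas(fileUrls []string, isFullChunk bool, offset int64, size int, fn func(data []byte)) (err error) {
    for _, fileUrl := range fileUrls {
        err = util.ReadUrlAsStream(fileUrl, nil, false, isFullChunk, offset, size, fn)
        if err == nil {
            return nil
        }
        glog.V(1).Infof("read from %s: %v", fileUrl, err)
    }
    return fmt.Errorf("read from all replicas failed: %v", err)
}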

2
weed/s3api/auth_credentials.go

@@ -19,6 +19,8 @@ const (
ACTION_READ = "Read"
ACTION_WRITE = "Write"
ACTION_ADMIN = "Admin"
ACTION_TAGGING = "Tagging"
ACTION_LIST = "List"
)
type Iam interface {

104
weed/s3api/filer_util_tags.go

@@ -0,0 +1,104 @@
package s3api
import (
"strings"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
const (
S3TAG_PREFIX = "s3-"
)
func (s3a *S3ApiServer) getTags(parentDirectoryPath string, entryName string) (tags map[string]string, err error) {
err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{
Directory: parentDirectoryPath,
Name: entryName,
})
if err != nil {
return err
}
tags = make(map[string]string)
for k, v := range resp.Entry.Extended {
if strings.HasPrefix(k, S3TAG_PREFIX) {
tags[k[len(S3TAG_PREFIX):]] = string(v)
}
}
return nil
})
return
}
func (s3a *S3ApiServer) setTags(parentDirectoryPath string, entryName string, tags map[string]string) (err error) {
return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{
Directory: parentDirectoryPath,
Name: entryName,
})
if err != nil {
return err
}
for k, _ := range resp.Entry.Extended {
if strings.HasPrefix(k, S3TAG_PREFIX) {
delete(resp.Entry.Extended, k)
}
}
if resp.Entry.Extended == nil {
resp.Entry.Extended = make(map[string][]byte)
}
for k, v := range tags {
resp.Entry.Extended[S3TAG_PREFIX+k] = []byte(v)
}
return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{
Directory: parentDirectoryPath,
Entry: resp.Entry,
IsFromOtherCluster: false,
Signatures: nil,
})
})
}
func (s3a *S3ApiServer) rmTags(parentDirectoryPath string, entryName string) (err error) {
return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{
Directory: parentDirectoryPath,
Name: entryName,
})
if err != nil {
return err
}
hasDeletion := false
for k, _ := range resp.Entry.Extended {
if strings.HasPrefix(k, S3TAG_PREFIX) {
delete(resp.Entry.Extended, k)
hasDeletion = true
}
}
if !hasDeletion {
return nil
}
return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{
Directory: parentDirectoryPath,
Entry: resp.Entry,
IsFromOtherCluster: false,
Signatures: nil,
})
})
}
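Tags live directly on the filer entry: each S3 tag is persisted in the entry's Extended map under the s3- prefix, which getTags strips off and setTags/rmTags re-apply or purge. A standalone sketch of that mapping convention (toExtended/fromExtended are illustrative names, not code from this change):

package example

import "strings"

// toExtended shows the persisted form: tag {Color: red} is stored on the
// filer entry as Extended["s3-Color"] = []byte("red").
func toExtended(tags map[string]string) map[string][]byte {
    extended := make(map[string][]byte, len(tags))
    for k, v := range tags {
        extended["s3-"+k] = []byte(v)
    }
    return extended
}

// fromExtended inverts the mapping, ignoring non-tag metadata keys.
func fromExtended(extended map[string][]byte) map[string]string {
    tags := make(map[string]string)
    for k, v := range extended {
        if strings.HasPrefix(k, "s3-") {
            tags[strings.TrimPrefix(k, "s3-")] = string(v)
        }
    }
    return tags
}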

117
weed/s3api/s3api_object_tagging_handlers.go

@@ -0,0 +1,117 @@
package s3api
import (
"encoding/xml"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/chrislusf/seaweedfs/weed/util"
"io"
"io/ioutil"
"net/http"
)
// GetObjectTaggingHandler - GET object tagging
// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
bucket, object := getBucketAndObject(r)
target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
dir, name := target.DirAndName()
tags, err := s3a.getTags(dir, name)
if err != nil {
if err == filer_pb.ErrNotFound {
glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
} else {
glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
}
return
}
writeSuccessResponseXML(w, encodeResponse(FromTags(tags)))
}
// PutObjectTaggingHandler Put object tagging
// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
bucket, object := getBucketAndObject(r)
target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
dir, name := target.DirAndName()
tagging := &Tagging{}
input, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
if err = xml.Unmarshal(input, tagging); err != nil {
glog.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrMalformedXML, r.URL)
return
}
tags := tagging.ToTags()
if len(tags) > 10 {
glog.Errorf("PutObjectTaggingHandler tags %s: %d tags more than 10", r.URL, len(tags))
writeErrorResponse(w, s3err.ErrInvalidTag, r.URL)
return
}
for k, v := range tags {
if len(k) > 128 {
glog.Errorf("PutObjectTaggingHandler tags %s: tag key %s longer than 128", r.URL, k)
writeErrorResponse(w, s3err.ErrInvalidTag, r.URL)
return
}
if len(v) > 256 {
glog.Errorf("PutObjectTaggingHandler tags %s: tag value %s longer than 256", r.URL, v)
writeErrorResponse(w, s3err.ErrInvalidTag, r.URL)
return
}
}
if err = s3a.setTags(dir, name, tagging.ToTags()); err != nil {
if err == filer_pb.ErrNotFound {
glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
} else {
glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
}
return
}
w.WriteHeader(http.StatusNoContent)
}
// DeleteObjectTaggingHandler Delete object tagging
// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
bucket, object := getBucketAndObject(r)
target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
dir, name := target.DirAndName()
err := s3a.rmTags(dir, name)
if err != nil {
if err == filer_pb.ErrNotFound {
glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
} else {
glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
}
return
}
w.WriteHeader(http.StatusNoContent)
}
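The Put handler enforces the limits AWS documents for object tagging: at most 10 tags per object, keys no longer than 128 characters, values no longer than 256. The same validation as a standalone sketch (validateTags is an illustrative helper, not part of the change):

package example

import "fmt"

// validateTags applies the object-tagging constraints checked above.
func validateTags(tags map[string]string) error {
    if len(tags) > 10 {
        return fmt.Errorf("%d tags: an object may carry at most 10 tags", len(tags))
    }
    for k, v := range tags {
        if len(k) > 128 {
            return fmt.Errorf("tag key %q exceeds 128 characters", k)
        }
        if len(v) > 256 {
            return fmt.Errorf("tag value for %q exceeds 256 characters", k)
        }
    }
    return nil
}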

17
weed/s3api/s3api_server.go

@@ -64,9 +64,16 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
// AbortMultipartUpload
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE), "DELETE")).Queries("uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_WRITE), "GET")).Queries("uploadId", "{uploadId:.*}")
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_READ), "GET")).Queries("uploadId", "{uploadId:.*}")
// ListMultipartUploads
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_WRITE), "GET")).Queries("uploads", "")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_READ), "GET")).Queries("uploads", "")
// GetObjectTagging
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectTaggingHandler, ACTION_READ), "GET")).Queries("tagging", "")
// PutObjectTagging
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectTaggingHandler, ACTION_TAGGING), "PUT")).Queries("tagging", "")
// DeleteObjectTagging
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING), "DELETE")).Queries("tagging", "")
// CopyObject
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE), "COPY"))
@@ -81,11 +88,11 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE"))
// ListObjectsV2
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_READ), "LIST")).Queries("list-type", "2")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_LIST), "LIST")).Queries("list-type", "2")
// GetObject, but directory listing is not supported
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET"))
// ListObjectsV1 (Legacy)
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_READ), "LIST"))
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST"))
// PostPolicy
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.PostPolicyBucketHandler, ACTION_WRITE), "POST"))
@@ -112,7 +119,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
}
// ListBuckets
apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_READ), "LIST"))
apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST"))
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)

6
weed/s3api/s3err/s3api_errors.go

@@ -61,6 +61,7 @@ const (
ErrInternalError
ErrInvalidCopyDest
ErrInvalidCopySource
ErrInvalidTag
ErrAuthHeaderEmpty
ErrSignatureVersionNotSupported
ErrMalformedPOSTRequest
@@ -188,6 +189,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTag: {
Code: "InvalidArgument",
Description: "The Tag value you have provided is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMalformedXML: {
Code: "MalformedXML",
Description: "The XML you provided was not well-formed or did not validate against our published schema.",

27
weed/s3api/stats.go

@@ -4,18 +4,35 @@ import (
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
"net/http"
"strconv"
"time"
)
func track(f http.HandlerFunc, action string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "SeaweedFS S3 "+util.VERSION)
start := time.Now()
stats_collect.S3RequestCounter.WithLabelValues(action).Inc()
f(w, r)
stats_collect.S3RequestHistogram.WithLabelValues(action).Observe(time.Since(start).Seconds())
}
}
type StatusRecorder struct {
http.ResponseWriter
Status int
}
func NewStatusResponseWriter(w http.ResponseWriter) *StatusRecorder {
return &StatusRecorder{w, http.StatusOK}
}
func (r *StatusRecorder) WriteHeader(status int) {
r.Status = status
r.ResponseWriter.WriteHeader(status)
}
func (r *StatusRecorder) Flush() {
r.ResponseWriter.(http.Flusher).Flush()
}
func track(f http.HandlerFunc, action string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "SeaweedFS S3 "+util.VERSION)
recorder := NewStatusResponseWriter(w)
start := time.Now()
f(recorder, r)
stats_collect.S3RequestHistogram.WithLabelValues(action).Observe(time.Since(start).Seconds())
stats_collect.S3RequestCounter.WithLabelValues(action, strconv.Itoa(recorder.Status)).Inc()
}
}
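An http.HandlerFunc never reports the status code it wrote, so the recorder wraps the ResponseWriter and remembers the last WriteHeader call; the request counter can then carry both the action and the status as labels. A condensed sketch of the pattern, independent of the SeaweedFS types above:

package example

import (
    "net/http"
    "strconv"
)

// statusRecorder mirrors the StatusRecorder idea: it embeds the original
// ResponseWriter and captures the status code on WriteHeader.
type statusRecorder struct {
    http.ResponseWriter
    status int
}

func (r *statusRecorder) WriteHeader(status int) {
    r.status = status
    r.ResponseWriter.WriteHeader(status)
}

// withStatusLabel runs the inner handler against the recorder, then reports
// the observed status (defaulting to 200 if WriteHeader was never called).
func withStatusLabel(inner http.HandlerFunc, observe func(status string)) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
        inner(rec, r)
        observe(strconv.Itoa(rec.status))
    }
}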

38
weed/s3api/tags.go

@@ -0,0 +1,38 @@
package s3api
import (
"encoding/xml"
)
type Tag struct {
Key string `xml:"Key"`
Value string `xml:"Value"`
}
type TagSet struct {
Tag []Tag `xml:"Tag"`
}
type Tagging struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging"`
TagSet TagSet `xml:"TagSet"`
}
func (t *Tagging) ToTags() map[string]string {
output := make(map[string]string)
for _, tag := range t.TagSet.Tag {
output[tag.Key] = tag.Value
}
return output
}
func FromTags(tags map[string]string) (t *Tagging) {
t = &Tagging{}
for k, v := range tags {
t.TagSet.Tag = append(t.TagSet.Tag, Tag{
Key: k,
Value: v,
})
}
return
}

50
weed/s3api/tags_test.go

@@ -0,0 +1,50 @@
package s3api
import (
"encoding/xml"
"github.com/stretchr/testify/assert"
"testing"
)
func TestXMLUnmarshall(t *testing.T) {
input := `<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
<Tag>
<Key>key1</Key>
<Value>value1</Value>
</Tag>
</TagSet>
</Tagging>
`
tags := &Tagging{}
xml.Unmarshal([]byte(input), tags)
assert.Equal(t, len(tags.TagSet.Tag), 1)
assert.Equal(t, tags.TagSet.Tag[0].Key, "key1")
assert.Equal(t, tags.TagSet.Tag[0].Value, "value1")
}
func TestXMLMarshall(t *testing.T) {
tags := &Tagging{
TagSet: TagSet{
[]Tag{
{
Key: "key1",
Value: "value1",
},
},
},
}
actual := string(encodeResponse(tags))
expected := `<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><TagSet><Tag><Key>key1</Key><Value>value1</Value></Tag></TagSet></Tagging>`
assert.Equal(t, expected, actual)
}

11
weed/server/filer_grpc_server.go

@@ -135,16 +135,19 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol
return resp, nil
}
func (fs *FilerServer) lookupFileId(fileId string) (targetUrl string, err error) {
func (fs *FilerServer) lookupFileId(fileId string) (targetUrls []string, err error) {
fid, err := needle.ParseFileIdFromString(fileId)
if err != nil {
return "", err
return nil, err
}
locations, found := fs.filer.MasterClient.GetLocations(uint32(fid.VolumeId))
if !found || len(locations) == 0 {
return "", fmt.Errorf("not found volume %d in %s", fid.VolumeId, fileId)
return nil, fmt.Errorf("not found volume %d in %s", fid.VolumeId, fileId)
}
for _, loc := range locations {
targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId))
}
return fmt.Sprintf("http://%s/%s", locations[0].Url, fileId), nil
return
}
func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {

33
weed/server/filer_server.go

@@ -3,6 +3,7 @@ package weed_server
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/stats"
"net/http"
"os"
"sync"
@@ -15,7 +16,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/filer"
@@ -92,8 +92,9 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
})
fs.filer.Cipher = option.Cipher
fs.maybeStartMetrics()
fs.checkWithMaster()
go stats.LoopPushingMetric("filer", stats.SourceName(fs.option.Port), fs.metricsAddress, fs.metricsIntervalSec)
go fs.filer.KeepConnectedToMaster()
v := util.GetViper()
@@ -135,7 +136,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
return fs, nil
}
func (fs *FilerServer) maybeStartMetrics() {
func (fs *FilerServer) checkWithMaster() {
for _, master := range fs.option.Masters {
_, err := pb.ParseFilerGrpcAddress(master)
@@ -145,10 +146,19 @@ func (fs *FilerServer) maybeStartMetrics() {
}
isConnected := false
var readErr error
for !isConnected {
for _, master := range fs.option.Masters {
fs.metricsAddress, fs.metricsIntervalSec, readErr = readFilerConfiguration(fs.grpcDialOption, master)
readErr := operation.WithMasterServerClient(master, fs.grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
if err != nil {
return fmt.Errorf("get master %s configuration: %v", master, err)
}
fs.metricsAddress, fs.metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds)
if fs.option.DefaultReplication == "" {
fs.option.DefaultReplication = resp.DefaultReplication
}
return nil
})
if readErr == nil {
isConnected = true
} else {
@@ -157,17 +167,4 @@
}
}
go stats.LoopPushingMetric("filer", stats.SourceName(fs.option.Port), fs.metricsAddress, fs.metricsIntervalSec)
}
func readFilerConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (metricsAddress string, metricsIntervalSec int, err error) {
err = operation.WithMasterServerClient(masterAddress, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
if err != nil {
return fmt.Errorf("get master %s configuration: %v", masterAddress, err)
}
metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds)
return nil
})
return
}

1
weed/server/filer_server_handlers_write_autochunk.go

@@ -167,6 +167,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
TtlSec: ttlSec,
Mime: contentType,
Md5: md5bytes,
FileSize: uint64(chunkOffset),
},
Chunks: fileChunks,
}

17
weed/server/master_grpc_server.go

@@ -3,6 +3,7 @@ package weed_server
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"net"
"strings"
"time"
@@ -302,3 +303,19 @@ func (ms *MasterServer) ListMasterClients(ctx context.Context, req *master_pb.Li
}
return resp, nil
}
func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) {
// tell the volume servers about the leader
leader, _ := ms.Topo.Leader()
resp := &master_pb.GetMasterConfigurationResponse{
MetricsAddress: ms.option.MetricsAddress,
MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec),
StorageBackends: backend.ToPbStorageBackends(),
DefaultReplication: ms.option.DefaultReplicaPlacement,
Leader: leader,
}
return resp, nil
}

13
weed/server/master_grpc_server_volume.go

@@ -3,8 +3,6 @@ package weed_server
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/raft"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -179,14 +177,3 @@ func (ms *MasterServer) LookupEcVolume(ctx context.Context, req *master_pb.Looku
return resp, nil
}
func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) {
resp := &master_pb.GetMasterConfigurationResponse{
MetricsAddress: ms.option.MetricsAddress,
MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec),
StorageBackends: backend.ToPbStorageBackends(),
}
return resp, nil
}

5
weed/server/master_server.go

@@ -138,14 +138,11 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) {
ms.Topo.RaftServer = raftServer.raftServer
ms.Topo.RaftServer.AddEventListener(raft.LeaderChangeEventType, func(e raft.Event) {
glog.V(0).Infof("event: %+v", e)
glog.V(0).Infof("leader change event: %+v => %+v", e.PrevValue(), e.Value())
if ms.Topo.RaftServer.Leader() != "" {
glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "becomes leader.")
}
})
ms.Topo.RaftServer.AddEventListener(raft.StateChangeEventType, func(e raft.Event) {
glog.V(0).Infof("state change: %+v", e)
})
if ms.Topo.IsLeader() {
glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", "I am the leader!")
} else {

123
weed/server/raft_server.go

@@ -2,10 +2,8 @@ package weed_server
import (
"encoding/json"
"io/ioutil"
"os"
"path"
"reflect"
"sort"
"time"
@@ -28,7 +26,31 @@ type RaftServer struct {
*raft.GrpcServer
}
func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer {
type StateMachine struct {
raft.StateMachine
topo *topology.Topology
}
func (s StateMachine) Save() ([]byte, error) {
state := topology.MaxVolumeIdCommand{
MaxVolumeId: s.topo.GetMaxVolumeId(),
}
glog.V(1).Infof("Save raft state %+v", state)
return json.Marshal(state)
}
func (s StateMachine) Recovery(data []byte) error {
state := topology.MaxVolumeIdCommand{}
err := json.Unmarshal(data, &state)
if err != nil {
return err
}
glog.V(1).Infof("Recovery raft state %+v", state)
s.topo.UpAdjustMaxVolumeId(state.MaxVolumeId)
return nil
}
func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, dataDir string, topo *topology.Topology, pulseSeconds int, raftResumeState bool) (*RaftServer, error) {
s := &RaftServer{
peers: peers,
serverAddr: serverAddr,
@@ -46,48 +68,66 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d
transporter := raft.NewGrpcTransporter(grpcDialOption)
glog.V(0).Infof("Starting RaftServer with %v", serverAddr)
if !raftResumeState {
// always clear previous metadata
os.RemoveAll(path.Join(s.dataDir, "conf"))
os.RemoveAll(path.Join(s.dataDir, "log"))
os.RemoveAll(path.Join(s.dataDir, "snapshot"))
// Clear old cluster configurations if peers are changed
if oldPeers, changed := isPeersChanged(s.dataDir, serverAddr, s.peers); changed {
glog.V(0).Infof("Peers Change: %v => %v", oldPeers, s.peers)
}
if err := os.MkdirAll(path.Join(s.dataDir, "snapshot"), 0600); err != nil {
return nil, err
} }
s.raftServer, err = raft.NewServer(s.serverAddr, s.dataDir, transporter, nil, topo, "")
stateMachine := StateMachine{topo: topo}
s.raftServer, err = raft.NewServer(s.serverAddr, s.dataDir, transporter, stateMachine, topo, "")
if err != nil {
glog.V(0).Infoln(err)
return nil
return nil, err
}
s.raftServer.SetHeartbeatInterval(500 * time.Millisecond)
s.raftServer.SetElectionTimeout(time.Duration(pulseSeconds) * 500 * time.Millisecond)
s.raftServer.Start()
if err := s.raftServer.LoadSnapshot(); err != nil {
return nil, err
}
if err := s.raftServer.Start(); err != nil {
return nil, err
}
for _, peer := range s.peers {
s.raftServer.AddPeer(peer, pb.ServerToGrpcAddress(peer))
if err := s.raftServer.AddPeer(peer, pb.ServerToGrpcAddress(peer)); err != nil {
return nil, err
}
}
// Remove deleted peers
for existsPeerName := range s.raftServer.Peers() {
exists, existingPeer := false, ""
for _, peer := range s.peers {
if pb.ServerToGrpcAddress(peer) == existsPeerName {
exists, existingPeer = true, peer
break
}
}
if exists {
if err := s.raftServer.RemovePeer(existsPeerName); err != nil {
glog.V(0).Infoln(err)
return nil, err
} else {
glog.V(0).Infof("removing old peer %s", existingPeer)
}
}
} }
s.GrpcServer = raft.NewGrpcServer(s.raftServer) s.GrpcServer = raft.NewGrpcServer(s.raftServer)
if s.raftServer.IsLogEmpty() && isTheFirstOne(serverAddr, s.peers) { if s.raftServer.IsLogEmpty() && isTheFirstOne(serverAddr, s.peers) {
// Initialize the server by joining itself. // Initialize the server by joining itself.
glog.V(0).Infoln("Initializing new cluster")
_, err := s.raftServer.Do(&raft.DefaultJoinCommand{
Name: s.raftServer.Name(),
ConnectionString: pb.ServerToGrpcAddress(s.serverAddr),
})
if err != nil {
glog.V(0).Infoln(err)
return nil
}
// s.DoJoinCommand()
} }
glog.V(0).Infof("current cluster leader: %v", s.raftServer.Leader()) glog.V(0).Infof("current cluster leader: %v", s.raftServer.Leader())
return s
return s, nil
} }
func (s *RaftServer) Peers() (members []string) { func (s *RaftServer) Peers() (members []string) {
@ -100,38 +140,23 @@ func (s *RaftServer) Peers() (members []string) {
return return
} }
func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, changed bool) {
confPath := path.Join(dir, "conf")
// open conf file
b, err := ioutil.ReadFile(confPath)
if err != nil {
return oldPeers, true
}
conf := &raft.Config{}
if err = json.Unmarshal(b, conf); err != nil {
return oldPeers, true
}
for _, p := range conf.Peers {
oldPeers = append(oldPeers, p.Name)
func isTheFirstOne(self string, peers []string) bool {
sort.Strings(peers)
if len(peers) <= 0 {
return true
} }
oldPeers = append(oldPeers, self)
if len(peers) == 0 && len(oldPeers) <= 1 {
return oldPeers, false
return self == peers[0]
} }
sort.Strings(peers)
sort.Strings(oldPeers)
func (s *RaftServer) DoJoinCommand() {
return oldPeers, !reflect.DeepEqual(peers, oldPeers)
glog.V(0).Infoln("Initializing new cluster")
if _, err := s.raftServer.Do(&raft.DefaultJoinCommand{
Name: s.raftServer.Name(),
ConnectionString: pb.ServerToGrpcAddress(s.serverAddr),
}); err != nil {
glog.Errorf("fail to send join command: %v", err)
} }
func isTheFirstOne(self string, peers []string) bool {
sort.Strings(peers)
if len(peers) <= 0 {
return true
}
return self == peers[0]
} }
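Illustrative aside (not part of the diff): the new StateMachine persists nothing but the maximum volume id, so a raft snapshot is a one-line JSON document. A minimal standalone sketch of that Save/Recovery round trip, with a simplified stand-in for topology.MaxVolumeIdCommand (the real type and its JSON tags live in weed/topology):

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for topology.MaxVolumeIdCommand.
type MaxVolumeIdCommand struct {
	MaxVolumeId uint32 `json:"maxVolumeId"`
}

// save mirrors StateMachine.Save: serialize the current max volume id.
func save(maxVolumeId uint32) ([]byte, error) {
	return json.Marshal(MaxVolumeIdCommand{MaxVolumeId: maxVolumeId})
}

// recovery mirrors StateMachine.Recovery: restore the max volume id.
func recovery(data []byte) (uint32, error) {
	var state MaxVolumeIdCommand
	if err := json.Unmarshal(data, &state); err != nil {
		return 0, err
	}
	return state.MaxVolumeId, nil
}

func main() {
	snapshot, _ := save(42)
	restored, _ := recovery(snapshot)
	fmt.Printf("snapshot=%s restored=%d\n", snapshot, restored)
}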

4
weed/server/raft_server_handlers.go

@@ -1,6 +1,7 @@
 package weed_server
 import (
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"net/http"
 )
@@ -8,13 +9,16 @@ type ClusterStatusResult struct {
 	IsLeader bool     `json:"IsLeader,omitempty"`
 	Leader   string   `json:"Leader,omitempty"`
 	Peers    []string `json:"Peers,omitempty"`
+	MaxVolumeId needle.VolumeId `json:"MaxVolumeId,omitempty"`
 }
 func (s *RaftServer) StatusHandler(w http.ResponseWriter, r *http.Request) {
 	ret := ClusterStatusResult{
 		IsLeader: s.topo.IsLeader(),
 		Peers:    s.Peers(),
+		MaxVolumeId: s.topo.GetMaxVolumeId(),
 	}
 	if leader, e := s.topo.Leader(); e == nil {
 		ret.Leader = leader
 	}
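With MaxVolumeId now exposed in the status response, a client can read it straight off the master. A hedged sketch, assuming a master on localhost:9333 serving this handler at the usual /cluster/status route (needle.VolumeId is a uint32 underneath):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Mirror of ClusterStatusResult including the new MaxVolumeId field.
type clusterStatus struct {
	IsLeader    bool     `json:"IsLeader,omitempty"`
	Leader      string   `json:"Leader,omitempty"`
	Peers       []string `json:"Peers,omitempty"`
	MaxVolumeId uint32   `json:"MaxVolumeId,omitempty"`
}

func main() {
	resp, err := http.Get("http://localhost:9333/cluster/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var status clusterStatus
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		panic(err)
	}
	fmt.Printf("leader=%s maxVolumeId=%d\n", status.Leader, status.MaxVolumeId)
}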

2
weed/server/volume_grpc_client_to_master.go

@@ -90,7 +90,7 @@ func (vs *VolumeServer) StopHeartbeat() (isAlreadyStopping bool) {
 		return true
 	}
 	vs.isHeartbeating = false
-	vs.stopChan <- true
+	close(vs.stopChan)
 	return false
 }
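The switch from a send to close matters for shutdown semantics: a send wakes exactly one receiver and blocks forever when nobody is listening, while closing the channel unblocks every current and future receiver. A small self-contained sketch of that behavior:

package main

import (
	"fmt"
	"sync"
)

func main() {
	stopChan := make(chan bool)
	var wg sync.WaitGroup

	// Several goroutines can wait on the same stop signal.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-stopChan // receiving from a closed channel returns immediately
			fmt.Printf("worker %d stopped\n", id)
		}(i)
	}

	// `stopChan <- true` would wake only one worker (and block if none
	// were listening); close() broadcasts and never blocks.
	close(stopChan)
	wg.Wait()
}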

4
weed/server/volume_server_handlers_read.go

@@ -93,6 +93,10 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	} else if hasEcVolume {
 		count, err = vs.store.ReadEcShardNeedle(volumeId, n)
 	}
+	if err != nil && err != storage.ErrorDeleted && r.FormValue("type") != "replicate" && hasVolume {
+		glog.V(4).Infof("read needle: %v", err)
+		// start to fix it from other replicas, if not deleted and hasVolume and is not a replicated request
+	}
 	// glog.V(4).Infoln("read bytes", count, "error", err)
 	if err != nil || count < 0 {
 		glog.V(3).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err)

3
weed/server/volume_server_handlers_write.go

@@ -13,6 +13,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/topology"
+	"github.com/chrislusf/seaweedfs/weed/util"
 )
 func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
@@ -67,7 +68,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
 		ret.Name = string(reqNeedle.Name)
 	}
 	ret.Size = uint32(originalSize)
-	ret.ETag = reqNeedle.Etag()
+	ret.ETag = fmt.Sprintf("%x", util.Base64Md5ToBytes(contentMd5))
 	ret.Mime = string(reqNeedle.Mime)
 	setEtag(w, ret.ETag)
 	w.Header().Set("Content-MD5", contentMd5)
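The ETag is now derived from the Content-MD5 header rather than the needle checksum: decode the base64 digest and hex-encode the raw bytes. A stdlib-only sketch of that conversion (util.Base64Md5ToBytes presumably wraps the base64 decode; treated here as an assumption):

package main

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	body := []byte("hello world")

	// Content-MD5 is the base64-encoded MD5 digest of the body.
	sum := md5.Sum(body)
	contentMd5 := base64.StdEncoding.EncodeToString(sum[:])

	// Equivalent of fmt.Sprintf("%x", util.Base64Md5ToBytes(contentMd5)):
	// decode the base64 header, then hex-encode the raw 16-byte digest.
	raw, err := base64.StdEncoding.DecodeString(contentMd5)
	if err != nil {
		panic(err)
	}
	etag := hex.EncodeToString(raw)
	fmt.Println(contentMd5, "->", etag)
}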

2
weed/server/webdav_server.go

@@ -100,7 +100,7 @@ type WebDavFile struct {
 func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
-	chunkCache := chunk_cache.NewTieredChunkCache(256, option.CacheDir, option.CacheSizeMB)
+	chunkCache := chunk_cache.NewTieredChunkCache(256, option.CacheDir, option.CacheSizeMB, 1024*1024)
 	return &WebDavFileSystem{
 		option:     option,
 		chunkCache: chunkCache,

2
weed/stats/metrics.go

@@ -99,7 +99,7 @@ var (
 			Subsystem: "s3",
 			Name:      "request_total",
 			Help:      "Counter of s3 requests.",
-		}, []string{"type"})
+		}, []string{"type", "code"})
 	S3RequestHistogram = prometheus.NewHistogramVec(
 		prometheus.HistogramOpts{
 			Namespace: "SeaweedFS",
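Adding the "code" label means every increment of the counter must now supply a status code, which in turn lets Prometheus queries slice error rates per code. A minimal sketch with the real client_golang API (label values here are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var s3RequestCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "SeaweedFS",
		Subsystem: "s3",
		Name:      "request_total",
		Help:      "Counter of s3 requests.",
	}, []string{"type", "code"})

func main() {
	prometheus.MustRegister(s3RequestCounter)

	// Both labels are required on every increment, enabling queries like
	// rate(SeaweedFS_s3_request_total{code="500"}[5m]).
	s3RequestCounter.WithLabelValues("PUT", "200").Inc()
	s3RequestCounter.WithLabelValues("PUT", "500").Inc()
	fmt.Println("counter registered and incremented")
}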

2
weed/storage/backend/volume_create_linux.go

@@ -16,7 +16,7 @@ func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
 	}
 	if preallocate != 0 {
 		syscall.Fallocate(int(file.Fd()), 1, 0, preallocate)
-		glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName)
+		glog.V(1).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName)
 	}
 	return NewDiskFile(file), nil
 }

3
weed/storage/disk_location.go

@@ -174,9 +174,6 @@ func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e er
 }
 func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (found bool, e error) {
-	l.volumesLock.Lock()
-	defer l.volumesLock.Unlock()
 	v, ok := l.volumes[vid]
 	if !ok {
 		return

4
weed/storage/store.go

@@ -380,10 +380,12 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error {
 		Ttl:              v.Ttl.ToUint32(),
 	}
 	for _, location := range s.Locations {
-		if found, err := location.deleteVolumeById(i); found && err == nil {
+		if err := location.DeleteVolume(i); err == nil {
 			glog.V(0).Infof("DeleteVolume %d", i)
 			s.DeletedVolumesChan <- message
 			return nil
+		} else {
+			glog.Errorf("DeleteVolume %d: %v", i, err)
 		}
 	}

4
weed/storage/store_ec.go

@@ -128,7 +128,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
 		return 0, fmt.Errorf("locate in local ec volume: %v", err)
 	}
 	if size.IsDeleted() {
-		return 0, fmt.Errorf("entry %s is deleted", n.Id)
+		return 0, ErrorDeleted
 	}
 	glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals)
@@ -141,7 +141,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
 		return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
 	}
 	if isDeleted {
-		return 0, fmt.Errorf("ec entry %s is deleted", n.Id)
+		return 0, ErrorDeleted
 	}
 	err = n.ReadBytes(bytes, offset.ToAcutalOffset(), size, localEcVolume.Version)

3
weed/storage/volume_read_write.go

@@ -16,6 +16,7 @@ import (
 )
 var ErrorNotFound = errors.New("not found")
+var ErrorDeleted = errors.New("already deleted")
 // isFileUnchanged checks whether this needle to write is same as last one.
 // It requires serialized access in the same volume.
@@ -266,7 +267,7 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
 			glog.V(3).Infof("reading deleted %s", n.String())
 			readSize = -readSize
 		} else {
-			return -1, errors.New("already deleted")
+			return -1, ErrorDeleted
 		}
 	}
 	if readSize == 0 {
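Replacing the per-call fmt.Errorf values with one shared sentinel is what makes the new check in the read handler above (err != storage.ErrorDeleted) possible: callers compare with == instead of parsing error strings. A small sketch of the pattern:

package main

import (
	"errors"
	"fmt"
)

// Shared sentinel, as in weed/storage: every "deleted" path returns the
// same value, so callers can distinguish it from real failures.
var ErrorDeleted = errors.New("already deleted")

// readNeedle is a stand-in for the volume/EC read paths above.
func readNeedle(deleted bool) (int, error) {
	if deleted {
		return -1, ErrorDeleted
	}
	return 1024, nil
}

func main() {
	if _, err := readNeedle(true); err != nil {
		if err == ErrorDeleted {
			fmt.Println("deleted entry: expected, no replica repair needed")
		} else {
			fmt.Println("unexpected read error:", err)
		}
	}
}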

32
weed/util/chunk_cache/chunk_cache.go

@@ -7,12 +7,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 )
-const (
-	memCacheSizeLimit     = 1024 * 1024
-	onDiskCacheSizeLimit0 = memCacheSizeLimit
-	onDiskCacheSizeLimit1 = 4 * memCacheSizeLimit
-)
 type ChunkCache interface {
 	GetChunk(fileId string, minSize uint64) (data []byte)
 	SetChunk(fileId string, data []byte)
@@ -23,17 +17,23 @@ type TieredChunkCache struct {
 	memCache   *ChunkCacheInMemory
 	diskCaches []*OnDiskCacheLayer
 	sync.RWMutex
+	onDiskCacheSizeLimit0 uint64
+	onDiskCacheSizeLimit1 uint64
+	onDiskCacheSizeLimit2 uint64
 }
-func NewTieredChunkCache(maxEntries int64, dir string, diskSizeMB int64) *TieredChunkCache {
+func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64) *TieredChunkCache {
 	c := &TieredChunkCache{
 		memCache: NewChunkCacheInMemory(maxEntries),
 	}
 	c.diskCaches = make([]*OnDiskCacheLayer, 3)
-	c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeMB/4, 4)
-	c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeMB/4, 4)
-	c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeMB/2, 4)
+	c.onDiskCacheSizeLimit0 = uint64(unitSize)
+	c.onDiskCacheSizeLimit1 = 4 * c.onDiskCacheSizeLimit0
+	c.onDiskCacheSizeLimit2 = 2 * c.onDiskCacheSizeLimit1
+	c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_2", diskSizeInUnit*unitSize/8, 2)
+	c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_3", diskSizeInUnit*unitSize/4+diskSizeInUnit*unitSize/8, 3)
+	c.diskCaches[2] = NewOnDiskCacheLayer(dir, "c2_2", diskSizeInUnit*unitSize/2, 2)
 	return c
 }
@@ -51,7 +51,7 @@ func (c *TieredChunkCache) GetChunk(fileId string, minSize uint64) (data []byte)
 func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byte) {
-	if minSize < memCacheSizeLimit {
+	if minSize <= c.onDiskCacheSizeLimit0 {
 		data = c.memCache.GetChunk(fileId)
 		if len(data) >= int(minSize) {
 			return data
@@ -64,13 +64,13 @@ func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byt
 		return nil
 	}
-	if minSize < onDiskCacheSizeLimit0 {
+	if minSize <= c.onDiskCacheSizeLimit0 {
 		data = c.diskCaches[0].getChunk(fid.Key)
 		if len(data) >= int(minSize) {
 			return data
 		}
 	}
-	if minSize < onDiskCacheSizeLimit1 {
+	if minSize <= c.onDiskCacheSizeLimit1 {
 		data = c.diskCaches[1].getChunk(fid.Key)
 		if len(data) >= int(minSize) {
 			return data
@@ -101,7 +101,7 @@ func (c *TieredChunkCache) SetChunk(fileId string, data []byte) {
 func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
-	if len(data) < memCacheSizeLimit {
+	if len(data) <= int(c.onDiskCacheSizeLimit0) {
 		c.memCache.SetChunk(fileId, data)
 	}
@@ -111,9 +111,9 @@ func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
 		return
 	}
-	if len(data) < onDiskCacheSizeLimit0 {
+	if len(data) <= int(c.onDiskCacheSizeLimit0) {
 		c.diskCaches[0].setChunk(fid.Key, data)
-	} else if len(data) < onDiskCacheSizeLimit1 {
+	} else if len(data) <= int(c.onDiskCacheSizeLimit1) {
 		c.diskCaches[1].setChunk(fid.Key, data)
 	} else {
 		c.diskCaches[2].setChunk(fid.Key, data)
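To see what the parameterized tiers work out to: with the 1 MB unit the webdav caller above passes (unitSize = 1024*1024), the size limits become 1 MB / 4 MB / 8 MB and the disk budget splits 1/8, 3/8, 1/2 across the three tiers. A worked sketch of that arithmetic (the -cacheSizeMB=1024 value is an illustrative assumption):

package main

import "fmt"

func main() {
	unitSize := int64(1024 * 1024) // 1 MB unit, as passed by mount/webdav
	diskSizeInUnit := int64(1024)  // e.g. a 1024 MB cache budget

	limit0 := uint64(unitSize) // chunks <= 1 MB: memory cache + tier 0
	limit1 := 4 * limit0       // chunks <= 4 MB: tier 1
	limit2 := 2 * limit1       // 8 MB nominal cap; tier 2 takes the rest

	total := diskSizeInUnit * unitSize
	tier0 := total / 8         // 1/8 of the budget, 2 segments ("c0_2")
	tier1 := total/4 + total/8 // 3/8 of the budget, 3 segments ("c1_3")
	tier2 := total / 2         // 1/2 of the budget, 2 segments ("c2_2")

	fmt.Println(limit0, limit1, limit2)
	fmt.Println(tier0+tier1+tier2 == total) // true: the split is exact
}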

51
weed/util/chunk_cache/chunk_cache_on_disk_test.go

@@ -14,9 +14,9 @@ func TestOnDisk(t *testing.T) {
 	tmpDir, _ := ioutil.TempDir("", "c")
 	defer os.RemoveAll(tmpDir)
-	totalDiskSizeMb := int64(32)
+	totalDiskSizeInKB := int64(32)
-	cache := NewTieredChunkCache(0, tmpDir, totalDiskSizeMb)
+	cache := NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024)
 	writeCount := 5
 	type test_data struct {
@@ -26,7 +26,7 @@ func TestOnDisk(t *testing.T) {
 	}
 	testData := make([]*test_data, writeCount)
 	for i := 0; i < writeCount; i++ {
-		buff := make([]byte, 1024*1024)
+		buff := make([]byte, 1024)
 		rand.Read(buff)
 		testData[i] = &test_data{
 			data: buff,
@@ -34,9 +34,22 @@ func TestOnDisk(t *testing.T) {
 			size: uint64(len(buff)),
 		}
 		cache.SetChunk(testData[i].fileId, testData[i].data)
+
+		// read back right after write
+		data := cache.GetChunk(testData[i].fileId, testData[i].size)
+		if bytes.Compare(data, testData[i].data) != 0 {
+			t.Errorf("failed to write to and read from cache: %d", i)
+		}
 	}
-	for i := 0; i < writeCount; i++ {
+	for i := 0; i < 2; i++ {
+		data := cache.GetChunk(testData[i].fileId, testData[i].size)
+		if bytes.Compare(data, testData[i].data) == 0 {
+			t.Errorf("old cache should have been purged: %d", i)
+		}
+	}
+	for i := 2; i < writeCount; i++ {
 		data := cache.GetChunk(testData[i].fileId, testData[i].size)
 		if bytes.Compare(data, testData[i].data) != 0 {
 			t.Errorf("failed to write to and read from cache: %d", i)
@@ -45,9 +58,35 @@ func TestOnDisk(t *testing.T) {
 	cache.Shutdown()
-	cache = NewTieredChunkCache(0, tmpDir, totalDiskSizeMb)
+	cache = NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024)
-	for i := 0; i < writeCount; i++ {
+	for i := 0; i < 2; i++ {
+		data := cache.GetChunk(testData[i].fileId, testData[i].size)
+		if bytes.Compare(data, testData[i].data) == 0 {
+			t.Errorf("old cache should have been purged: %d", i)
+		}
+	}
+	for i := 2; i < writeCount; i++ {
+		if i == 4 {
+			// FIXME this failed many times on build machines
+			/*
+				I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 2048 bytes disk space for /tmp/c578652251/c0_2_0.dat
+				I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 2048 bytes disk space for /tmp/c578652251/c0_2_1.dat
+				I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 4096 bytes disk space for /tmp/c578652251/c1_3_0.dat
+				I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 4096 bytes disk space for /tmp/c578652251/c1_3_1.dat
+				I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 4096 bytes disk space for /tmp/c578652251/c1_3_2.dat
+				I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 8192 bytes disk space for /tmp/c578652251/c2_2_0.dat
+				I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 8192 bytes disk space for /tmp/c578652251/c2_2_1.dat
+				I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 2048 bytes disk space for /tmp/c578652251/c0_2_0.dat
+				I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 2048 bytes disk space for /tmp/c578652251/c0_2_1.dat
+				--- FAIL: TestOnDisk (0.19s)
+				    chunk_cache_on_disk_test.go:73: failed to write to and read from cache: 4
+				FAIL
+				FAIL	github.com/chrislusf/seaweedfs/weed/util/chunk_cache	0.199s
+			*/
+			continue
+		}
 		data := cache.GetChunk(testData[i].fileId, testData[i].size)
 		if bytes.Compare(data, testData[i].data) != 0 {
 			t.Errorf("failed to write to and read from cache: %d", i)

8
weed/util/chunk_cache/on_disk_cache_layer.go

@@ -14,17 +14,17 @@ type OnDiskCacheLayer struct {
 	diskCaches []*ChunkCacheVolume
 }
-func NewOnDiskCacheLayer(dir, namePrefix string, diskSizeMB int64, segmentCount int) *OnDiskCacheLayer {
+func NewOnDiskCacheLayer(dir, namePrefix string, diskSize int64, segmentCount int) *OnDiskCacheLayer {
-	volumeCount, volumeSize := int(diskSizeMB/30000), int64(30000)
+	volumeCount, volumeSize := int(diskSize/(30000*1024*1024)), int64(30000*1024*1024)
 	if volumeCount < segmentCount {
-		volumeCount, volumeSize = segmentCount, diskSizeMB/int64(segmentCount)
+		volumeCount, volumeSize = segmentCount, diskSize/int64(segmentCount)
 	}
 	c := &OnDiskCacheLayer{}
 	for i := 0; i < volumeCount; i++ {
 		fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i))
-		diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize*1024*1024)
+		diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize)
 		if err != nil {
 			glog.Errorf("failed to add cache %s : %v", fileName, err)
 		} else {
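The layer now sizes in bytes instead of megabytes, with the 30000 MB default volume size written out as 30000*1024*1024. A small sketch of the sizing logic, using an illustrative 128 MB tier to show the fallback path where the budget is split evenly across segments:

package main

import "fmt"

// sizing mirrors the volume math in NewOnDiskCacheLayer above.
func sizing(diskSize int64, segmentCount int) (volumeCount int, volumeSize int64) {
	volumeCount, volumeSize = int(diskSize/(30000*1024*1024)), int64(30000*1024*1024)
	if volumeCount < segmentCount {
		// Budget is smaller than one default volume: split it evenly.
		volumeCount, volumeSize = segmentCount, diskSize/int64(segmentCount)
	}
	return
}

func main() {
	count, size := sizing(128*1024*1024, 2)
	fmt.Println(count, size) // 2 67108864 (two 64 MB volumes)
}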

2
weed/util/constants.go

@@ -5,7 +5,7 @@ import (
 )
 var (
-	VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 00)
+	VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 03)
 	COMMIT  = ""
 )

2
weed/util/fullpath.go

@@ -13,6 +13,7 @@ func NewFullPath(dir, name string) FullPath {
 func (fp FullPath) DirAndName() (string, string) {
 	dir, name := filepath.Split(string(fp))
+	name = strings.ToValidUTF8(name, "?")
 	if dir == "/" {
 		return dir, name
 	}
@@ -24,6 +25,7 @@ func (fp FullPath) DirAndName() (string, string) {
 func (fp FullPath) Name() string {
 	_, name := filepath.Split(string(fp))
+	name = strings.ToValidUTF8(name, "?")
 	return name
 }
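strings.ToValidUTF8 (Go 1.13+) replaces each run of invalid UTF-8 bytes with the replacement string, so a file name with broken encoding still yields something safe to store and render. A quick illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A file name carrying an invalid UTF-8 byte sequence.
	name := "report\xff\xfe.txt"

	// As in DirAndName/Name above: each run of invalid bytes becomes "?".
	fmt.Println(strings.ToValidUTF8(name, "?")) // report?.txt
}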

30
weed/wdclient/masterclient.go

@@ -52,6 +52,32 @@ func (mc *MasterClient) KeepConnectedToMaster() {
 	}
 }
+func (mc *MasterClient) FindLeader(myMasterAddress string) (leader string) {
+	for _, master := range mc.masters {
+		if master == myMasterAddress {
+			continue
+		}
+		if grpcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {
+			ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
+			defer cancel()
+			resp, err := client.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{})
+			if err != nil {
+				return err
+			}
+			leader = resp.Leader
+			return nil
+		}); grpcErr != nil {
+			glog.V(0).Infof("connect to %s: %v", master, grpcErr)
+		}
+		if leader != "" {
+			glog.V(0).Infof("existing leader is %s", leader)
+			return
+		}
+	}
+	glog.V(0).Infof("No existing leader found!")
+	return
+}
 func (mc *MasterClient) tryAllMasters() {
 	nextHintedLeader := ""
 	for _, master := range mc.masters {
@@ -75,7 +101,7 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri
 	stream, err := client.KeepConnected(ctx)
 	if err != nil {
-		glog.V(0).Infof("%s masterClient failed to keep connected to %s: %v", mc.clientType, master, err)
+		glog.V(1).Infof("%s masterClient failed to keep connected to %s: %v", mc.clientType, master, err)
 		return err
 	}
@@ -118,7 +144,7 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri
 	})
 	if gprcErr != nil {
-		glog.V(0).Infof("%s masterClient failed to connect with master %v: %v", mc.clientType, master, gprcErr)
+		glog.V(1).Infof("%s masterClient failed to connect with master %v: %v", mc.clientType, master, gprcErr)
 	}
 	return
 }
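FindLeader probes every other master with a short per-call deadline and stops at the first one that names a leader. A standalone sketch of that probe-with-timeout pattern, with a fake probe standing in for the GetMasterConfiguration gRPC call (addresses and timings are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

// probe stands in for the GetMasterConfiguration RPC.
func probe(ctx context.Context, master string) (leader string, err error) {
	select {
	case <-time.After(50 * time.Millisecond): // pretend the RPC answered
		return "master-b:9333", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

// findLeader mirrors FindLeader: skip itself, bound each call to 120ms,
// and return as soon as any peer reports a leader.
func findLeader(self string, masters []string) string {
	for _, master := range masters {
		if master == self {
			continue
		}
		ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
		leader, err := probe(ctx, master)
		cancel()
		if err != nil {
			fmt.Printf("connect to %s: %v\n", master, err)
			continue
		}
		if leader != "" {
			return leader
		}
	}
	return ""
}

func main() {
	fmt.Println(findLeader("master-a:9333", []string{"master-a:9333", "master-b:9333"}))
}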

51
weed/wdclient/vid_map.go

@@ -44,38 +44,36 @@ func (vc *vidMap) getLocationIndex(length int) (int, error) {
 	return int(atomic.AddInt32(&vc.cursor, 1)) % length, nil
 }
-func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrl string, err error) {
+func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrls []string, err error) {
 	id, err := strconv.Atoi(vid)
 	if err != nil {
 		glog.V(1).Infof("Unknown volume id %s", vid)
-		return "", err
+		return nil, err
 	}
-	return vc.GetRandomLocation(uint32(id))
-}
-
-func (vc *vidMap) LookupFileId(fileId string) (fullUrl string, err error) {
-	parts := strings.Split(fileId, ",")
-	if len(parts) != 2 {
-		return "", errors.New("Invalid fileId " + fileId)
+	locations, found := vc.GetLocations(uint32(id))
+	if !found {
+		return nil, fmt.Errorf("volume %d not found", id)
 	}
-	serverUrl, lookupError := vc.LookupVolumeServerUrl(parts[0])
-	if lookupError != nil {
-		return "", lookupError
+	for _, loc := range locations {
+		serverUrls = append(serverUrls, loc.Url)
 	}
-	return "http://" + serverUrl + "/" + fileId, nil
+	return
 }
-func (vc *vidMap) LookupVolumeServer(fileId string) (volumeServer string, err error) {
+func (vc *vidMap) LookupFileId(fileId string) (fullUrls []string, err error) {
 	parts := strings.Split(fileId, ",")
 	if len(parts) != 2 {
-		return "", errors.New("Invalid fileId " + fileId)
+		return nil, errors.New("Invalid fileId " + fileId)
 	}
-	serverUrl, lookupError := vc.LookupVolumeServerUrl(parts[0])
+	serverUrls, lookupError := vc.LookupVolumeServerUrl(parts[0])
 	if lookupError != nil {
-		return "", lookupError
+		return nil, lookupError
 	}
-	return serverUrl, nil
+	for _, serverUrl := range serverUrls {
+		fullUrls = append(fullUrls, "http://"+serverUrl+"/"+fileId)
+	}
+	return
 }
 func (vc *vidMap) GetVidLocations(vid string) (locations []Location, err error) {
@@ -99,23 +97,6 @@ func (vc *vidMap) GetLocations(vid uint32) (locations []Location, found bool) {
 	return
 }
-func (vc *vidMap) GetRandomLocation(vid uint32) (serverUrl string, err error) {
-	vc.RLock()
-	defer vc.RUnlock()
-
-	locations := vc.vid2Locations[vid]
-	if len(locations) == 0 {
-		return "", fmt.Errorf("volume %d not found", vid)
-	}
-
-	index, err := vc.getLocationIndex(len(locations))
-	if err != nil {
-		return "", fmt.Errorf("volume %d: %v", vid, err)
-	}
-
-	return locations[index].Url, nil
-}
-
 func (vc *vidMap) addLocation(vid uint32, location Location) {
 	vc.Lock()
 	defer vc.Unlock()