Merge pull request #73 from chrislusf/master

sync
hilimd committed 4 years ago via GitHub (commit 620b91f23e)
100 changed files:

1. .github/workflows/cleanup.yml (16)
2. .github/workflows/release.yml (78)
3. .travis.yml (4)
4. docker/Dockerfile (4)
5. docker/Dockerfile.s3tests (31)
6. docker/Makefile (6)
7. docker/compose/local-cluster-compose.yml (6)
8. docker/compose/local-mount-compose.yml (4)
9. docker/compose/local-s3tests-compose.yml (45)
10. docker/compose/notification.toml (0)
11. docker/compose/replication.toml (0)
12. docker/compose/s3.json (105)
13. docker/compose/s3tests.conf (70)
14. go.mod (5)
15. go.sum (18)
16. k8s/README.md (29)
17. k8s/seaweedfs/Chart.yaml (4)
18. k8s/seaweedfs/templates/cronjob.yaml (15)
19. k8s/seaweedfs/templates/filer-service-client.yaml (1)
20. k8s/seaweedfs/templates/filer-statefulset.yaml (29)
21. k8s/seaweedfs/templates/s3-deployment.yaml (10)
22. k8s/seaweedfs/templates/s3-service.yaml (10)
23. k8s/seaweedfs/templates/seaweedfs-s3-secret.yaml (21)
24. k8s/seaweedfs/values.yaml (38)
25. other/java/client/pom.xml (2)
26. other/java/client/pom.xml.deploy (2)
27. other/java/client/pom_debug.xml (2)
28. other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java (26)
29. other/java/client/src/main/java/seaweedfs/client/FilerClient.java (57)
30. other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java (17)
31. other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java (18)
32. other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java (18)
33. other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java (22)
34. other/java/client/src/main/proto/filer.proto (13)
35. other/java/examples/pom.xml (4)
36. other/java/examples/src/main/java/com/seaweedfs/examples/ExampleReadFile.java (6)
37. other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile.java (13)
38. other/java/hdfs2/dependency-reduced-pom.xml (2)
39. other/java/hdfs2/pom.xml (2)
40. other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java (16)
41. other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java (6)
42. other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java (6)
43. other/java/hdfs3/dependency-reduced-pom.xml (2)
44. other/java/hdfs3/pom.xml (2)
45. other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java (16)
46. other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java (6)
47. other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java (6)
48. unmaintained/change_superblock/change_superblock.go (2)
49. unmaintained/diff_volume_servers/diff_volume_servers.go (2)
50. unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go (2)
51. unmaintained/volume_tailer/volume_tailer.go (2)
52. weed/command/backup.go (2)
53. weed/command/benchmark.go (9)
54. weed/command/command.go (2)
55. weed/command/download.go (12)
56. weed/command/export.go (2)
57. weed/command/filer.go (4)
58. weed/command/filer_copy.go (8)
59. weed/command/filer_sync.go (18)
60. weed/command/mount.go (2)
61. weed/command/mount_std.go (4)
62. weed/command/scaffold.go (6)
63. weed/command/server.go (3)
64. weed/command/upload.go (6)
65. weed/command/volume.go (20)
66. weed/command/webdav.go (6)
67. weed/filer/abstract_sql/abstract_sql_store.go (2)
68. weed/filer/entry.go (1)
69. weed/filer/entry_codec.go (2)
70. weed/filer/filechunk_manifest.go (2)
71. weed/filer/filer_conf.go (18)
72. weed/filer/filer_notify.go (5)
73. weed/filer/filer_notify_append.go (2)
74. weed/filer/filer_on_meta_event.go (2)
75. weed/filer/mysql/mysql_sql_gen.go (14)
76. weed/filer/mysql/mysql_store.go (6)
77. weed/filer/mysql2/mysql2_store.go (6)
78. weed/filer/postgres/postgres_sql_gen.go (14)
79. weed/filer/postgres/postgres_store.go (13)
80. weed/filer/postgres2/postgres2_store.go (13)
81. weed/filer/read_write.go (2)
82. weed/filer/stream.go (2)
83. weed/filesys/filehandle.go (9)
84. weed/filesys/wfs.go (3)
85. weed/filesys/wfs_write.go (1)
86. weed/glog/glog.go (2)
87. weed/glog/glog_file.go (2)
88. weed/notification/gocdk_pub_sub/gocdk_pub_sub.go (35)
89. weed/operation/assign_file_id.go (9)
90. weed/operation/chunked_file.go (8)
91. weed/operation/delete_content.go (4)
92. weed/operation/lookup.go (15)
93. weed/operation/submit.go (29)
94. weed/operation/tail_volume.go (4)
95. weed/operation/upload_content.go (37)
96. weed/pb/filer.proto (13)
97. weed/pb/filer_pb/filer.pb.go (1086)
98. weed/pb/grpc_client_server.go (7)
99. weed/pb/master.proto (43)
100. weed/pb/master_pb/master.pb.go (1658)

.github/workflows/cleanup.yml (16)

@@ -12,11 +12,11 @@ jobs:
steps:
- name: Delete old release assets
uses: mknejp/delete-release-assets@v1
with:
token: ${{ github.token }}
tag: dev
fail-if-no-assets: false
assets: |
weed-*
- name: Delete old release assets
uses: mknejp/delete-release-assets@v1
with:
token: ${{ github.token }}
tag: dev
fail-if-no-assets: false
assets: |
weed-*

.github/workflows/release.yml (78)

@@ -21,42 +21,42 @@ jobs:
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Wait for the deletion
uses: jakejarvis/wait-action@master
with:
time: '30s'
- name: Set BUILD_TIME env
run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}
- name: Go Release Binaries
uses: wangyoucao577/go-release-action@v1.14
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
release_tag: dev
overwrite: true
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed-large-disk
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries
uses: wangyoucao577/go-release-action@v1.14
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
release_tag: dev
overwrite: true
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed-
asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Wait for the deletion
uses: jakejarvis/wait-action@master
with:
time: '30s'
- name: Set BUILD_TIME env
run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}
- name: Go Release Binaries
uses: wangyoucao577/go-release-action@v1.14
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
release_tag: dev
overwrite: true
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed-large-disk
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries
uses: wangyoucao577/go-release-action@v1.14
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
release_tag: dev
overwrite: true
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed-
asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

.travis.yml (4)

@@ -1,8 +1,8 @@
sudo: false
language: go
go:
- 1.14.x
- 1.15.x
- 1.16.x
before_install:
- export PATH=/home/travis/gopath/bin:$PATH
@@ -44,4 +44,4 @@ deploy:
on:
tags: true
repo: chrislusf/seaweedfs
go: 1.15.x
go: 1.16.x

docker/Dockerfile (4)

@@ -1,5 +1,7 @@
FROM alpine
ARG RELEASE=latest # 'latest' or 'dev'
RUN \
ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \
elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
@@ -13,7 +15,7 @@ RUN \
# Install SeaweedFS and Supercronic ( for cron job mode )
apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
apk add fuse && \
wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \
wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/${RELEASE} | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \
tar -C /usr/bin/ -xzvf /tmp/linux_$ARCH.tar.gz && \
curl -fsSLO "$SUPERCRONIC_URL" && \
echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \

docker/Dockerfile.s3tests (31)

@@ -0,0 +1,31 @@
FROM ubuntu:20.04
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
git \
sudo \
debianutils \
python3-pip \
python3-virtualenv \
python3-dev \
libevent-dev \
libffi-dev \
libxml2-dev \
libxslt-dev \
zlib1g-dev && \
DEBIAN_FRONTEND=noninteractive apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
git clone https://github.com/ceph/s3-tests.git /opt/s3-tests
WORKDIR /opt/s3-tests
RUN ./bootstrap
ENV \
NOSETESTS_EXCLUDE="" \
NOSETESTS_ATTR="" \
NOSETESTS_OPTIONS="" \
S3TEST_CONF="/s3test.conf"
ENTRYPOINT ["/bin/bash", "-c"]
CMD ["exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"]

docker/Makefile (6)

@@ -9,6 +9,9 @@ build:
docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
rm ./weed
s3tests_build:
docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests .
dev: build
docker-compose -f compose/local-dev-compose.yml -p seaweedfs up
@@ -30,6 +33,9 @@ cluster: build
2clusters: build
docker-compose -f compose/local-clusters-compose.yml -p seaweedfs up
s3tests: build s3tests_build
docker-compose -f compose/local-s3tests-compose.yml -p seaweedfs up
filer_etcd: build
docker stack deploy -c compose/swarm-etcd.yml fs

docker/compose/local-cluster-compose.yml (6)

@@ -24,7 +24,7 @@ services:
ports:
- 8080:8080
- 18080:18080
command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080'
command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1 -disk=ssd1'
depends_on:
- master0
- master1
@@ -34,7 +34,7 @@ services:
ports:
- 8082:8082
- 18082:18082
command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082'
command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1 -disk=ssd1'
depends_on:
- master0
- master1
@@ -44,7 +44,7 @@ services:
ports:
- 8083:8083
- 18083:18083
command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083'
command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
depends_on:
- master0
- master1

docker/compose/local-mount-compose.yml (4)

@@ -30,7 +30,7 @@ services:
mount_1:
image: chrislusf/seaweedfs:local
privileged: true
entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1" -volumeServerAccess=filerProxy'
entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1 -volumeServerAccess=filerProxy"'
depends_on:
- master
- volume
@@ -38,7 +38,7 @@ services:
mount_2:
image: chrislusf/seaweedfs:local
privileged: true
entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1" -volumeServerAcess=publicUrl'
entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1 -volumeServerAcess=publicUrl"'
depends_on:
- master
- volume

docker/compose/local-s3tests-compose.yml (45)

@@ -0,0 +1,45 @@
version: '2'
services:
master:
image: chrislusf/seaweedfs:local
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master -volumeSizeLimitMB=16"
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
volume:
image: chrislusf/seaweedfs:local
ports:
- 8080:8080
- 18080:18080
command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
depends_on:
- master
s3:
image: chrislusf/seaweedfs:local
ports:
- 8888:8888
- 18888:18888
- 8000:8000
command: 'filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000'
volumes:
- ./s3.json:/etc/seaweedfs/s3.json
depends_on:
- master
- volume
s3tests:
image: chrislusf/ceph-s3-tests:local
volumes:
- ./s3tests.conf:/opt/s3-tests/s3tests.conf
environment:
S3TEST_CONF: "s3tests.conf"
NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3"
NOSETESTS_ATTR: "!tagging,!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy"
NOSETESTS_EXCLUDE: "(bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_delimiter_not_skip_special|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_notexist|bucketv2_notexist|bucket_delete_nonempty|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_head_zero_bytes|object_write_cache_control|object_write_expires|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifmatch_failed|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_authenticated|object_raw_response_headers|object_raw_authenticated_bucket_acl|object_raw_authenticated_object_acl|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_not_expired|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_create_exists|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|bucket_create_special_key_names|object_copy_zero_size|object_copy_verify_contenttype|object_copy_to_itself|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|multipart_upload_incorrect_etag|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket)"
depends_on:
- master
- volume
- s3

docker/notification.toml → docker/compose/notification.toml (0)

docker/replication.toml → docker/compose/replication.toml (0)

docker/compose/s3.json (105)

@@ -0,0 +1,105 @@
{
"identities": [
{
"name": "anonymous",
"actions": [
"Read"
]
},
{
"name": "some_admin_user",
"credentials": [
{
"accessKey": "some_access_key1",
"secretKey": "some_secret_key1"
}
],
"actions": [
"Admin",
"Read",
"List",
"Tagging",
"Write"
]
},
{
"name": "s3_tests",
"credentials": [
{
"accessKey": "ABCDEFGHIJKLMNOPQRST",
"secretKey": "abcdefghijklmnopqrstuvwxyzabcdefghijklmn"
},
{
"accessKey": "0555b35654ad1656d804",
"secretKey": "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
}
],
"actions": [
"Admin",
"Read",
"List",
"Tagging",
"Write"
]
},
{
"name": "s3_tests_alt",
"credentials": [
{
"accessKey": "NOPQRSTUVWXYZABCDEFG",
"secretKey": "nopqrstuvwxyzabcdefghijklmnabcdefghijklm"
}
],
"actions": [
"Admin",
"Read",
"List",
"Tagging",
"Write"
]
},
{
"name": "s3_tests_tenant",
"credentials": [
{
"accessKey": "HIJKLMNOPQRSTUVWXYZA",
"secretKey": "opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab"
}
],
"actions": [
"Admin",
"Read",
"List",
"Tagging",
"Write"
]
},
{
"name": "some_read_only_user",
"credentials": [
{
"accessKey": "some_access_key2",
"secretKey": "some_secret_key2"
}
],
"actions": [
"Read"
]
},
{
"name": "some_normal_user",
"credentials": [
{
"accessKey": "some_access_key3",
"secretKey": "some_secret_key3"
}
],
"actions": [
"Read",
"List",
"Tagging",
"Write"
]
}
]
}
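The s3_tests credentials above deliberately mirror the defaults in s3tests.conf below. As a hedged smoke test against the gateway started by local-s3tests-compose.yml (S3 on port 8000; the bucket name is illustrative):

```
export AWS_ACCESS_KEY_ID=0555b35654ad1656d804
export AWS_SECRET_ACCESS_KEY='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
# create a bucket, then list buckets through the SeaweedFS S3 gateway
aws --endpoint-url http://localhost:8000 s3 mb s3://smoke-test
aws --endpoint-url http://localhost:8000 s3 ls
```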

docker/compose/s3tests.conf (70)

@@ -0,0 +1,70 @@
[DEFAULT]
## this section is just used for host, port and bucket_prefix
# host set for rgw in vstart.sh
host = s3
# port set for rgw in vstart.sh
port = 8000
## say "False" to disable TLS
is_secure = False
[fixtures]
## all the buckets created will start with this prefix;
## {random} will be filled with random characters to pad
## the prefix to 30 characters long, and avoid collisions
bucket prefix = yournamehere-{random}-
[s3 main]
# main display_name set in vstart.sh
display_name = M. Tester
# main user_idname set in vstart.sh
user_id = testid
# main email set in vstart.sh
email = tester@ceph.com
# zonegroup api_name for bucket location
api_name = default
## main AWS access key
access_key = 0555b35654ad1656d804
## main AWS secret key
secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
## replace with key id obtained when secret is created, or delete if KMS not tested
#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef
[s3 alt]
# alt display_name set in vstart.sh
display_name = john.doe
## alt email set in vstart.sh
email = john.doe@example.com
# alt user_id set in vstart.sh
user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234
# alt AWS access key set in vstart.sh
access_key = NOPQRSTUVWXYZABCDEFG
# alt AWS secret key set in vstart.sh
secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm
[s3 tenant]
# tenant display_name set in vstart.sh
display_name = testx$tenanteduser
# tenant user_id set in vstart.sh
user_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef
# tenant AWS secret key set in vstart.sh
access_key = HIJKLMNOPQRSTUVWXYZA
# tenant AWS secret key set in vstart.sh
secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab
# tenant email set in vstart.sh
email = tenanteduser@example.com

go.mod (5)

@@ -3,7 +3,7 @@ module github.com/chrislusf/seaweedfs
go 1.12
require (
cloud.google.com/go v0.58.0
cloud.google.com/go v0.58.0 // indirect
cloud.google.com/go/pubsub v1.3.1
cloud.google.com/go/storage v1.9.0
github.com/Azure/azure-amqp-common-go/v2 v2.1.0 // indirect
@@ -41,7 +41,7 @@ require (
github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
github.com/jcmturner/gofork v1.0.0 // indirect
github.com/json-iterator/go v1.1.10
github.com/karlseguin/ccache v2.0.3+incompatible
github.com/karlseguin/ccache v2.0.3+incompatible // indirect
github.com/karlseguin/ccache/v2 v2.0.7
github.com/klauspost/compress v1.10.9 // indirect
github.com/klauspost/cpuid v1.2.1 // indirect
@@ -73,6 +73,7 @@ require (
github.com/tidwall/match v1.0.1
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
github.com/valyala/bytebufferpool v1.0.0
github.com/valyala/fasthttp v1.20.0
github.com/viant/assertly v0.5.4 // indirect
github.com/viant/ptrie v0.3.0
github.com/viant/toolbox v0.33.2 // indirect

go.sum (18)

@@ -23,8 +23,10 @@ cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNF
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.2.0/go.mod h1:iISCjWnTpnoJT1R287xRdjvQHJrxQOpeah4phb5D3h0=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -68,10 +70,12 @@ github.com/Azure/go-amqp v0.12.7/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG
github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ=
github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw=
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
@@ -105,6 +109,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/andybalholm/brotli v1.0.0 h1:7UCwP93aiSfvWpapti8g88vVVGp2qqtGyePsSuDafo4=
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -446,6 +452,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -455,6 +462,7 @@ github.com/karlseguin/ccache/v2 v2.0.7 h1:y5Pfi4eiyYCOD6LS/Kj+o6Nb4M5Ngpw9qFQs+v
github.com/karlseguin/ccache/v2 v2.0.7/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ=
github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8hY=
github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA=
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
@@ -463,6 +471,7 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.9 h1:pPRt1Z78crspaHISkpSSHjDlx+Tt9suHe519dsI0vF4=
github.com/klauspost/compress v1.10.9/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
@@ -756,6 +765,9 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.20.0 h1:olTmcnLQeZrkBc4TVgE/BatTo1NE/IvW050AuD8SW+U=
github.com/valyala/fasthttp v1.20.0/go.mod h1:jjraHZVbKOXftJfsOYoAjaeygpj5hr8ermTRJNroD7A=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/viant/assertly v0.5.4 h1:5Hh4U3pLZa6uhCFAGpYOxck/8l9TZczEzoHNfJAhHEQ=
github.com/viant/assertly v0.5.4/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/ptrie v0.3.0 h1:SDaRd7Gqr1+ItCNz0GpTxRdK21nOfqjV6YtBm9jGlMY=
@@ -874,6 +886,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -882,6 +895,7 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -919,6 +933,7 @@ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1058,6 +1073,7 @@ golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200608174601-1b747fd94509 h1:MI14dOfl3OG6Zd32w3ugsrvcUO810fDZdWakTq39dH4=
golang.org/x/tools v0.0.0-20200608174601-1b747fd94509/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
@@ -1094,6 +1110,7 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1211,6 +1228,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
modernc.org/b v1.0.0 h1:vpvqeyp17ddcQWF29Czawql4lDdABCDRbXRAS4+aF2o=
modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=

k8s/README.md (29)

@@ -9,15 +9,32 @@ and backup/HA memsql can provide.
with ENV.
* cert config exists and can be enabled, but not been tested.
### current instances config (AIO):
1 instance for each type (master/filer/volume/s3)
### prerequisites
kubernetes nodes have labels which help to define which node (host) will run which pod.
s3/filer/master needs the label **sw-backend=true**
volume needs the label **sw-volume=true**
to label a node to be able to run all pod types in k8s:
```
kubectl label node YOUR_NODE_NAME sw-volume=true,sw-backend=true
```
instances need node labels:
* sw-volume: true (for volume instances, a specific tag)
* sw-backend: true (for all others, as they are less resource-demanding)
on a production k8s deployment you will want each pod to run on a different host,
especially the volume servers & the masters; currently all pods (master/volume/filer)
have an anti-affinity rule to disallow running multiple pods of the same type on the same host.
if you still want to run multiple pods of the same type (master/volume/filer) on the same host,
please set/update the corresponding affinity rule in values.yaml to an empty one (see the example just below):
```affinity: ""```
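For instance, a hedged single-host override (my-values.yaml is a hypothetical file name; each component section in values.yaml carries its own affinity key):
```
# my-values.yaml (hypothetical) blanks the anti-affinity rules, e.g.:
#   master: { affinity: "" }
#   filer:  { affinity: "" }
#   volume: { affinity: "" }
helm install seaweedfs ./k8s/seaweedfs -f my-values.yaml
```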
### current instances config (AIO):
1 instance for each type (master/filer+s3/volume)
you can update the replicas count for each node type in values.yaml,
need to add more nodes with the corresponding label.
need to add more nodes with the corresponding labels.
most of the configuration is available through values.yaml

k8s/seaweedfs/Chart.yaml (4)

@@ -1,5 +1,5 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "2.23"
version: 2.23
appVersion: "2.27"
version: 2.27

k8s/seaweedfs/templates/cronjob.yaml (15)

@@ -15,13 +15,13 @@ spec:
backoffLimit: 2
template:
spec:
{{- with .Values.cronjob.nodeSelector }}
{{- if .Values.cronjob.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 12 }}
{{ tpl .Values.cronjob.nodeSelector . | indent 12 | trim }}
{{- end }}
{{- with .Values.cronjob.tolerations }}
{{- if .Values.cronjob.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
{{ tpl .Values.cronjob.tolerations . | nindent 12 | trim }}
{{- end }}
restartPolicy: OnFailure
containers:
@@ -36,10 +36,13 @@ spec:
- |
set -ex
echo -e "lock\n\
volume.balance -force\
volume.balance -force \
{{ if .Values.volume.dataCenter }} -dataCenter {{ .Values.volume.dataCenter }}{{ end }}\
{{ if .Values.cronjob.collection }} -collection {{ .Values.cronjob.collection }}{{ end }}\n\
volume.fix.replication\nunlock\n" | \
{{- if .Values.cronjob.enableFixReplication }}
volume.fix.replication {{ if .Values.cronjob.collectionPattern }} -collectionPattern={{ .Values.cronjob.collectionPattern }} {{ end }} \n\
{{- end }}
unlock\n" | \
/usr/bin/weed shell \
{{- if .Values.cronjob.master }}
-master {{ .Values.cronjob.master }} \
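Rendered with the defaults from values.yaml further down (cronjob.master seaweedfs-master:9333, the fix-replication toggle assumed on, no dataCenter/collection/collectionPattern set), the script this cronjob pipes into weed shell would look roughly like:

```
echo -e "lock\n\
volume.balance -force \n\
volume.fix.replication  \n\
unlock\n" | /usr/bin/weed shell -master seaweedfs-master:9333
```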

k8s/seaweedfs/templates/filer-service-client.yaml (1)

@@ -10,7 +10,6 @@ metadata:
monitoring: "true"
{{- end }}
spec:
clusterIP: None
ports:
- name: "swfs-filer"
port: {{ .Values.filer.port }}

k8s/seaweedfs/templates/filer-statefulset.yaml (29)

@@ -133,14 +133,36 @@ spec:
-encryptVolumeData \
{{- end }}
-ip=${POD_IP} \
{{- if .Values.filer.enable_peers }}
{{- if gt (.Values.filer.replicas | int) 1 }}
-peers=$(echo -n "{{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}" | sed "s/$HOSTNAME.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}//" | sed 's/,$//; s/^,//; s/,,/,/;') \
{{- end }}
{{- end }}
{{- if .Values.filer.s3.enabled }}
-s3 \
-s3.port={{ .Values.filer.s3.port }} \
{{- if .Values.filer.s3.domainName }}
-s3.domainName={{ .Values.filer.s3.domainName }} \
{{- end }}
{{- if .Values.global.enableSecurity }}
-s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \
-s3.key.file=/usr/local/share/ca-certificates/client/tls.key \
{{- end }}
{{- if .Values.filer.s3.allowEmptyFolder }}
-s3.allowEmptyFolder={{ .Values.filer.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.filer.s3.enableAuth }}
-s3.config=/etc/sw/seaweedfs_s3_config \
{{- end }}
{{- end }}
-master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
{{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }}
volumeMounts:
- name: seaweedfs-filer-log-volume
mountPath: "/logs/"
- mountPath: /etc/sw
name: config-users
readOnly: true
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
@@ -198,6 +220,13 @@ spec:
hostPath:
path: /storage/logs/seaweedfs/filer
type: DirectoryOrCreate
- name: db-schema-config-volume
configMap:
name: seaweedfs-db-init-config
- name: config-users
secret:
defaultMode: 420
secretName: seaweedfs-s3-secret
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:
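For illustration, the -peers expression above can be exercised outside the template; a sketch assuming filer.replicas=3, port 8888, a release name of seaweedfs, evaluated on pod seaweedfs-filer-1:

```
# the range emits every peer, then sed strips the pod's own entry and tidies commas
HOSTNAME=seaweedfs-filer-1
SEAWEEDFS_FULLNAME=seaweedfs
echo -n "seaweedfs-filer-0.seaweedfs-filer:8888,seaweedfs-filer-1.seaweedfs-filer:8888,seaweedfs-filer-2.seaweedfs-filer:8888" \
  | sed "s/$HOSTNAME.${SEAWEEDFS_FULLNAME}-filer:8888//" | sed 's/,$//; s/^,//; s/,,/,/;'
# prints: seaweedfs-filer-0.seaweedfs-filer:8888,seaweedfs-filer-2.seaweedfs-filer:8888
```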

k8s/seaweedfs/templates/s3-deployment.yaml (10)

@@ -90,10 +90,16 @@ spec:
{{- if .Values.s3.allowEmptyFolder }}
-allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.s3.enableAuth }}
-config=/etc/sw/seaweedfs_s3_config \
{{- end }}
-filer={{ template "seaweedfs.name" . }}-filer-client:{{ .Values.filer.port }}
volumeMounts:
- name: logs
mountPath: "/logs/"
- mountPath: /etc/sw
name: config-users
readOnly: true
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
@@ -144,6 +150,10 @@
{{ tpl .Values.s3.resources . | nindent 12 | trim }}
{{- end }}
volumes:
- name: config-users
secret:
defaultMode: 420
secretName: seaweedfs-s3-secret
{{- if eq .Values.s3.logs.type "hostPath" }}
- name: logs
hostPath:

k8s/seaweedfs/templates/s3-service.yaml (10)

@@ -9,15 +9,15 @@ metadata:
spec:
ports:
- name: "swfs-s3"
port: {{ .Values.s3.port }}
targetPort: {{ .Values.s3.port }}
port: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
targetPort: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
protocol: TCP
{{- if .Values.s3.metricsPort }}
- name: "swfs-s3-metrics"
{{- if and .Values.s3.enabled .Values.s3.metricsPort }}
- name: "metrics"
port: {{ .Values.s3.metricsPort }}
targetPort: {{ .Values.s3.metricsPort }}
protocol: TCP
{{- end }}
selector:
app: {{ template "seaweedfs.name" . }}
component: s3
component: {{ if .Values.s3.enabled }}s3{{ else }}filer{{ end }}

k8s/seaweedfs/templates/seaweedfs-s3-secret.yaml (21)

@@ -0,0 +1,21 @@
{{- if not (or .Values.filer.s3.skipAuthSecretCreation .Values.s3.skipAuthSecretCreation) }}
{{- $access_key_admin := randAlphaNum 16 -}}
{{- $secret_key_admin := randAlphaNum 32 -}}
{{- $access_key_read := randAlphaNum 16 -}}
{{- $secret_key_read := randAlphaNum 32 -}}
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: seaweedfs-s3-secret
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/resource-policy": keep
"helm.sh/hook": "pre-install"
stringData:
admin_access_key_id: {{ $access_key_admin }}
admin_secret_access_key: {{ $secret_key_admin }}
read_access_key_id: {{ $access_key_read }}
read_secret_access_key: {{ $secret_key_read }}
seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"{{ $access_key_admin }}","secretKey":"{{ $secret_key_admin }}"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"{{ $access_key_read }}","secretKey":"{{ $secret_key_read }}"}],"actions":["Read"]}]}'
{{- end }}
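Because both key pairs are randomized at install time, they have to be read back from the cluster before configuring any S3 client; a hedged example with kubectl (add -n <namespace> as needed):

```
# decode the generated admin credentials from the secret
kubectl get secret seaweedfs-s3-secret -o jsonpath='{.data.admin_access_key_id}' | base64 --decode; echo
kubectl get secret seaweedfs-s3-secret -o jsonpath='{.data.admin_secret_access_key}' | base64 --decode; echo
```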

k8s/seaweedfs/values.yaml (38)

@@ -4,7 +4,7 @@ global:
registry: ""
repository: ""
imageName: chrislusf/seaweedfs
# imageTag: "2.23" - started using {.Chart.appVersion}
# imageTag: "2.27" - started using {.Chart.appVersion}
imagePullPolicy: IfNotPresent
imagePullSecrets: imagepullsecret
restartPolicy: Always
@@ -136,7 +136,7 @@ volume:
# limit file size to avoid out of memory, default 256mb
fileSizeLimitMB: null
# minimum free disk space(in percents). If free disk space lower this value - all volumes marks as ReadOnly
minFreeSpacePercent: 1
minFreeSpacePercent: 7
# limit background compaction or copying speed in mega bytes per second
@@ -229,6 +229,8 @@ filer:
maxMB: null
# encrypt data on volume servers
encryptVolumeData: false
# enable peers sync metadata, for leveldb (localdb for filer but with sync across)
enable_peers: false
# Whether proxy or redirect to volume server during file GET request
redirectOnRead: false
@@ -311,8 +313,19 @@ filer:
# directories under this folder will be automatically creating a separate bucket
WEED_FILER_BUCKETS_FOLDER: "/buckets"
s3:
enabled: true
port: 8333
#allow empty folders
allowEmptyFolder: false
# Suffix of the host name, {bucket}.{domainName}
domainName: ""
# enable user & permission to s3 (need to inject to all services)
enableAuth: false
skipAuthSecretCreation: false
s3:
enabled: true
enabled: false
repository: null
imageName: null
imageTag: null
@@ -323,6 +336,9 @@ s3:
loggingOverrideLevel: null
#allow empty folders
allowEmptyFolder: true
# enable user & permission to s3 (need to inject to all services)
enableAuth: false
skipAuthSecretCreation: false
# Suffix of the host name, {bucket}.{domainName}
domainName: ""
@@ -359,17 +375,21 @@ s3:
storageClass: ""
cronjob:
enabled: false
enabled: true
master: "seaweedfs-master:9333"
filer: "seaweedfs-filer-client:8888"
tolerations: ""
nodeSelector: |
sw-backend: "true"
replication:
enable: true
collectionPattern: ""
schedule: "*/7 * * * *"
resources: null
# balance all volumes among volume servers
# ALL|EACH_COLLECTION|<collection_name>
collection: ""
master: ""
filer: ""
tolerations: ""
nodeSelector: |
sw-backend: "true"
certificates:
commonName: "SeaweedFS CA"
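Given the new defaults (the filer-embedded S3 gateway on, the standalone s3 deployment off), turning on S3 authentication should reduce to a values override; a sketch with helm (the release name seaweedfs is illustrative):

```
helm upgrade --install seaweedfs ./k8s/seaweedfs \
  --set filer.s3.enableAuth=true \
  --set filer.s3.allowEmptyFolder=false
```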

other/java/client/pom.xml (2)

@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.5.9</version>
<version>1.6.1</version>
<parent>
<groupId>org.sonatype.oss</groupId>

other/java/client/pom.xml.deploy (2)

@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.5.9</version>
<version>1.6.1</version>
<parent>
<groupId>org.sonatype.oss</groupId>

other/java/client/pom_debug.xml (2)

@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.5.9</version>
<version>1.6.1</version>
<parent>
<groupId>org.sonatype.oss</groupId>

other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java (26)

@@ -23,7 +23,7 @@ public class FileChunkManifest {
}
public static List<FilerProto.FileChunk> resolveChunkManifest(
final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> chunks) throws IOException {
final FilerClient filerClient, List<FilerProto.FileChunk> chunks) throws IOException {
List<FilerProto.FileChunk> dataChunks = new ArrayList<>();
@@ -35,30 +35,30 @@
// IsChunkManifest
LOG.debug("fetching chunk manifest:{}", chunk);
byte[] data = fetchChunk(filerGrpcClient, chunk);
byte[] data = fetchChunk(filerClient, chunk);
FilerProto.FileChunkManifest m = FilerProto.FileChunkManifest.newBuilder().mergeFrom(data).build();
List<FilerProto.FileChunk> resolvedChunks = new ArrayList<>();
for (FilerProto.FileChunk t : m.getChunksList()) {
// avoid deprecated chunk.getFileId()
resolvedChunks.add(t.toBuilder().setFileId(FilerClient.toFileId(t.getFid())).build());
}
dataChunks.addAll(resolveChunkManifest(filerGrpcClient, resolvedChunks));
dataChunks.addAll(resolveChunkManifest(filerClient, resolvedChunks));
}
return dataChunks;
}
private static byte[] fetchChunk(final FilerGrpcClient filerGrpcClient, FilerProto.FileChunk chunk) throws IOException {
private static byte[] fetchChunk(final FilerClient filerClient, FilerProto.FileChunk chunk) throws IOException {
String vid = "" + chunk.getFid().getVolumeId();
FilerProto.Locations locations = filerGrpcClient.vidLocations.get(vid);
FilerProto.Locations locations = filerClient.vidLocations.get(vid);
if (locations == null) {
FilerProto.LookupVolumeRequest.Builder lookupRequest = FilerProto.LookupVolumeRequest.newBuilder();
lookupRequest.addVolumeIds(vid);
FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
FilerProto.LookupVolumeResponse lookupResponse = filerClient
.getBlockingStub().lookupVolume(lookupRequest.build());
locations = lookupResponse.getLocationsMapMap().get(vid);
filerGrpcClient.vidLocations.put(vid, locations);
filerClient.vidLocations.put(vid, locations);
LOG.debug("fetchChunk vid:{} locations:{}", vid, locations);
}
@@ -74,7 +74,7 @@ public class FileChunkManifest {
byte[] chunkData = SeaweedRead.chunkCache.getChunk(chunkView.fileId);
if (chunkData == null) {
LOG.debug("doFetchFullChunkData:{}", chunkView);
chunkData = SeaweedRead.doFetchFullChunkData(filerGrpcClient, chunkView, locations);
chunkData = SeaweedRead.doFetchFullChunkData(filerClient, chunkView, locations);
}
if (chunk.getIsChunkManifest()){
LOG.debug("chunk {} size {}", chunkView.fileId, chunkData.length);
@@ -86,7 +86,7 @@
}
public static List<FilerProto.FileChunk> maybeManifestize(
final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> inputChunks, String parentDirectory) throws IOException {
final FilerClient filerClient, List<FilerProto.FileChunk> inputChunks, String parentDirectory) throws IOException {
// the return variable
List<FilerProto.FileChunk> chunks = new ArrayList<>();
@@ -101,7 +101,7 @@
int remaining = dataChunks.size();
for (int i = 0; i + mergeFactor < dataChunks.size(); i += mergeFactor) {
FilerProto.FileChunk chunk = mergeIntoManifest(filerGrpcClient, dataChunks.subList(i, i + mergeFactor), parentDirectory);
FilerProto.FileChunk chunk = mergeIntoManifest(filerClient, dataChunks.subList(i, i + mergeFactor), parentDirectory);
chunks.add(chunk);
remaining -= mergeFactor;
}
@@ -113,7 +113,7 @@
return chunks;
}
private static FilerProto.FileChunk mergeIntoManifest(final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> dataChunks, String parentDirectory) throws IOException {
private static FilerProto.FileChunk mergeIntoManifest(final FilerClient filerClient, List<FilerProto.FileChunk> dataChunks, String parentDirectory) throws IOException {
// create and serialize the manifest
dataChunks = FilerClient.beforeEntrySerialization(dataChunks);
FilerProto.FileChunkManifest.Builder m = FilerProto.FileChunkManifest.newBuilder().addAllChunks(dataChunks);
@@ -127,8 +127,8 @@
}
FilerProto.FileChunk.Builder manifestChunk = SeaweedWrite.writeChunk(
filerGrpcClient.getReplication(),
filerGrpcClient,
filerClient.getReplication(),
filerClient,
minOffset,
data, 0, data.length, parentDirectory);
manifestChunk.setIsChunkManifest(true);

other/java/client/src/main/java/seaweedfs/client/FilerClient.java (57)

@@ -4,25 +4,18 @@ import com.google.common.base.Strings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
public class FilerClient {
public class FilerClient extends FilerGrpcClient {
private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class);
private final FilerGrpcClient filerGrpcClient;
public FilerClient(String host, int grpcPort) {
filerGrpcClient = new FilerGrpcClient(host, grpcPort);
}
public FilerClient(FilerGrpcClient filerGrpcClient) {
this.filerGrpcClient = filerGrpcClient;
super(host, grpcPort);
}
public static String toFileId(FilerProto.FileId fid) {
@@ -100,9 +93,9 @@ public class FilerClient {
if ("/".equals(path)) {
return true;
}
Path pathObject = Paths.get(path);
String parent = pathObject.getParent().toString();
String name = pathObject.getFileName().toString();
File pathFile = new File(path);
String parent = pathFile.getParent();
String name = pathFile.getName();
mkdirs(parent, mode, uid, gid, userName, groupNames);
@@ -121,13 +114,13 @@
public boolean mv(String oldPath, String newPath) {
Path oldPathObject = Paths.get(oldPath);
String oldParent = oldPathObject.getParent().toString();
String oldName = oldPathObject.getFileName().toString();
File oldPathFile = new File(oldPath);
String oldParent = oldPathFile.getParent();
String oldName = oldPathFile.getName();
Path newPathObject = Paths.get(newPath);
String newParent = newPathObject.getParent().toString();
String newName = newPathObject.getFileName().toString();
File newPathFile = new File(newPath);
String newParent = newPathFile.getParent();
String newName = newPathFile.getName();
return atomicRenameEntry(oldParent, oldName, newParent, newName);
@@ -135,9 +128,9 @@
public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) {
Path pathObject = Paths.get(path);
String parent = pathObject.getParent().toString();
String name = pathObject.getFileName().toString();
File pathFile = new File(path);
String parent = pathFile.getParent();
String name = pathFile.getName();
return deleteEntry(
parent,
@@ -154,9 +147,9 @@
public boolean touch(String path, int mode, int uid, int gid, String userName, String[] groupNames) {
Path pathObject = Paths.get(path);
String parent = pathObject.getParent().toString();
String name = pathObject.getFileName().toString();
File pathFile = new File(path);
String parent = pathFile.getParent();
String name = pathFile.getName();
FilerProto.Entry entry = lookupEntry(parent, name);
if (entry == null) {
@@ -236,7 +229,7 @@
}
public List<FilerProto.Entry> listEntries(String path, String entryPrefix, String lastEntryName, int limit, boolean includeLastEntry) {
Iterator<FilerProto.ListEntriesResponse> iter = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
Iterator<FilerProto.ListEntriesResponse> iter = this.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
.setDirectory(path)
.setPrefix(entryPrefix)
.setStartFromFileName(lastEntryName)
@@ -253,7 +246,7 @@
public FilerProto.Entry lookupEntry(String directory, String entryName) {
try {
FilerProto.Entry entry = filerGrpcClient.getBlockingStub().lookupDirectoryEntry(
FilerProto.Entry entry = this.getBlockingStub().lookupDirectoryEntry(
FilerProto.LookupDirectoryEntryRequest.newBuilder()
.setDirectory(directory)
.setName(entryName)
@@ -274,7 +267,7 @@
public boolean createEntry(String parent, FilerProto.Entry entry) {
try {
FilerProto.CreateEntryResponse createEntryResponse =
filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
this.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
.setDirectory(parent)
.setEntry(entry)
.build());
@@ -291,7 +284,7 @@
public boolean updateEntry(String parent, FilerProto.Entry entry) {
try {
filerGrpcClient.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder()
this.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder()
.setDirectory(parent)
.setEntry(entry)
.build());
@@ -304,7 +297,7 @@
public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive, boolean ignoreRecusiveError) {
try {
filerGrpcClient.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder()
this.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder()
.setDirectory(parent)
.setName(entryName)
.setIsDeleteData(isDeleteFileChunk)
@@ -320,7 +313,7 @@
public boolean atomicRenameEntry(String oldParent, String oldName, String newParent, String newName) {
try {
filerGrpcClient.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder()
this.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder()
.setOldDirectory(oldParent)
.setOldName(oldName)
.setNewDirectory(newParent)
@@ -334,7 +327,7 @@
}
public Iterator<FilerProto.SubscribeMetadataResponse> watch(String prefix, String clientName, long sinceNs) {
return filerGrpcClient.getBlockingStub().subscribeMetadata(FilerProto.SubscribeMetadataRequest.newBuilder()
return this.getBlockingStub().subscribeMetadata(FilerProto.SubscribeMetadataRequest.newBuilder()
.setPathPrefix(prefix)
.setClientName(clientName)
.setSinceNs(sinceNs)

other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java (17)

@@ -16,7 +16,7 @@ public class SeaweedInputStream extends InputStream {
private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class);
private static final IOException EXCEPTION_STREAM_IS_CLOSED = new IOException("Stream is closed!");
private final FilerGrpcClient filerGrpcClient;
private final FilerClient filerClient;
private final String path;
private final FilerProto.Entry entry;
private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
@@ -27,32 +27,31 @@
private boolean closed = false;
public SeaweedInputStream(
final FilerGrpcClient filerGrpcClient,
final FilerClient filerClient,
final String fullpath) throws IOException {
this.filerGrpcClient = filerGrpcClient;
this.path = fullpath;
FilerClient filerClient = new FilerClient(filerGrpcClient);
this.filerClient = filerClient;
this.entry = filerClient.lookupEntry(
SeaweedOutputStream.getParentDirectory(fullpath),
SeaweedOutputStream.getFileName(fullpath));
this.contentLength = SeaweedRead.fileSize(entry);
this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerGrpcClient, entry.getChunksList());
this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList());
LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);
}
public SeaweedInputStream(
final FilerGrpcClient filerGrpcClient,
final FilerClient filerClient,
final String path,
final FilerProto.Entry entry) throws IOException {
this.filerGrpcClient = filerGrpcClient;
this.filerClient = filerClient;
this.path = path;
this.entry = entry;
this.contentLength = SeaweedRead.fileSize(entry);
this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerGrpcClient, entry.getChunksList());
this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList());
LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);
@@ -110,7 +109,7 @@
if (start+len <= entry.getContent().size()) {
entry.getContent().substring(start, start+len).copyTo(buf);
} else {
bytesRead = SeaweedRead.read(this.filerGrpcClient, this.visibleIntervalList, this.position, buf, SeaweedRead.fileSize(entry));
bytesRead = SeaweedRead.read(this.filerClient, this.visibleIntervalList, this.position, buf, SeaweedRead.fileSize(entry));
}
if (bytesRead > Integer.MAX_VALUE) {

18
other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java

@@ -15,7 +15,7 @@ public class SeaweedOutputStream extends OutputStream {
private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class);
protected final boolean supportFlush = true;
-private final FilerGrpcClient filerGrpcClient;
+private final FilerClient filerClient;
private final String path;
private final int bufferSize;
private final int maxConcurrentRequestCount;
@@ -33,17 +33,17 @@ public class SeaweedOutputStream extends OutputStream {
private long outputIndex;
private String replication = "000";
-public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final String fullpath) {
-this(filerGrpcClient, fullpath, "000");
+public SeaweedOutputStream(FilerClient filerClient, final String fullpath) {
+this(filerClient, fullpath, "000");
}
-public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final String fullpath, final String replication) {
-this(filerGrpcClient, fullpath, null, 0, 8 * 1024 * 1024, "000");
+public SeaweedOutputStream(FilerClient filerClient, final String fullpath, final String replication) {
+this(filerClient, fullpath, null, 0, 8 * 1024 * 1024, "000");
}
-public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final String path, FilerProto.Entry.Builder entry,
+public SeaweedOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
final long position, final int bufferSize, final String replication) {
-this.filerGrpcClient = filerGrpcClient;
+this.filerClient = filerClient;
this.replication = replication;
this.path = path;
this.position = position;
@@ -109,7 +109,7 @@ public class SeaweedOutputStream extends OutputStream {
private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
try {
-SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
+SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
} catch (Exception ex) {
throw new IOException(ex);
}
@@ -225,7 +225,7 @@ public class SeaweedOutputStream extends OutputStream {
}
final Future<Void> job = completionService.submit(() -> {
// System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
-SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path);
+SeaweedWrite.writeData(entry, replication, filerClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path);
// System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
ByteBufferPool.release(bufferToWrite);
return null;

18
other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java

@@ -23,7 +23,7 @@ public class SeaweedRead {
static VolumeIdCache volumeIdCache = new VolumeIdCache(4 * 1024);
// returns bytesRead
-public static long read(FilerGrpcClient filerGrpcClient, List<VisibleInterval> visibleIntervals,
+public static long read(FilerClient filerClient, List<VisibleInterval> visibleIntervals,
final long position, final ByteBuffer buf, final long fileSize) throws IOException {
List<ChunkView> chunkViews = viewFromVisibles(visibleIntervals, position, buf.remaining());
@@ -42,7 +42,7 @@ public class SeaweedRead {
}
if (lookupRequest.getVolumeIdsCount() > 0) {
-FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
+FilerProto.LookupVolumeResponse lookupResponse = filerClient
.getBlockingStub().lookupVolume(lookupRequest.build());
Map<String, FilerProto.Locations> vid2Locations = lookupResponse.getLocationsMapMap();
for (Map.Entry<String, FilerProto.Locations> entry : vid2Locations.entrySet()) {
@@ -71,7 +71,7 @@ public class SeaweedRead {
return 0;
}
-int len = readChunkView(filerGrpcClient, startOffset, buf, chunkView, locations);
+int len = readChunkView(filerClient, startOffset, buf, chunkView, locations);
LOG.debug("read [{},{}) {} size {}", startOffset, startOffset + len, chunkView.fileId, chunkView.size);
@@ -93,12 +93,12 @@ public class SeaweedRead {
return readCount;
}
-private static int readChunkView(FilerGrpcClient filerGrpcClient, long startOffset, ByteBuffer buf, ChunkView chunkView, FilerProto.Locations locations) throws IOException {
+private static int readChunkView(FilerClient filerClient, long startOffset, ByteBuffer buf, ChunkView chunkView, FilerProto.Locations locations) throws IOException {
byte[] chunkData = chunkCache.getChunk(chunkView.fileId);
if (chunkData == null) {
-chunkData = doFetchFullChunkData(filerGrpcClient, chunkView, locations);
+chunkData = doFetchFullChunkData(filerClient, chunkView, locations);
chunkCache.setChunk(chunkView.fileId, chunkData);
}
@@ -110,13 +110,13 @@ public class SeaweedRead {
return len;
}
-public static byte[] doFetchFullChunkData(FilerGrpcClient filerGrpcClient, ChunkView chunkView, FilerProto.Locations locations) throws IOException {
+public static byte[] doFetchFullChunkData(FilerClient filerClient, ChunkView chunkView, FilerProto.Locations locations) throws IOException {
byte[] data = null;
IOException lastException = null;
for (long waitTime = 1000L; waitTime < 10 * 1000; waitTime += waitTime / 2) {
for (FilerProto.Location location : locations.getLocationsList()) {
-String url = filerGrpcClient.getChunkUrl(chunkView.fileId, location.getUrl(), location.getPublicUrl());
+String url = filerClient.getChunkUrl(chunkView.fileId, location.getUrl(), location.getPublicUrl());
try {
data = doFetchOneFullChunkData(chunkView, url);
lastException = null;
@@ -221,9 +221,9 @@ public class SeaweedRead {
}
public static List<VisibleInterval> nonOverlappingVisibleIntervals(
-final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> chunkList) throws IOException {
+final FilerClient filerClient, List<FilerProto.FileChunk> chunkList) throws IOException {
-chunkList = FileChunkManifest.resolveChunkManifest(filerGrpcClient, chunkList);
+chunkList = FileChunkManifest.resolveChunkManifest(filerClient, chunkList);
FilerProto.FileChunk[] chunks = chunkList.toArray(new FilerProto.FileChunk[0]);
Arrays.sort(chunks, new Comparator<FilerProto.FileChunk>() {

22
other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java

@@ -23,29 +23,29 @@ public class SeaweedWrite {
public static void writeData(FilerProto.Entry.Builder entry,
final String replication,
-final FilerGrpcClient filerGrpcClient,
+final FilerClient filerClient,
final long offset,
final byte[] bytes,
final long bytesOffset, final long bytesLength,
final String path) throws IOException {
FilerProto.FileChunk.Builder chunkBuilder = writeChunk(
-replication, filerGrpcClient, offset, bytes, bytesOffset, bytesLength, path);
+replication, filerClient, offset, bytes, bytesOffset, bytesLength, path);
synchronized (entry) {
entry.addChunks(chunkBuilder);
}
}
public static FilerProto.FileChunk.Builder writeChunk(final String replication,
-final FilerGrpcClient filerGrpcClient,
+final FilerClient filerClient,
final long offset,
final byte[] bytes,
final long bytesOffset,
final long bytesLength,
final String path) throws IOException {
-FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
+FilerProto.AssignVolumeResponse response = filerClient.getBlockingStub().assignVolume(
FilerProto.AssignVolumeRequest.newBuilder()
-.setCollection(filerGrpcClient.getCollection())
-.setReplication(replication == null ? filerGrpcClient.getReplication() : replication)
+.setCollection(filerClient.getCollection())
+.setReplication(replication == null ? filerClient.getReplication() : replication)
.setDataCenter("")
.setTtlSec(0)
.setPath(path)
@@ -53,11 +53,11 @@ public class SeaweedWrite {
String fileId = response.getFileId();
String auth = response.getAuth();
-String targetUrl = filerGrpcClient.getChunkUrl(fileId, response.getUrl(), response.getPublicUrl());
+String targetUrl = filerClient.getChunkUrl(fileId, response.getUrl(), response.getPublicUrl());
ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY;
byte[] cipherKey = null;
-if (filerGrpcClient.isCipher()) {
+if (filerClient.isCipher()) {
cipherKey = genCipherKey();
cipherKeyString = ByteString.copyFrom(cipherKey);
}
@@ -75,15 +75,15 @@ public class SeaweedWrite {
.setCipherKey(cipherKeyString);
}
-public static void writeMeta(final FilerGrpcClient filerGrpcClient,
+public static void writeMeta(final FilerClient filerClient,
final String parentDirectory,
final FilerProto.Entry.Builder entry) throws IOException {
synchronized (entry) {
-List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerGrpcClient, entry.getChunksList(), parentDirectory);
+List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerClient, entry.getChunksList(), parentDirectory);
entry.clearChunks();
entry.addAllChunks(chunks);
-filerGrpcClient.getBlockingStub().createEntry(
+filerClient.getBlockingStub().createEntry(
FilerProto.CreateEntryRequest.newBuilder()
.setDirectory(parentDirectory)
.setEntry(entry)

13
other/java/client/src/main/proto/filer.proto

@@ -156,6 +156,7 @@ message FuseAttributes {
repeated string group_name = 12; // for hdfs
string symlink_target = 13;
bytes md5 = 14;
+string disk_type = 15;
}
message CreateEntryRequest {
@@ -220,6 +221,7 @@ message AssignVolumeRequest {
string data_center = 5;
string path = 6;
string rack = 7;
+string disk_type = 8;
}
message AssignVolumeResponse {
@@ -270,11 +272,9 @@ message StatisticsRequest {
string replication = 1;
string collection = 2;
string ttl = 3;
+string disk_type = 4;
}
message StatisticsResponse {
-string replication = 1;
-string collection = 2;
-string ttl = 3;
uint64 total_size = 4;
uint64 used_size = 5;
uint64 file_count = 6;
@@ -358,12 +358,7 @@ message FilerConf {
string collection = 2;
string replication = 3;
string ttl = 4;
-enum DiskType {
-NONE = 0;
-HDD = 1;
-SSD = 2;
-}
-DiskType disk_type = 5;
+string disk_type = 5;
bool fsync = 6;
uint32 volume_growth_count = 7;
}
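
The last hunk above replaces the closed DiskType enum with a plain string, so a disk type can be any operator-defined tag rather than only HDD or SSD. A minimal sketch of a path rule built with the regenerated Go types — the prefix and tag values here are illustrative assumptions, not from this commit:

    conf := &filer_pb.FilerConf_PathConf{
        LocationPrefix: "/buckets/hot/", // hypothetical path prefix
        DiskType:       "ssd",           // any tag is now legal, e.g. "nvme"
    }

An empty string reads as "no preference", which is how mergePathConf in weed/filer/filer_conf.go (further down in this diff) treats the unset case.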

4
other/java/examples/pom.xml

@@ -11,13 +11,13 @@
<dependency>
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
-<version>1.5.9</version>
+<version>1.6.1</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-hadoop2-client</artifactId>
-<version>1.5.9</version>
+<version>1.6.1</version>
<scope>compile</scope>
</dependency>
<dependency>

6
other/java/examples/src/main/java/com/seaweedfs/examples/ExampleReadFile.java

@@ -1,6 +1,6 @@
package com.seaweedfs.examples;
-import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.FilerClient;
import seaweedfs.client.SeaweedInputStream;
import java.io.FileInputStream;
@@ -13,7 +13,7 @@ public class ExampleReadFile {
public static void main(String[] args) throws IOException {
-FilerGrpcClient filerGrpcClient = new FilerGrpcClient("localhost", 18888);
+FilerClient filerClient = new FilerClient("localhost", 18888);
long startTime = System.currentTimeMillis();
parseZip("/Users/chris/tmp/test.zip");
@@ -23,7 +23,7 @@ public class ExampleReadFile {
long localProcessTime = startTime2 - startTime;
SeaweedInputStream seaweedInputStream = new SeaweedInputStream(
-filerGrpcClient, "/test.zip");
+filerClient, "/test.zip");
parseZip(seaweedInputStream);
long swProcessTime = System.currentTimeMillis() - startTime2;

13
other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile.java

@@ -1,6 +1,6 @@
package com.seaweedfs.examples;
-import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.FilerClient;
import seaweedfs.client.SeaweedInputStream;
import seaweedfs.client.SeaweedOutputStream;
@@ -13,15 +13,14 @@ public class ExampleWriteFile {
public static void main(String[] args) throws IOException {
-FilerGrpcClient filerGrpcClient = new FilerGrpcClient("localhost", 18888);
+FilerClient filerClient = new FilerClient("localhost", 18888);
-SeaweedInputStream seaweedInputStream = new SeaweedInputStream(
-filerGrpcClient, "/test.zip");
-unZipFiles(filerGrpcClient, seaweedInputStream);
+SeaweedInputStream seaweedInputStream = new SeaweedInputStream(filerClient, "/test.zip");
+unZipFiles(filerClient, seaweedInputStream);
}
-public static void unZipFiles(FilerGrpcClient filerGrpcClient, InputStream is) throws IOException {
+public static void unZipFiles(FilerClient filerClient, InputStream is) throws IOException {
ZipInputStream zin = new ZipInputStream(is);
ZipEntry ze;
while ((ze = zin.getNextEntry()) != null) {
@@ -34,7 +33,7 @@ public class ExampleWriteFile {
continue;
}
-SeaweedOutputStream seaweedOutputStream = new SeaweedOutputStream(filerGrpcClient, "/test/"+filename);
+SeaweedOutputStream seaweedOutputStream = new SeaweedOutputStream(filerClient, "/test/"+filename);
byte[] bytesIn = new byte[16 * 1024];
int read = 0;
while ((read = zin.read(bytesIn))!=-1) {

2
other/java/hdfs2/dependency-reduced-pom.xml

@@ -301,7 +301,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
-<seaweedfs.client.version>1.5.9</seaweedfs.client.version>
+<seaweedfs.client.version>1.6.1</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>
</project>

2
other/java/hdfs2/pom.xml

@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<properties>
-<seaweedfs.client.version>1.5.9</seaweedfs.client.version>
+<seaweedfs.client.version>1.6.1</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>

16
other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java

@@ -24,27 +24,25 @@ public class SeaweedFileSystemStore {
private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);
-private FilerGrpcClient filerGrpcClient;
+private FilerClient filerClient;
private Configuration conf;
public SeaweedFileSystemStore(String host, int port, Configuration conf) {
int grpcPort = 10000 + port;
-filerGrpcClient = new FilerGrpcClient(host, grpcPort);
-filerClient = new FilerClient(filerGrpcClient);
+filerClient = new FilerClient(host, grpcPort);
this.conf = conf;
String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
if (volumeServerAccessMode.equals("publicUrl")) {
-filerGrpcClient.setAccessVolumeServerByPublicUrl();
+filerClient.setAccessVolumeServerByPublicUrl();
} else if (volumeServerAccessMode.equals("filerProxy")) {
-filerGrpcClient.setAccessVolumeServerByFilerProxy();
+filerClient.setAccessVolumeServerByFilerProxy();
}
}
public void close() {
try {
-this.filerGrpcClient.shutdown();
+this.filerClient.shutdown();
} catch (InterruptedException e) {
e.printStackTrace();
}
@@ -219,10 +217,10 @@ public class SeaweedFileSystemStore {
.clearGroupName()
.addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
);
-SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
+SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
}
-return new SeaweedHadoopOutputStream(filerGrpcClient, path.toString(), entry, writePosition, bufferSize, replication);
+return new SeaweedHadoopOutputStream(filerClient, path.toString(), entry, writePosition, bufferSize, replication);
}
@@ -236,7 +234,7 @@ public class SeaweedFileSystemStore {
throw new FileNotFoundException("read non-exist file " + path);
}
-return new SeaweedHadoopInputStream(filerGrpcClient,
+return new SeaweedHadoopInputStream(filerClient,
statistics,
path.toUri().getPath(),
entry);

6
other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java

@@ -5,7 +5,7 @@ package seaweed.hdfs;
import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;
-import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedInputStream;
@@ -19,11 +19,11 @@ public class SeaweedHadoopInputStream extends FSInputStream implements ByteBuffe
private final Statistics statistics;
public SeaweedHadoopInputStream(
-final FilerGrpcClient filerGrpcClient,
+final FilerClient filerClient,
final Statistics statistics,
final String path,
final FilerProto.Entry entry) throws IOException {
-this.seaweedInputStream = new SeaweedInputStream(filerGrpcClient, path, entry);
+this.seaweedInputStream = new SeaweedInputStream(filerClient, path, entry);
this.statistics = statistics;
}

6
other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java

@@ -2,15 +2,15 @@ package seaweed.hdfs;
// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream
-import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedOutputStream;
public class SeaweedHadoopOutputStream extends SeaweedOutputStream {
-public SeaweedHadoopOutputStream(FilerGrpcClient filerGrpcClient, final String path, FilerProto.Entry.Builder entry,
+public SeaweedHadoopOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
final long position, final int bufferSize, final String replication) {
-super(filerGrpcClient, path.toString(), entry, position, bufferSize, replication);
+super(filerClient, path, entry, position, bufferSize, replication);
}
}

2
other/java/hdfs3/dependency-reduced-pom.xml

@@ -309,7 +309,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
-<seaweedfs.client.version>1.5.9</seaweedfs.client.version>
+<seaweedfs.client.version>1.6.1</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>
</project>

2
other/java/hdfs3/pom.xml

@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<properties>
-<seaweedfs.client.version>1.5.9</seaweedfs.client.version>
+<seaweedfs.client.version>1.6.1</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>

16
other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java

@@ -24,27 +24,25 @@ public class SeaweedFileSystemStore {
private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);
-private FilerGrpcClient filerGrpcClient;
+private FilerClient filerClient;
private Configuration conf;
public SeaweedFileSystemStore(String host, int port, Configuration conf) {
int grpcPort = 10000 + port;
-filerGrpcClient = new FilerGrpcClient(host, grpcPort);
-filerClient = new FilerClient(filerGrpcClient);
+filerClient = new FilerClient(host, grpcPort);
this.conf = conf;
String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
if (volumeServerAccessMode.equals("publicUrl")) {
-filerGrpcClient.setAccessVolumeServerByPublicUrl();
+filerClient.setAccessVolumeServerByPublicUrl();
} else if (volumeServerAccessMode.equals("filerProxy")) {
-filerGrpcClient.setAccessVolumeServerByFilerProxy();
+filerClient.setAccessVolumeServerByFilerProxy();
}
}
public void close() {
try {
-this.filerGrpcClient.shutdown();
+this.filerClient.shutdown();
} catch (InterruptedException e) {
e.printStackTrace();
}
@@ -219,10 +217,10 @@ public class SeaweedFileSystemStore {
.clearGroupName()
.addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
);
-SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
+SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
}
-return new SeaweedHadoopOutputStream(filerGrpcClient, path.toString(), entry, writePosition, bufferSize, replication);
+return new SeaweedHadoopOutputStream(filerClient, path.toString(), entry, writePosition, bufferSize, replication);
}
@@ -236,7 +234,7 @@ public class SeaweedFileSystemStore {
throw new FileNotFoundException("read non-exist file " + path);
}
-return new SeaweedHadoopInputStream(filerGrpcClient,
+return new SeaweedHadoopInputStream(filerClient,
statistics,
path.toUri().getPath(),
entry);

6
other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java

@@ -5,7 +5,7 @@ package seaweed.hdfs;
import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;
-import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedInputStream;
@@ -19,11 +19,11 @@ public class SeaweedHadoopInputStream extends FSInputStream implements ByteBuffe
private final Statistics statistics;
public SeaweedHadoopInputStream(
-final FilerGrpcClient filerGrpcClient,
+final FilerClient filerClient,
final Statistics statistics,
final String path,
final FilerProto.Entry entry) throws IOException {
-this.seaweedInputStream = new SeaweedInputStream(filerGrpcClient, path, entry);
+this.seaweedInputStream = new SeaweedInputStream(filerClient, path, entry);
this.statistics = statistics;
}

6
other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java

@@ -4,7 +4,7 @@ package seaweed.hdfs;
import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable;
-import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedOutputStream;
@@ -13,9 +13,9 @@ import java.util.Locale;
public class SeaweedHadoopOutputStream extends SeaweedOutputStream implements Syncable, StreamCapabilities {
-public SeaweedHadoopOutputStream(FilerGrpcClient filerGrpcClient, final String path, FilerProto.Entry.Builder entry,
+public SeaweedHadoopOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
final long position, final int bufferSize, final String replication) {
-super(filerGrpcClient, path, entry, position, bufferSize, replication);
+super(filerClient, path, entry, position, bufferSize, replication);
}
/**

2
unmaintained/change_superblock/change_superblock.go

@@ -92,7 +92,7 @@ func main() {
header := superBlock.Bytes()
-if n, e := datFile.WriteAt(header, 0); n == 0 || e != nil {
+if n, e := datBackend.WriteAt(header, 0); n == 0 || e != nil {
glog.Fatalf("cannot write super block: %v", e)
}

2
unmaintained/diff_volume_servers/diff_volume_servers.go

@@ -168,7 +168,7 @@ func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int6
size: size,
}
}
-if actual := offset.ToAcutalOffset(); actual > maxOffset {
+if actual := offset.ToActualOffset(); actual > maxOffset {
maxOffset = actual
}
return nil

2
unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go

@@ -45,7 +45,7 @@ func main() {
defer wg.Done()
client := &http.Client{Transport: &http.Transport{
MaxConnsPerHost: 1024,
-MaxIdleConns: 1024,
+MaxIdleConnsPerHost: 1024,
}}
r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x)))
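
For context on this one-line fix: the benchmark hammers a single filer host, and Go's http.Transport keeps at most MaxIdleConnsPerHost idle connections per host (the default is only 2); MaxIdleConns caps the whole pool, so idle connections to the one host were still being dropped and redialed. The corrected transport, as in the hunk:

    client := &http.Client{Transport: &http.Transport{
        MaxConnsPerHost:     1024,
        MaxIdleConnsPerHost: 1024, // default is 2, far too low for a single-host benchmark
    }}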

2
unmaintained/volume_tailer/volume_tailer.go

@@ -37,7 +37,7 @@ func main() {
sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano()
}
-err := operation.TailVolume(*master, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
+err := operation.TailVolume(func()string{return *master}, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
if n.Size == 0 {
println("-", n.String())
return nil

2
weed/command/backup.go

@@ -72,7 +72,7 @@ func runBackup(cmd *Command, args []string) bool {
vid := needle.VolumeId(*s.volumeId)
// find volume location, replication, ttl info
-lookup, err := operation.Lookup(*s.master, vid.String())
+lookup, err := operation.Lookup(func() string { return *s.master }, vid.String())
if err != nil {
fmt.Printf("Error looking up volume %d: %v\n", vid, err)
return true
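
This is part of a refactor that runs through most of the command changes below: operation helpers (Lookup, Assign, TailVolume, DeleteFiles, SubmitFiles, LookupFileId) now accept an operation.GetMasterFn — a func() string — instead of a fixed master address, so each call re-resolves the current master leader. Both a closure over a flag and a method value satisfy it; a small sketch under that assumption:

    masterFn := func() string { return *s.master } // closure over a command-line flag
    lookup, err := operation.Lookup(masterFn, vid.String())

    // benchmark.go below passes a method value instead:
    //   operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar)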

9
weed/command/benchmark.go

@@ -35,6 +35,7 @@ type BenchmarkOptions struct {
sequentialRead *bool
collection *string
replication *string
+diskType *string
cpuprofile *string
maxCpu *int
grpcDialOption grpc.DialOption
@@ -62,6 +63,7 @@ func init() {
b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file")
b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection")
b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type")
+b.diskType = cmdBenchmark.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write")
@@ -234,13 +236,14 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
Count: 1,
Collection: *b.collection,
Replication: *b.replication,
+DiskType: *b.diskType,
}
-if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil {
+if assignResult, err := operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil {
fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection
if !isSecure && assignResult.Auth != "" {
isSecure = true
}
-if _, err := fp.Upload(0, b.masterClient.GetMaster(), false, assignResult.Auth, b.grpcDialOption); err == nil {
+if _, err := fp.Upload(0, b.masterClient.GetMaster, false, assignResult.Auth, b.grpcDialOption); err == nil {
if random.Intn(100) < *b.deletePercentage {
s.total++
delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
@@ -290,7 +293,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
}
var bytes []byte
for _, url := range urls {
-bytes, _, err = util.Get(url)
+bytes, _, err = util.FastGet(url)
if err == nil {
break
}

2
weed/command/command.go

@@ -1,8 +1,8 @@
package command
import (
-"flag"
"fmt"
+flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
"os"
"strings"
)

12
weed/command/download.go

@@ -44,15 +44,15 @@ var cmdDownload = &Command{
func runDownload(cmd *Command, args []string) bool {
for _, fid := range args {
-if e := downloadToFile(*d.server, fid, util.ResolvePath(*d.dir)); e != nil {
+if e := downloadToFile(func() string { return *d.server }, fid, util.ResolvePath(*d.dir)); e != nil {
fmt.Println("Download Error: ", fid, e)
}
}
return true
}
-func downloadToFile(server, fileId, saveDir string) error {
-fileUrl, lookupError := operation.LookupFileId(server, fileId)
+func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error {
+fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
if lookupError != nil {
return lookupError
}
@@ -83,7 +83,7 @@ func downloadToFile(server, fileId, saveDir string) error {
fids := strings.Split(string(content), "\n")
for _, partId := range fids {
var n int
-_, part, err := fetchContent(*d.server, partId)
+_, part, err := fetchContent(masterFn, partId)
if err == nil {
n, err = f.Write(part)
}
@@ -103,8 +103,8 @@ func downloadToFile(server, fileId, saveDir string) error {
return nil
}
-func fetchContent(server string, fileId string) (filename string, content []byte, e error) {
-fileUrl, lookupError := operation.LookupFileId(server, fileId)
+func fetchContent(masterFn operation.GetMasterFn, fileId string) (filename string, content []byte, e error) {
+fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
if lookupError != nil {
return "", nil, lookupError
}

2
weed/command/export.go

@@ -113,7 +113,7 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
nv, ok := needleMap.Get(n.Id)
glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
-if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToAcutalOffset() == offset {
+if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToActualOffset() == offset {
if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
n.LastModified, newerThanUnix)

4
weed/command/filer.go

@@ -52,7 +52,7 @@ type FilerOptions struct {
func init() {
cmdFiler.Run = runFiler // break init cycle
f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
-f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
+f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection")
f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
f.bindIp = cmdFiler.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
@@ -83,6 +83,8 @@ func init() {
filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway")
filerWebDavOptions.port = cmdFiler.Flag.Int("webdav.port", 7333, "webdav server http listen port")
filerWebDavOptions.collection = cmdFiler.Flag.String("webdav.collection", "", "collection to create the files")
+filerWebDavOptions.replication = cmdFiler.Flag.String("webdav.replication", "", "replication to create the files")
+filerWebDavOptions.disk = cmdFiler.Flag.String("webdav.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
filerWebDavOptions.tlsPrivateKey = cmdFiler.Flag.String("webdav.key.file", "", "path to the TLS private key file")
filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")

8
weed/command/filer_copy.go

@@ -37,6 +37,7 @@ type CopyOptions struct {
replication *string
collection *string
ttl *string
+diskType *string
maxMB *int
masterClient *wdclient.MasterClient
concurrenctFiles *int
@@ -54,6 +55,7 @@ func init() {
copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
+copy.diskType = cmdCopy.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
@@ -311,6 +313,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
+DiskType: *worker.options.diskType,
Path: task.destinationUrlPath,
}
@@ -405,6 +408,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
+DiskType: *worker.options.diskType,
Path: task.destinationUrlPath + fileName,
}
@@ -459,7 +463,9 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
for _, chunk := range chunks {
fileIds = append(fileIds, chunk.FileId)
}
-operation.DeleteFiles(copy.masters[0], false, worker.options.grpcDialOption, fileIds)
+operation.DeleteFiles(func() string {
+return copy.masters[0]
+}, false, worker.options.grpcDialOption, fileIds)
return uploadError
}

18
weed/command/filer_sync.go

@@ -31,6 +31,8 @@ type SyncOptions struct {
bCollection *string
aTtlSec *int
bTtlSec *int
+aDiskType *string
+bDiskType *string
aDebug *bool
bDebug *bool
aProxyByFiler *bool
@@ -56,6 +58,8 @@ func init() {
syncOptions.bCollection = cmdFilerSynchronize.Flag.String("b.collection", "", "collection on filer B")
syncOptions.aTtlSec = cmdFilerSynchronize.Flag.Int("a.ttlSec", 0, "ttl in seconds on filer A")
syncOptions.bTtlSec = cmdFilerSynchronize.Flag.Int("b.ttlSec", 0, "ttl in seconds on filer B")
+syncOptions.aDiskType = cmdFilerSynchronize.Flag.String("a.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag on filer A")
+syncOptions.bDiskType = cmdFilerSynchronize.Flag.String("b.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag on filer B")
syncOptions.aProxyByFiler = cmdFilerSynchronize.Flag.Bool("a.filerProxy", false, "read and write file chunks by filer A instead of volume servers")
syncOptions.bProxyByFiler = cmdFilerSynchronize.Flag.Bool("b.filerProxy", false, "read and write file chunks by filer B instead of volume servers")
syncOptions.aDebug = cmdFilerSynchronize.Flag.Bool("a.debug", false, "debug mode to print out filer A received files")
@@ -90,9 +94,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
go func() {
for {
-err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler,
-*syncOptions.filerB, *syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler,
-*syncOptions.bDebug)
+err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB,
+*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
time.Sleep(1747 * time.Millisecond)
@@ -103,9 +106,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
if !*syncOptions.isActivePassive {
go func() {
for {
-err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler,
-*syncOptions.filerA, *syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler,
-*syncOptions.aDebug)
+err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA,
+*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
time.Sleep(2147 * time.Millisecond)
@@ -120,7 +122,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
}
func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string,
-replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler, debug bool) error {
+replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error {
// read source filer signature
sourceFilerSignature, sourceErr := replication.ReadFilerSignature(grpcDialOption, sourceFiler)
@@ -146,7 +148,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
filerSource := &source.FilerSource{}
filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler)
filerSink := &filersink.FilerSink{}
-filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, grpcDialOption, sinkWriteChunkByFiler)
+filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
filerSink.SetSourceFiler(filerSource)
processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {

2
weed/command/mount.go

@@ -12,6 +12,7 @@ type MountOptions struct {
dirAutoCreate *bool
collection *string
replication *string
+diskType *string
ttlSec *int
chunkSizeLimitMB *int
concurrentWriters *int
@@ -41,6 +42,7 @@ func init() {
mountOptions.dirAutoCreate = cmdMount.Flag.Bool("dirAutoCreate", false, "auto create the directory to mount to")
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
+mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 128, "limit concurrent goroutine writers if not 0")

4
weed/command/mount_std.go

@@ -5,6 +5,7 @@ package command
import (
"context"
"fmt"
+"github.com/chrislusf/seaweedfs/weed/storage/types"
"os"
"os/user"
"path"
@@ -168,6 +169,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
mountRoot = mountRoot[0 : len(mountRoot)-1]
}
+diskType := types.ToDiskType(*option.diskType)
seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
MountDirectory: dir,
FilerAddress: filer,
@@ -177,6 +180,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
Collection: *option.collection,
Replication: *option.replication,
TtlSec: int32(*option.ttlSec),
+DiskType: diskType,
ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
ConcurrentWriters: *option.concurrentWriters,
CacheDir: *option.cacheDir,
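
Here the raw -disk flag is converted to a typed value with types.ToDiskType before being handed to the filesystem option. Judging from the flag help text, the mapping is roughly as sketched below; the exact behavior lives in weed/storage/types and is an assumption here:

    diskType := types.ToDiskType("ssd") // "" and "hdd" select the default hard-drive type; other tags pass through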

6
weed/command/scaffold.go

@@ -124,7 +124,7 @@ interpolateParams = false
[mysql2] # or memsql, tidb
enabled = false
createTable = """
-CREATE TABLE IF NOT EXISTS %s (
+CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
dirhash BIGINT,
name VARCHAR(1000),
directory TEXT,
@@ -160,11 +160,12 @@ schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
+connection_max_lifetime_seconds = 0
[postgres2]
enabled = false
createTable = """
-CREATE TABLE IF NOT EXISTS %s (
+CREATE TABLE IF NOT EXISTS "%s" (
dirhash BIGINT,
name VARCHAR(65535),
directory VARCHAR(65535),
@@ -181,6 +182,7 @@ schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
+connection_max_lifetime_seconds = 0
[cassandra]
# CREATE TABLE filemeta (

3
weed/command/server.go

@@ -102,6 +102,7 @@ func init() {
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
+serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.")
serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
@@ -120,6 +121,8 @@ func init() {
webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port")
webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files")
webdavOptions.replication = cmdServer.Flag.String("webdav.replication", "", "replication to create the files")
+webdavOptions.disk = cmdServer.Flag.String("webdav.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
webdavOptions.tlsPrivateKey = cmdServer.Flag.String("webdav.key.file", "", "path to the TLS private key file")
webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")

6
weed/command/upload.go

@@ -27,6 +27,7 @@ type UploadOptions struct {
collection *string
dataCenter *string
ttl *string
+diskType *string
maxMB *int
usePublicUrl *bool
}
@@ -40,6 +41,7 @@ func init() {
upload.replication = cmdUpload.Flag.String("replication", "", "replication type")
upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name")
upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name")
+upload.diskType = cmdUpload.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit")
upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server")
@@ -94,7 +96,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
return e
}
-results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
+results, e := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
if e != nil {
@@ -111,7 +113,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
fmt.Println(e.Error())
}
-results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
+results, _ := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
}

20
weed/command/volume.go

@@ -2,6 +2,7 @@ package command
import (
"fmt"
+"github.com/chrislusf/seaweedfs/weed/storage/types"
"net/http"
httppprof "net/http/pprof"
"os"
@@ -49,6 +50,7 @@ type VolumeServerOptions struct {
rack *string
whiteList []string
indexType *string
+diskType *string
fixJpgOrientation *bool
readRedirect *bool
cpuProfile *string
@@ -76,6 +78,7 @@ func init() {
v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name")
v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name")
v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
+v.diskType = cmdVolume.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.")
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
@@ -167,6 +170,21 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
}
+// set disk types
+var diskTypes []types.DiskType
+diskTypeStrings := strings.Split(*v.diskType, ",")
+for _, diskTypeString := range diskTypeStrings {
+diskTypes = append(diskTypes, types.ToDiskType(diskTypeString))
+}
+if len(diskTypes) == 1 && len(v.folders) > 1 {
+for i := 0; i < len(v.folders)-1; i++ {
+diskTypes = append(diskTypes, diskTypes[0])
+}
+}
+if len(v.folders) != len(diskTypes) {
+glog.Fatalf("%d directories by -dir, but only %d disk types is set by -disk", len(v.folders), len(diskTypes))
+}
// security related white list configuration
if volumeWhiteListOption != "" {
v.whiteList = strings.Split(volumeWhiteListOption, ",")
@@ -212,7 +230,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
*v.ip, *v.port, *v.publicUrl,
-v.folders, v.folderMaxLimits, v.minFreeSpacePercents,
+v.folders, v.folderMaxLimits, v.minFreeSpacePercents, diskTypes,
*v.idxFolder,
volumeNeedleMapKind,
strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
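
The added block in the middle hunk broadcasts a single -disk value across all -dir folders, matching how the other per-folder options are validated. Its effect in isolation:

    folders := []string{"/d1", "/d2", "/d3"} // from -dir
    diskTypes := []string{"ssd"}             // from -disk=ssd
    if len(diskTypes) == 1 && len(folders) > 1 {
        for i := 0; i < len(folders)-1; i++ {
            diskTypes = append(diskTypes, diskTypes[0]) // pad with the first value
        }
    }
    // diskTypes is now ["ssd", "ssd", "ssd"]; -disk=ssd,hdd with three folders stays mismatched and is fatal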

6
weed/command/webdav.go

@@ -25,6 +25,8 @@ type WebDavOption struct {
filer *string
port *int
collection *string
+replication *string
+disk *string
tlsPrivateKey *string
tlsCertificate *string
cacheDir *string
@@ -36,6 +38,8 @@ func init() {
webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address")
webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port")
webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files")
+webDavStandaloneOptions.replication = cmdWebDav.Flag.String("replication", "", "replication to create the files")
+webDavStandaloneOptions.disk = cmdWebDav.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file")
webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
@@ -107,6 +111,8 @@ func (wo *WebDavOption) startWebDav() bool {
FilerGrpcAddress: filerGrpcAddress,
GrpcDialOption: grpcDialOption,
Collection: *wo.collection,
+Replication: *wo.replication,
+DiskType: *wo.disk,
Uid: uid,
Gid: gid,
Cipher: cipher,

2
weed/filer/abstract_sql/abstract_sql_store.go

@@ -107,7 +107,7 @@ func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.Full
}
if _, found := store.dbs[bucket]; !found {
-if err = store.CreateTable(ctx, bucket); err != nil {
+if err = store.CreateTable(ctx, bucket); err == nil {
store.dbs[bucket] = true
}
}
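
This one-character change is a logic fix rather than part of the renames: the old condition cached the bucket's table as existing exactly when CreateTable had failed, so later statements would run against a table that was never created. Memoize-on-success is the intended shape:

    if _, found := store.dbs[bucket]; !found {
        if err = store.CreateTable(ctx, bucket); err == nil {
            store.dbs[bucket] = true // remember the table only once it actually exists
        }
    }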

1
weed/filer/entry.go

@@ -18,6 +18,7 @@ type Attr struct {
Replication string // replication
Collection string // collection name
TtlSec int32 // ttl in seconds
+DiskType string
UserName string
GroupNames []string
SymlinkTarget string

2
weed/filer/entry_codec.go

@@ -56,6 +56,7 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes {
Collection: entry.Attr.Collection,
Replication: entry.Attr.Replication,
TtlSec: entry.Attr.TtlSec,
+DiskType: entry.Attr.DiskType,
UserName: entry.Attr.UserName,
GroupName: entry.Attr.GroupNames,
SymlinkTarget: entry.Attr.SymlinkTarget,
@@ -81,6 +82,7 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
t.Collection = attr.Collection
t.Replication = attr.Replication
t.TtlSec = attr.TtlSec
+t.DiskType = attr.DiskType
t.UserName = attr.UserName
t.GroupNames = attr.GroupName
t.SymlinkTarget = attr.SymlinkTarget

2
weed/filer/filechunk_manifest.go

@@ -102,7 +102,7 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool
for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
for _, urlString := range urlStrings {
-shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
+shouldRetry, err = util.FastReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
buffer.Write(data)
})
if !shouldRetry {

18
weed/filer/filer_conf.go

@@ -46,17 +46,19 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
return fc.LoadFromBytes(entry.Content)
}
-return fc.loadFromChunks(filer, entry.Chunks)
+return fc.loadFromChunks(filer, entry.Content, entry.Chunks)
}
-func (fc *FilerConf) loadFromChunks(filer *Filer, chunks []*filer_pb.FileChunk) (err error) {
-data, err := filer.readEntry(chunks)
-if err != nil {
-glog.Errorf("read filer conf content: %v", err)
-return
+func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk) (err error) {
+if len(content) == 0 {
+content, err = filer.readEntry(chunks)
+if err != nil {
+glog.Errorf("read filer conf content: %v", err)
+return
+}
}
-return fc.LoadFromBytes(data)
+return fc.LoadFromBytes(content)
}
func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
@@ -116,7 +118,7 @@ func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
a.Collection = util.Nvl(b.Collection, a.Collection)
a.Replication = util.Nvl(b.Replication, a.Replication)
a.Ttl = util.Nvl(b.Ttl, a.Ttl)
-if b.DiskType != filer_pb.FilerConf_PathConf_NONE {
+if b.DiskType != "" {
a.DiskType = b.DiskType
}
a.Fsync = b.Fsync || a.Fsync
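
loadFromChunks now prefers the entry's inline Content and only reads chunks when that is empty, sparing small configuration files a volume-server round trip. The fallback pattern in isolation — a hypothetical helper, not code from this commit:

    func confBytes(content []byte, readChunks func() ([]byte, error)) ([]byte, error) {
        if len(content) > 0 {
            return content, nil // inline content wins; no chunk fetch needed
        }
        return readChunks() // fall back to reading the chunked payload
    }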

5
weed/filer/filer_notify.go

@@ -55,7 +55,10 @@ func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry
if notification.Queue != nil {
glog.V(3).Infof("notifying entry update %v", fullpath)
-notification.Queue.SendMessage(fullpath, eventNotification)
+if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil {
+// throw message
+glog.Error(err)
+}
}
f.logMetaEvent(ctx, fullpath, eventNotification)

2
weed/filer/filer_notify_append.go

@@ -56,7 +56,7 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi
WritableVolumeCount: rule.VolumeGrowthCount,
}
-assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
+assignResult, err := operation.Assign(f.GetMaster, f.GrpcDialOption, assignRequest)
if err != nil {
return nil, nil, fmt.Errorf("AssignVolume: %v", err)
}

2
weed/filer/filer_on_meta_event.go

@@ -40,7 +40,7 @@ func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) {
func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
fc := NewFilerConf()
-err := fc.loadFromChunks(f, entry.Chunks)
+err := fc.loadFromChunks(f, entry.Content, entry.Chunks)
if err != nil {
glog.Errorf("read filer conf chunks: %v", err)
return

14
weed/filer/mysql/mysql_sql_gen.go

@@ -16,31 +16,31 @@ var (
)
func (gen *SqlGenMysql) GetSqlInsert(bucket string) string {
-return fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES(?,?,?,?)", bucket)
+return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", bucket)
}
func (gen *SqlGenMysql) GetSqlUpdate(bucket string) string {
-return fmt.Sprintf("UPDATE %s SET meta=? WHERE dirhash=? AND name=? AND directory=?", bucket)
+return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlFind(bucket string) string {
-return fmt.Sprintf("SELECT meta FROM %s WHERE dirhash=? AND name=? AND directory=?", bucket)
+return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlDelete(bucket string) string {
-return fmt.Sprintf("DELETE FROM %s WHERE dirhash=? AND name=? AND directory=?", bucket)
+return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(bucket string) string {
-return fmt.Sprintf("DELETE FROM %s WHERE dirhash=? AND directory=?", bucket)
+return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlListExclusive(bucket string) string {
-return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
+return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
}
func (gen *SqlGenMysql) GetSqlListInclusive(bucket string) string {
-return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
+return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
}
func (gen *SqlGenMysql) GetSqlCreateTable(bucket string) string {
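
Every generated MySQL statement now wraps the table name in backticks. With SupportBucketTable enabled, bucket names become table names, and a bucket may legally contain characters such as '-' that are invalid in an unquoted MySQL identifier. With a hypothetical bucket name:

    stmt := fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", "my-bucket")
    // -> INSERT INTO `my-bucket` (...), which would be a syntax error without the quoting

Row values are still bound as placeholders; only the identifier needed quoting. The Postgres generator below makes the same change with double quotes, its standard identifier quoting.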

6
weed/filer/mysql/mysql_store.go

@@ -47,12 +47,14 @@ func (store *MysqlStore) initialize(user, password, hostname string, port int, d
store.SupportBucketTable = false
store.SqlGenerator = &SqlGenMysql{
CreateTableSqlTemplate: "",
-DropTableSqlTemplate: "drop table %s",
+DropTableSqlTemplate: "drop table `%s`",
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
+adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "<ADAPTED>", hostname, port, database)
if interpolateParams {
sqlUrl += "&interpolateParams=true"
+adaptedSqlUrl += "&interpolateParams=true"
}
var dbErr error
@@ -60,7 +62,7 @@ func (store *MysqlStore) initialize(user, password, hostname string, port int, d
if dbErr != nil {
store.DB.Close()
store.DB = nil
-return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
+return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err)
}
store.DB.SetMaxIdleConns(maxIdle)
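
The second DSN exists purely for diagnostics: sqlUrl keeps the real password for sql.Open, while adaptedSqlUrl substitutes a placeholder so a failed connection no longer leaks credentials into logs. The shape of the pattern, with an illustrative DSN layout standing in for CONNECTION_URL_PATTERN:

    sqlUrl := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", user, password, hostname, port, database)
    adaptedSqlUrl := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", user, "<ADAPTED>", hostname, port, database)
    db, dbErr := sql.Open("mysql", sqlUrl)
    if dbErr != nil {
        return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, dbErr) // password never printed
    }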

6
weed/filer/mysql2/mysql2_store.go

@@ -50,12 +50,14 @@ func (store *MysqlStore2) initialize(createTable, user, password, hostname strin
store.SupportBucketTable = true
store.SqlGenerator = &mysql.SqlGenMysql{
CreateTableSqlTemplate: createTable,
-DropTableSqlTemplate: "drop table %s",
+DropTableSqlTemplate: "drop table `%s`",
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
+adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "<ADAPTED>", hostname, port, database)
if interpolateParams {
sqlUrl += "&interpolateParams=true"
+adaptedSqlUrl += "&interpolateParams=true"
}
var dbErr error
@@ -63,7 +65,7 @@ func (store *MysqlStore2) initialize(createTable, user, password, hostname strin
if dbErr != nil {
store.DB.Close()
store.DB = nil
-return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
+return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err)
}
store.DB.SetMaxIdleConns(maxIdle)

14
weed/filer/postgres/postgres_sql_gen.go

@@ -17,31 +17,31 @@ var (
)
func (gen *SqlGenPostgres) GetSqlInsert(bucket string) string {
-return fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)", bucket)
+return fmt.Sprintf(`INSERT INTO "%s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)`, bucket)
}
func (gen *SqlGenPostgres) GetSqlUpdate(bucket string) string {
-return fmt.Sprintf("UPDATE %s SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4", bucket)
+return fmt.Sprintf(`UPDATE "%s" SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4`, bucket)
}
func (gen *SqlGenPostgres) GetSqlFind(bucket string) string {
-return fmt.Sprintf("SELECT meta FROM %s WHERE dirhash=$1 AND name=$2 AND directory=$3", bucket)
+return fmt.Sprintf(`SELECT meta FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, bucket)
}
func (gen *SqlGenPostgres) GetSqlDelete(bucket string) string {
-return fmt.Sprintf("DELETE FROM %s WHERE dirhash=$1 AND name=$2 AND directory=$3", bucket)
+return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, bucket)
}
func (gen *SqlGenPostgres) GetSqlDeleteFolderChildren(bucket string) string {
-return fmt.Sprintf("DELETE FROM %s WHERE dirhash=$1 AND directory=$2", bucket)
+return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND directory=$2`, bucket)
}
func (gen *SqlGenPostgres) GetSqlListExclusive(bucket string) string {
-return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5", bucket)
+return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, bucket)
}
func (gen *SqlGenPostgres) GetSqlListInclusive(bucket string) string {
-return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5", bucket)
+return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, bucket)
}
func (gen *SqlGenPostgres) GetSqlCreateTable(bucket string) string {

13
weed/filer/postgres/postgres_store.go

@@ -3,6 +3,7 @@ package postgres
import (
"database/sql"
"fmt"
+"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
@@ -37,40 +38,46 @@ func (store *PostgresStore) Initialize(configuration util.Configuration, prefix
configuration.GetString(prefix+"sslmode"),
configuration.GetInt(prefix+"connection_max_idle"),
configuration.GetInt(prefix+"connection_max_open"),
+configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
)
}
-func (store *PostgresStore) initialize(user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen int) (err error) {
+func (store *PostgresStore) initialize(user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) {
store.SupportBucketTable = false
store.SqlGenerator = &SqlGenPostgres{
CreateTableSqlTemplate: "",
-DropTableSqlTemplate: "drop table %s",
+DropTableSqlTemplate: `drop table "%s"`,
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
if user != "" {
sqlUrl += " user=" + user
}
+adaptedSqlUrl := sqlUrl
if password != "" {
sqlUrl += " password=" + password
+adaptedSqlUrl += " password=ADAPTED"
}
if database != "" {
sqlUrl += " dbname=" + database
+adaptedSqlUrl += " dbname=" + database
}
if schema != "" {
sqlUrl += " search_path=" + schema
+adaptedSqlUrl += " search_path=" + schema
}
var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
store.DB.Close()
store.DB = nil
-return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
+return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err)
}
store.DB.SetMaxIdleConns(maxIdle)
store.DB.SetMaxOpenConns(maxOpen)
+store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
if err = store.DB.Ping(); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
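
Both Postgres stores now accept a connection_max_lifetime_seconds setting and apply it with SetConnMaxLifetime alongside the existing idle and open limits. A minimal sketch of the three database/sql pool knobs together, assuming the lib/pq driver behind the "postgres" name (DSN and values are placeholders):

package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/lib/pq" // assumed driver, matching the "postgres" name below
)

func openPool(dsn string, maxIdle, maxOpen, maxLifetimeSeconds int) (*sql.DB, error) {
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		return nil, err
	}
	db.SetMaxIdleConns(maxIdle)
	db.SetMaxOpenConns(maxOpen)
	// Recycling connections bounds the damage from stale server-side state
	// and lets load balancers rebalance long-lived clients.
	db.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
	return db, db.Ping()
}

func main() {
	if _, err := openPool("host=localhost port=5432 sslmode=disable", 10, 100, 600); err != nil {
		log.Println("connect:", err)
	}
}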

13
weed/filer/postgres2/postgres2_store.go

@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"fmt"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
@@ -40,40 +41,46 @@ func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix
configuration.GetString(prefix+"sslmode"),
configuration.GetInt(prefix+"connection_max_idle"),
configuration.GetInt(prefix+"connection_max_open"),
configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
)
}
func (store *PostgresStore2) initialize(createTable, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen int) (err error) {
func (store *PostgresStore2) initialize(createTable, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) {
store.SupportBucketTable = true
store.SqlGenerator = &postgres.SqlGenPostgres{
CreateTableSqlTemplate: createTable,
DropTableSqlTemplate: "drop table %s",
DropTableSqlTemplate: `drop table "%s"`,
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
if user != "" {
sqlUrl += " user=" + user
}
adaptedSqlUrl := sqlUrl
if password != "" {
sqlUrl += " password=" + password
adaptedSqlUrl += " password=ADAPTED"
}
if database != "" {
sqlUrl += " dbname=" + database
adaptedSqlUrl += " dbname=" + database
}
if schema != "" {
sqlUrl += " search_path=" + schema
adaptedSqlUrl += " search_path=" + schema
}
var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
store.DB.Close()
store.DB = nil
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err)
}
store.DB.SetMaxIdleConns(maxIdle)
store.DB.SetMaxOpenConns(maxOpen)
store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
if err = store.DB.Ping(); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)

2
weed/filer/read_write.go

@@ -35,7 +35,7 @@ func ReadContent(filerAddress string, dir, name string) ([]byte, error) {
target := fmt.Sprintf("http://%s%s/%s", filerAddress, dir, name)
data, _, err := util.Get(target)
data, _, err := util.FastGet(target)
return data, err
}

2
weed/filer/stream.go

@@ -181,7 +181,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
var buffer bytes.Buffer
var shouldRetry bool
for _, urlString := range urlStrings {
shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
shouldRetry, err = util.FastReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
buffer.Write(data)
})
if !shouldRetry {

9
weed/filesys/filehandle.go

@@ -192,16 +192,13 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
if fh.f.isOpen == 1 {
if err := fh.doFlush(ctx, req.Header); err != nil {
glog.Errorf("Release doFlush %s: %v", fh.f.Name, err)
return err
}
fh.f.isOpen--
fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
if closer, ok := fh.f.reader.(io.Closer); ok {
closer.Close()
if closer != nil {
closer.Close()
}
}
fh.f.reader = nil
}
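
Release now closes the handle's reader, when it has one, through an io.Closer type assertion before dropping the reference; only some reader implementations hold resources that need closing. The pattern in isolation:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// closeIfCloser releases a reader that may or may not hold resources:
// bytes.Reader has no Close, *os.File does. The type assertion picks the
// right behavior at runtime; the nil check mirrors the defensive guard above.
func closeIfCloser(r io.Reader) {
	if closer, ok := r.(io.Closer); ok && closer != nil {
		closer.Close()
	}
}

func main() {
	closeIfCloser(bytes.NewReader([]byte("no close needed")))

	f, err := os.Open("/etc/hostname") // any readable file
	if err == nil {
		closeIfCloser(f) // *os.File implements io.Closer, so this closes it
	}
	fmt.Println("done")
}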

3
weed/filesys/wfs.go

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"math"
"os"
@@ -34,6 +35,7 @@ type Option struct {
Collection string
Replication string
TtlSec int32
DiskType types.DiskType
ChunkSizeLimit int64
ConcurrentWriters int
CacheDir string
@@ -194,6 +196,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
Collection: wfs.option.Collection,
Replication: wfs.option.Replication,
Ttl: fmt.Sprintf("%ds", wfs.option.TtlSec),
DiskType: string(wfs.option.DiskType),
}
glog.V(4).Infof("reading filer stats: %+v", request)

1
weed/filesys/wfs_write.go

@@ -26,6 +26,7 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
Replication: wfs.option.Replication,
Collection: wfs.option.Collection,
TtlSec: wfs.option.TtlSec,
DiskType: string(wfs.option.DiskType),
DataCenter: wfs.option.DataCenter,
Path: string(fullPath),
}

2
weed/glog/glog.go

@@ -74,8 +74,8 @@ import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
"io"
stdLog "log"
"os"

2
weed/glog/glog_file.go

@@ -20,8 +20,8 @@ package glog
import (
"errors"
"flag"
"fmt"
flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
"os"
"os/user"
"path/filepath"

35
weed/notification/gocdk_pub_sub/gocdk_pub_sub.go

@@ -17,10 +17,14 @@ package gocdk_pub_sub
import (
"context"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/streadway/amqp"
"gocloud.dev/pubsub"
_ "gocloud.dev/pubsub/awssnssqs"
"gocloud.dev/pubsub/rabbitpubsub"
"net/url"
"path"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
@@ -29,12 +33,18 @@ import (
_ "gocloud.dev/pubsub/gcppubsub"
_ "gocloud.dev/pubsub/natspubsub"
_ "gocloud.dev/pubsub/rabbitpubsub"
"os"
)
func init() {
notification.MessageQueues = append(notification.MessageQueues, &GoCDKPubSub{})
}
func getPath(rawUrl string) string {
parsedUrl, _ := url.Parse(rawUrl)
return path.Join(parsedUrl.Host, parsedUrl.Path)
}
type GoCDKPubSub struct {
topicURL string
topic *pubsub.Topic
@@ -44,6 +54,28 @@ func (k *GoCDKPubSub) GetName() string {
return "gocdk_pub_sub"
}
func (k *GoCDKPubSub) doReconnect() {
var conn *amqp.Connection
if k.topic.As(&conn) {
go func() {
<-conn.NotifyClose(make(chan *amqp.Error))
conn.Close()
k.topic.Shutdown(context.Background())
for {
glog.Info("Try reconnect")
conn, err := amqp.Dial(os.Getenv("RABBIT_SERVER_URL"))
if err == nil {
k.topic = rabbitpubsub.OpenTopic(conn, getPath(k.topicURL), nil)
k.doReconnect()
break
}
glog.Error(err)
time.Sleep(time.Second)
}
}()
}
}
func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error {
k.topicURL = configuration.GetString(prefix + "topic_url")
glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
@@ -52,6 +84,7 @@ func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string
glog.Fatalf("Failed to open topic: %v", err)
}
k.topic = topic
k.doReconnect()
return nil
}
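
The new doReconnect covers a gap in gocloud.dev's RabbitMQ driver: when the underlying AMQP connection dies, the topic is shut down, the broker is redialed from RABBIT_SERVER_URL, and the watcher re-arms itself, since NotifyClose fires only once per connection. A trimmed sketch of the same loop against the streadway/amqp and rabbitpubsub APIs used above (not a drop-in for the method itself):

package mq

import (
	"context"
	"log"
	"os"
	"time"

	"github.com/streadway/amqp"
	"gocloud.dev/pubsub"
	"gocloud.dev/pubsub/rabbitpubsub"
)

// watchAndReconnect re-arms itself after every successful redial, because
// NotifyClose delivers exactly one close event per connection.
func watchAndReconnect(topic **pubsub.Topic, topicName string) {
	var conn *amqp.Connection
	if !(*topic).As(&conn) { // only RabbitMQ-backed topics expose the raw connection
		return
	}
	go func() {
		<-conn.NotifyClose(make(chan *amqp.Error))
		(*topic).Shutdown(context.Background())
		for {
			c, err := amqp.Dial(os.Getenv("RABBIT_SERVER_URL"))
			if err == nil {
				*topic = rabbitpubsub.OpenTopic(c, topicName, nil)
				watchAndReconnect(topic, topicName) // watch the new connection
				return
			}
			log.Println("reconnect failed:", err)
			time.Sleep(time.Second)
		}
	}()
}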

9
weed/operation/assign_file_id.go

@@ -18,6 +18,7 @@ type VolumeAssignRequest struct {
Replication string
Collection string
Ttl string
DiskType string
DataCenter string
Rack string
DataNode string
@@ -33,7 +34,7 @@ type AssignResult struct {
Auth security.EncodedJwt `json:"auth,omitempty"`
}
func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
var requests []*VolumeAssignRequest
requests = append(requests, primaryRequest)
@@ -47,13 +48,14 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum
continue
}
lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
lastError = WithMasterServerClient(masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.AssignRequest{
Count: request.Count,
Replication: request.Replication,
Collection: request.Collection,
Ttl: request.Ttl,
DiskType: request.DiskType,
DataCenter: request.DataCenter,
Rack: request.Rack,
DataNode: request.DataNode,
@@ -105,6 +107,7 @@ func LookupJwt(master string, fileId string) security.EncodedJwt {
type StorageOption struct {
Replication string
DiskType string
Collection string
DataCenter string
Rack string
@@ -123,6 +126,7 @@ func (so *StorageOption) ToAssignRequests(count int) (ar *VolumeAssignRequest, a
Replication: so.Replication,
Collection: so.Collection,
Ttl: so.TtlString(),
DiskType: so.DiskType,
DataCenter: so.DataCenter,
Rack: so.Rack,
WritableVolumeCount: so.VolumeGrowthCount,
@@ -133,6 +137,7 @@ func (so *StorageOption) ToAssignRequests(count int) (ar *VolumeAssignRequest, a
Replication: so.Replication,
Collection: so.Collection,
Ttl: so.TtlString(),
DiskType: so.DiskType,
DataCenter: "",
Rack: "",
WritableVolumeCount: so.VolumeGrowthCount,
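
The thread running through all of weed/operation is the GetMasterFn refactor: callers now pass a func() string instead of a fixed master address, and the function is evaluated at each call, so long-running clients follow master leadership changes instead of pinning whatever address they started with. The shape of the refactor in isolation:

package main

import "fmt"

// GetMasterFn defers the choice of master to the moment of the call.
type GetMasterFn func() string

// assign asks whichever master is current right now.
func assign(masterFn GetMasterFn) string {
	return "http://" + masterFn() + "/dir/assign"
}

func main() {
	leader := "master1:9333"

	// The closure sees the latest value of leader on every call.
	masterFn := func() string { return leader }

	fmt.Println(assign(masterFn)) // http://master1:9333/dir/assign
	leader = "master2:9333"       // leadership moved
	fmt.Println(assign(masterFn)) // http://master2:9333/dir/assign

	// A fixed address can still be adapted, as chunked_file.go does above:
	fixed := func() string { return "master3:9333" }
	fmt.Println(assign(fixed))
}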

8
weed/operation/chunked_file.go

@@ -72,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) {
return json.Marshal(cm)
}
func (cm *ChunkManifest) DeleteChunks(master string, usePublicUrl bool, grpcDialOption grpc.DialOption) error {
func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption) error {
var fileIds []string
for _, ci := range cm.Chunks {
fileIds = append(fileIds, ci.Fid)
}
results, err := DeleteFiles(master, usePublicUrl, grpcDialOption, fileIds)
results, err := DeleteFiles(masterFn, usePublicUrl, grpcDialOption, fileIds)
if err != nil {
glog.V(0).Infof("delete %+v: %v", fileIds, err)
return fmt.Errorf("chunk delete: %v", err)
@@ -174,7 +174,9 @@ func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
for ; chunkIndex < len(cf.chunkList); chunkIndex++ {
ci := cf.chunkList[chunkIndex]
// if we need read date from local volume server first?
fileUrl, lookupError := LookupFileId(cf.master, ci.Fid)
fileUrl, lookupError := LookupFileId(func() string {
return cf.master
}, ci.Fid)
if lookupError != nil {
return n, lookupError
}

4
weed/operation/delete_content.go

@@ -28,10 +28,10 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) {
}
// DeleteFiles batch deletes a list of fileIds
func DeleteFiles(master string, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
func DeleteFiles(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
lookupFunc := func(vids []string) (results map[string]LookupResult, err error) {
results, err = LookupVolumeIds(master, grpcDialOption, vids)
results, err = LookupVolumeIds(masterFn, grpcDialOption, vids)
if err == nil && usePublicUrl {
for _, result := range results {
for _, loc := range result.Locations {

15
weed/operation/lookup.go

@@ -33,10 +33,10 @@ var (
vc VidCache // caching of volume locations, re-check if after 10 minutes
)
func Lookup(server string, vid string) (ret *LookupResult, err error) {
func Lookup(masterFn GetMasterFn, vid string) (ret *LookupResult, err error) {
locations, cache_err := vc.Get(vid)
if cache_err != nil {
if ret, err = do_lookup(server, vid); err == nil {
if ret, err = do_lookup(masterFn, vid); err == nil {
vc.Set(vid, ret.Locations, 10*time.Minute)
}
} else {
@@ -45,9 +45,10 @@ func Lookup(server string, vid string) (ret *LookupResult, err error) {
return
}
func do_lookup(server string, vid string) (*LookupResult, error) {
func do_lookup(masterFn GetMasterFn, vid string) (*LookupResult, error) {
values := make(url.Values)
values.Add("volumeId", vid)
server := masterFn()
jsonBlob, err := util.Post("http://"+server+"/dir/lookup", values)
if err != nil {
return nil, err
@@ -63,12 +64,12 @@ func do_lookup(server string, vid string) (*LookupResult, error) {
return &ret, nil
}
func LookupFileId(server string, fileId string) (fullUrl string, err error) {
func LookupFileId(masterFn GetMasterFn, fileId string) (fullUrl string, err error) {
parts := strings.Split(fileId, ",")
if len(parts) != 2 {
return "", errors.New("Invalid fileId " + fileId)
}
lookup, lookupError := Lookup(server, parts[0])
lookup, lookupError := Lookup(masterFn, parts[0])
if lookupError != nil {
return "", lookupError
}
@@ -79,7 +80,7 @@ func LookupFileId(server string, fileId string) (fullUrl string, err error) {
}
// LookupVolumeIds find volume locations by cache and actual lookup
func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) {
func LookupVolumeIds(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) {
ret := make(map[string]LookupResult)
var unknown_vids []string
@@ -99,7 +100,7 @@ func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []strin
//only query unknown_vids
err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
err := WithMasterServerClient(masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.LookupVolumeRequest{
VolumeIds: unknown_vids,
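
Lookup still consults the ten-minute volume-location cache first and only falls through to do_lookup, now via masterFn, on a miss. A minimal sketch of such a TTL cache; the real VidCache differs in detail:

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlCache is a stand-in for the VidCache used above: entries are trusted
// for a fixed window and re-fetched from the master afterwards.
type ttlCache struct {
	mu      sync.Mutex
	entries map[string]entry
}

type entry struct {
	locations []string
	expiresAt time.Time
}

func (c *ttlCache) Get(vid string) ([]string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.entries[vid]
	if !ok || time.Now().After(e.expiresAt) {
		return nil, false // miss or expired: caller must do a real lookup
	}
	return e.locations, true
}

func (c *ttlCache) Set(vid string, locations []string, ttl time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.entries == nil {
		c.entries = map[string]entry{}
	}
	c.entries[vid] = entry{locations, time.Now().Add(ttl)}
}

func main() {
	var vc ttlCache
	vc.Set("3", []string{"127.0.0.1:8080"}, 10*time.Minute)
	if locs, ok := vc.Get("3"); ok {
		fmt.Println("cached:", locs) // served without asking the master
	}
}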

29
weed/operation/submit.go

@@ -25,6 +25,7 @@ type FilePart struct {
Collection string
DataCenter string
Ttl string
DiskType string
Server string //this comes from assign result
Fid string //this comes from assign result, but customizable
Fsync bool
@@ -38,7 +39,9 @@ type SubmitResult struct {
Error string `json:"error,omitempty"`
}
func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) {
type GetMasterFn func() string
func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, diskType string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) {
results := make([]SubmitResult, len(files))
for index, file := range files {
results[index].FileName = file.FileName
@@ -49,8 +52,9 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart
Collection: collection,
DataCenter: dataCenter,
Ttl: ttl,
DiskType: diskType,
}
ret, err := Assign(master, grpcDialOption, ar)
ret, err := Assign(masterFn, grpcDialOption, ar)
if err != nil {
for index := range files {
results[index].Error = err.Error()
@@ -70,7 +74,8 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart
file.Collection = collection
file.DataCenter = dataCenter
file.Ttl = ttl
results[index].Size, err = file.Upload(maxMB, master, usePublicUrl, ret.Auth, grpcDialOption)
file.DiskType = diskType
results[index].Size, err = file.Upload(maxMB, masterFn, usePublicUrl, ret.Auth, grpcDialOption)
if err != nil {
results[index].Error = err.Error()
}
@@ -113,7 +118,7 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) {
return ret, nil
}
func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
fileUrl := "http://" + fi.Server + "/" + fi.Fid
if fi.ModTime != 0 {
fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
@@ -143,8 +148,9 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
Replication: fi.Replication,
Collection: fi.Collection,
Ttl: fi.Ttl,
DiskType: fi.DiskType,
}
ret, err = Assign(master, grpcDialOption, ar)
ret, err = Assign(masterFn, grpcDialOption, ar)
if err != nil {
return
}
@@ -156,11 +162,12 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
Replication: fi.Replication,
Collection: fi.Collection,
Ttl: fi.Ttl,
DiskType: fi.DiskType,
}
ret, err = Assign(master, grpcDialOption, ar)
ret, err = Assign(masterFn, grpcDialOption, ar)
if err != nil {
// delete all uploaded chunks
cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
return
}
id = ret.Fid
@@ -177,11 +184,11 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
count, e := upload_one_chunk(
baseName+"-"+strconv.FormatInt(i+1, 10),
io.LimitReader(fi.Reader, chunkSize),
master, fileUrl,
masterFn, fileUrl,
ret.Auth)
if e != nil {
// delete all uploaded chunks
cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
return 0, e
}
cm.Chunks = append(cm.Chunks,
@@ -196,7 +203,7 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
if err != nil {
// delete all uploaded chunks
cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
}
} else {
ret, e, _ := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt)
@@ -208,7 +215,7 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
return
}
func upload_one_chunk(filename string, reader io.Reader, master,
func upload_one_chunk(filename string, reader io.Reader, masterFn GetMasterFn,
fileUrl string, jwt security.EncodedJwt,
) (size uint32, e error) {
glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")

4
weed/operation/tail_volume.go

@@ -11,9 +11,9 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error {
func TailVolume(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error {
// find volume location, replication, ttl info
lookup, err := Lookup(master, vid.String())
lookup, err := Lookup(masterFn, vid.String())
if err != nil {
return fmt.Errorf("look up volume %d: %v", vid, err)
}

37
weed/operation/upload_content.go

@@ -58,11 +58,12 @@ var (
func init() {
HttpClient = &http.Client{Transport: &http.Transport{
MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
}}
}
var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
// Upload sends a POST request to a volume server to upload the content with adjustable compression level
func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
@@ -99,6 +100,7 @@ func retriedUploadData(uploadUrl string, filename string, cipher bool, data []by
} else {
glog.Warningf("uploading to %s: %v", uploadUrl, err)
}
time.Sleep(time.Millisecond * time.Duration(237*(i+1)))
}
return
}
@@ -128,7 +130,8 @@ func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, i
// gzip if possible
// this could be double copying
clearDataLen = len(data)
if shouldGzipNow {
clearData := data
if shouldGzipNow && !cipher {
compressed, compressErr := util.GzipData(data)
// fmt.Printf("data is compressed from %d ==> %d\n", len(data), len(compressed))
if compressErr == nil {
@@ -137,7 +140,7 @@ func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, i
}
} else if isInputCompressed {
// just to get the clear data length
clearData, err := util.DecompressData(data)
clearData, err = util.DecompressData(data)
if err == nil {
clearDataLen = len(clearData)
}
@@ -148,7 +151,7 @@ func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, i
// encrypt
cipherKey := util.GenCipherKey()
encryptedData, encryptionErr := util.Encrypt(data, cipherKey)
encryptedData, encryptionErr := util.Encrypt(clearData, cipherKey)
if encryptionErr != nil {
err = fmt.Errorf("encrypt input: %v", encryptionErr)
return
@@ -159,26 +162,26 @@ func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, i
_, err = w.Write(encryptedData)
return
}, "", false, len(encryptedData), "", nil, jwt)
if uploadResult != nil {
uploadResult.Name = filename
uploadResult.Mime = mtype
uploadResult.CipherKey = cipherKey
if uploadResult == nil {
return
}
uploadResult.Name = filename
uploadResult.Mime = mtype
uploadResult.CipherKey = cipherKey
uploadResult.Size = uint32(clearDataLen)
} else {
// upload data
uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) {
_, err = w.Write(data)
return
}, filename, contentIsGzipped, len(data), mtype, pairMap, jwt)
}
if uploadResult == nil {
return
}
uploadResult.Size = uint32(clearDataLen)
if contentIsGzipped {
uploadResult.Gzip = 1
if uploadResult == nil {
return
}
uploadResult.Size = uint32(clearDataLen)
if contentIsGzipped {
uploadResult.Gzip = 1
}
}
return uploadResult, err
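
retriedUploadData now sleeps 237ms times the attempt number between tries, a simple linear backoff that spaces retries out without an unbounded wait. The same policy as a standalone helper (the helper name and retry count are illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retried retries op with a linearly growing pause, mirroring the
// 237ms * (attempt+1) delay added above.
func retried(attempts int, op func() error) (err error) {
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed: %v\n", i+1, err)
		time.Sleep(time.Millisecond * time.Duration(237*(i+1)))
	}
	return err
}

func main() {
	calls := 0
	err := retried(4, func() error {
		calls++
		if calls < 3 {
			return errors.New("volume server busy")
		}
		return nil
	})
	fmt.Println("final:", err, "after", calls, "calls")
}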

13
weed/pb/filer.proto

@@ -156,6 +156,7 @@ message FuseAttributes {
repeated string group_name = 12; // for hdfs
string symlink_target = 13;
bytes md5 = 14;
string disk_type = 15;
}
message CreateEntryRequest {
@@ -220,6 +221,7 @@ message AssignVolumeRequest {
string data_center = 5;
string path = 6;
string rack = 7;
string disk_type = 8;
}
message AssignVolumeResponse {
@@ -270,11 +272,9 @@ message StatisticsRequest {
string replication = 1;
string collection = 2;
string ttl = 3;
string disk_type = 4;
}
message StatisticsResponse {
string replication = 1;
string collection = 2;
string ttl = 3;
uint64 total_size = 4;
uint64 used_size = 5;
uint64 file_count = 6;
@@ -358,12 +358,7 @@ message FilerConf {
string collection = 2;
string replication = 3;
string ttl = 4;
enum DiskType {
NONE = 0;
HDD = 1;
SSD = 2;
}
DiskType disk_type = 5;
string disk_type = 5;
bool fsync = 6;
uint32 volume_growth_count = 7;
}

1086
weed/pb/filer_pb/filer.pb.go
File diff suppressed because it is too large

7
weed/pb/grpc_client_server.go

@@ -29,14 +29,16 @@ var (
func init() {
http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024
http.DefaultTransport.(*http.Transport).MaxIdleConns = 1024
}
func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server {
var options []grpc.ServerOption
options = append(options,
grpc.KeepaliveParams(keepalive.ServerParameters{
Time: 10 * time.Second, // wait time before ping if no activity
Timeout: 20 * time.Second, // ping timeout
Time: 10 * time.Second, // wait time before ping if no activity
Timeout: 20 * time.Second, // ping timeout
MaxConnectionAge: 10 * time.Hour,
}),
grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: 60 * time.Second, // min time a client should wait before sending a ping
@@ -62,7 +64,6 @@ func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*gr
grpc.WithDefaultCallOptions(
grpc.MaxCallSendMsgSize(Max_Message_Size),
grpc.MaxCallRecvMsgSize(Max_Message_Size),
grpc.WaitForReady(true),
),
grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: 30 * time.Second, // client ping server if no activity for this long
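
On the server side the keepalive parameters gain MaxConnectionAge, which recycles even healthy connections every ten hours, and on the dial side grpc.WaitForReady(true) is dropped so calls fail fast while a connection is down instead of blocking. A compilable sketch of the server-side options, with values mirroring the diff:

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// newServer shows the keepalive shape after the change.
func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:             10 * time.Second, // ping an idle client after this long
			Timeout:          20 * time.Second, // drop the client if the ping gets no reply
			MaxConnectionAge: 10 * time.Hour,   // recycle even healthy connections eventually
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime: 60 * time.Second, // reject clients that ping more often than this
		}),
	)
}

func main() {
	s := newServer()
	defer s.Stop()
}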

43
weed/pb/master.proto

@@ -44,7 +44,6 @@ message Heartbeat {
string ip = 1;
uint32 port = 2;
string public_url = 3;
uint32 max_volume_count = 4;
uint64 max_file_key = 5;
string data_center = 6;
string rack = 7;
@@ -62,6 +61,8 @@ message Heartbeat {
repeated VolumeEcShardInformationMessage deleted_ec_shards = 18;
bool has_no_ec_shards = 19;
map<string, uint32> max_volume_counts = 4;
}
message HeartbeatResponse {
@@ -87,6 +88,7 @@ message VolumeInformationMessage {
int64 modified_at_second = 12;
string remote_storage_name = 13;
string remote_storage_key = 14;
string disk_type = 15;
}
message VolumeShortInformationMessage {
@@ -95,12 +97,14 @@ message VolumeShortInformationMessage {
uint32 replica_placement = 8;
uint32 version = 9;
uint32 ttl = 10;
string disk_type = 15;
}
message VolumeEcShardInformationMessage {
uint32 id = 1;
string collection = 2;
uint32 ec_index_bits = 3;
string disk_type = 4;
}
message StorageBackend {
@@ -163,6 +167,7 @@ message AssignRequest {
string data_node = 7;
uint32 memory_map_max_size_mb = 8;
uint32 Writable_volume_count = 9;
string disk_type = 10;
}
message AssignResponse {
string fid = 1;
@@ -177,11 +182,9 @@ message StatisticsRequest {
string replication = 1;
string collection = 2;
string ttl = 3;
string disk_type = 4;
}
message StatisticsResponse {
string replication = 1;
string collection = 2;
string ttl = 3;
uint64 total_size = 4;
uint64 used_size = 5;
uint64 file_count = 6;
@@ -210,8 +213,8 @@ message CollectionDeleteResponse {
//
// volume related
//
message DataNodeInfo {
string id = 1;
message DiskInfo {
string type = 1;
uint64 volume_count = 2;
uint64 max_volume_count = 3;
uint64 free_volume_count = 4;
@@ -220,32 +223,24 @@ message DataNodeInfo {
repeated VolumeEcShardInformationMessage ec_shard_infos = 7;
uint64 remote_volume_count = 8;
}
message DataNodeInfo {
string id = 1;
map<string, DiskInfo> diskInfos = 2;
}
message RackInfo {
string id = 1;
uint64 volume_count = 2;
uint64 max_volume_count = 3;
uint64 free_volume_count = 4;
uint64 active_volume_count = 5;
repeated DataNodeInfo data_node_infos = 6;
uint64 remote_volume_count = 7;
repeated DataNodeInfo data_node_infos = 2;
map<string, DiskInfo> diskInfos = 3;
}
message DataCenterInfo {
string id = 1;
uint64 volume_count = 2;
uint64 max_volume_count = 3;
uint64 free_volume_count = 4;
uint64 active_volume_count = 5;
repeated RackInfo rack_infos = 6;
uint64 remote_volume_count = 7;
repeated RackInfo rack_infos = 2;
map<string, DiskInfo> diskInfos = 3;
}
message TopologyInfo {
string id = 1;
uint64 volume_count = 2;
uint64 max_volume_count = 3;
uint64 free_volume_count = 4;
uint64 active_volume_count = 5;
repeated DataCenterInfo data_center_infos = 6;
uint64 remote_volume_count = 7;
repeated DataCenterInfo data_center_infos = 2;
map<string, DiskInfo> diskInfos = 3;
}
message VolumeListRequest {
}
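
The topology messages are reshaped around map<string, DiskInfo>: the flat per-node, per-rack, and per-data-center capacity counters move into one DiskInfo entry per disk type. A sketch of working with the new shape, using hand-written stand-ins for the generated master_pb types:

package main

import "fmt"

// DiskInfo and DataNodeInfo stand in for the generated types; field names
// follow the proto above.
type DiskInfo struct {
	Type           string
	VolumeCount    uint64
	MaxVolumeCount uint64
}

type DataNodeInfo struct {
	Id        string
	DiskInfos map[string]DiskInfo
}

// freeSlots reads capacity for one disk type, the lookup the old flat
// counters could not express.
func freeSlots(n DataNodeInfo, diskType string) uint64 {
	d, ok := n.DiskInfos[diskType]
	if !ok || d.MaxVolumeCount < d.VolumeCount {
		return 0
	}
	return d.MaxVolumeCount - d.VolumeCount
}

func main() {
	node := DataNodeInfo{
		Id: "dn1",
		DiskInfos: map[string]DiskInfo{
			"hdd": {Type: "hdd", VolumeCount: 7, MaxVolumeCount: 8},
			"ssd": {Type: "ssd", VolumeCount: 1, MaxVolumeCount: 4},
		},
	}
	fmt.Println("hdd free:", freeSlots(node, "hdd")) // 1
	fmt.Println("ssd free:", freeSlots(node, "ssd")) // 3
}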

1658
weed/pb/master_pb/master.pb.go
File diff suppressed because it is too large

Some files were not shown because too many files changed in this diff
