
Merge pull request #87 from chrislusf/master

sync
Branch: pull/2469/head
Author: hilimd, 3 years ago, committed by GitHub
Commit: 34240606f7
1. .github/workflows/binaries_dev.yml (13 changes)
2. backers.md (8 changes)
3. docker/Dockerfile.go_build (2 changes)
4. docker/Dockerfile.go_build_large (2 changes)
5. k8s/helm_charts2/Chart.yaml (4 changes)
6. k8s/helm_charts2/templates/filer-statefulset.yaml (5 changes)
7. k8s/helm_charts2/values.yaml (2 changes)
8. other/java/client/pom.xml (4 changes)
9. other/java/client/pom.xml.deploy (4 changes)
10. other/java/client/pom_debug.xml (4 changes)
11. other/java/client/src/main/java/seaweedfs/client/RemoteUtil.java (6 changes)
12. other/java/client/src/main/proto/filer.proto (6 changes)
13. other/java/examples/pom.xml (4 changes)
14. other/java/hdfs2/dependency-reduced-pom.xml (2 changes)
15. other/java/hdfs2/pom.xml (2 changes)
16. other/java/hdfs3/dependency-reduced-pom.xml (2 changes)
17. other/java/hdfs3/pom.xml (2 changes)
18. weed/Makefile (5 changes)
19. weed/cluster/cluster.go (264 changes)
20. weed/cluster/cluster_test.go (47 changes)
21. weed/command/filer.go (9 changes)
22. weed/command/filer_meta_tail.go (2 changes)
23. weed/command/server.go (1 change)
24. weed/command/shell.go (7 changes)
25. weed/filer/filechunk_manifest.go (4 changes)
26. weed/filer/filechunks_read.go (2 changes)
27. weed/filer/filechunks_read_test.go (8 changes)
28. weed/filer/filer.go (46 changes)
29. weed/filer/filer_delete_entry.go (63 changes)
30. weed/filer/meta_aggregator.go (53 changes)
31. weed/filer/read_remote.go (4 changes)
32. weed/filer/redis2/universal_redis_store.go (4 changes)
33. weed/filesys/dir_rename.go (3 changes)
34. weed/filesys/file.go (6 changes)
35. weed/iamapi/iamapi_handlers.go (8 changes)
36. weed/iamapi/iamapi_management_handlers.go (22 changes)
37. weed/iamapi/iamapi_server.go (2 changes)
38. weed/messaging/broker/broker_grpc_server_discovery.go (9 changes)
39. weed/pb/filer.proto (6 changes)
40. weed/pb/filer_pb/filer.pb.go (221 changes)
41. weed/pb/master.proto (394 changes)
42. weed/pb/master_pb/master.pb.go (1533 changes)
43. weed/remote_storage/s3/s3_storage_client.go (5 changes)
44. weed/s3api/auth_credentials.go (20 changes)
45. weed/s3api/s3api_bucket_handlers.go (47 changes)
46. weed/s3api/s3api_handlers.go (8 changes)
47. weed/s3api/s3api_object_copy_handlers.go (30 changes)
48. weed/s3api/s3api_object_handlers.go (32 changes)
49. weed/s3api/s3api_object_handlers_postpolicy.go (30 changes)
50. weed/s3api/s3api_object_multipart_handlers.go (40 changes)
51. weed/s3api/s3api_object_tagging_handlers.go (24 changes)
52. weed/s3api/s3api_objects_list_handlers.go (24 changes)
53. weed/s3api/s3api_server.go (4 changes)
54. weed/s3api/s3api_status_handlers.go (2 changes)
55. weed/s3api/s3err/error_handler.go (24 changes)
56. weed/s3api/stats.go (3 changes)
57. weed/server/filer_grpc_server_remote.go (4 changes)
58. weed/server/filer_grpc_server_rename.go (2 changes)
59. weed/server/filer_server.go (5 changes)
60. weed/server/filer_server_handlers_read.go (4 changes)
61. weed/server/master_grpc_server.go (59 changes)
62. weed/server/master_grpc_server_cluster.go (21 changes)
63. weed/server/master_server.go (10 changes)
64. weed/server/volume_grpc_copy.go (2 changes)
65. weed/server/volume_grpc_tier_upload.go (7 changes)
66. weed/server/volume_grpc_vacuum.go (2 changes)
67. weed/shell/command_cluster_ps.go (55 changes)
68. weed/shell/command_ec_encode.go (5 changes)
69. weed/shell/command_remote_cache.go (8 changes)
70. weed/shell/command_remote_meta_sync.go (2 changes)
71. weed/shell/command_remote_mount.go (2 changes)
72. weed/shell/command_remote_mount_buckets.go (2 changes)
73. weed/shell/command_remote_unmount.go (2 changes)
74. weed/shell/command_volume_fix_replication_test.go (23 changes)
75. weed/shell/shell_liner.go (27 changes)
76. weed/storage/backend/backend.go (2 changes)
77. weed/storage/backend/s3_backend/s3_backend.go (4 changes)
78. weed/storage/backend/s3_backend/s3_sessions.go (5 changes)
79. weed/storage/backend/s3_backend/s3_upload.go (24 changes)
80. weed/util/constants.go (2 changes)
81. weed/wdclient/masterclient.go (65 changes)

.github/workflows/binaries_dev.yml (13 changes)

@@ -3,6 +3,8 @@ name: "go: build dev binaries"
 on:
   push:
     branches: [ master ]
+  pull_request:
+    branches: [ master ]

 jobs:
@@ -36,8 +38,11 @@ jobs:
       - name: Set BUILD_TIME env
         run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
+      - name: disable http2 env
+        run: export GODEBUG=http2client=0
       - name: Go Release Binaries Large Disk
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@bugfix/upload-fail
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
@@ -53,7 +58,7 @@ jobs:
           asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
       - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@bugfix/upload-fail
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
@@ -84,7 +89,7 @@ jobs:
         run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
       - name: Go Release Binaries Large Disk
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@bugfix/upload-fail
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
@@ -100,7 +105,7 @@ jobs:
           asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
       - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@bugfix/upload-fail
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
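
The "disable http2 env" step sets GODEBUG=http2client=0, which makes Go's net/http client skip HTTP/2 negotiation and fall back to HTTP/1.1 (note that an export inside one run: step only affects that step's shell). A minimal standalone sketch of the equivalent code-level toggle, assuming a plain net/http client; giving the transport an empty, non-nil TLSNextProto map opts out of the automatic h2 upgrade:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			// empty but non-nil map: disables HTTP/2 negotiation,
			// the same effect as GODEBUG=http2client=0
			TLSNextProto: map[string]func(string, *tls.Conn) http.RoundTripper{},
		},
	}

	resp, err := client.Get("https://example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("negotiated protocol:", resp.Proto) // HTTP/1.1
}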

backers.md (8 changes)

@@ -5,12 +5,16 @@

 <h2 align="center">Generous Backers ($50+)</h2>

+- [4Sight Imaging](https://www.4sightimaging.com/)
 - [Evercam Camera Management Software](https://evercam.io/)
+- [Admiral](https://getadmiral.com)

 <h2 align="center">Backers</h2>

 - [ColorfulClouds Tech Co. Ltd.](https://caiyunai.com/)
 - [Haravan - Ecommerce Platform](https://www.haravan.com)
 - PeterCxy - Creator of Shelter App
+- [Hive Games](https://playhive.com/)
+- Flowm
+- Yoni Nakache
+- Catalin Constantin
+- MingLi Yuan
+- Leroy van Logchem

docker/Dockerfile.go_build (2 changes)

@@ -1,4 +1,4 @@
-FROM amd64/golang:1.17-alpine as builder
+FROM golang:1.17-alpine as builder
 RUN apk add git g++ fuse
 RUN mkdir -p /go/src/github.com/chrislusf/
 RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs

docker/Dockerfile.go_build_large (2 changes)

@@ -1,4 +1,4 @@
-FROM amd64/golang:1.17-alpine as builder
+FROM golang:1.17-alpine as builder
 RUN apk add git g++ fuse
 RUN mkdir -p /go/src/github.com/chrislusf/
 RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs

k8s/helm_charts2/Chart.yaml (4 changes)

@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.75"
-version: "2.75"
+appVersion: "2.77"
+version: "2.77"

k8s/helm_charts2/templates/filer-statefulset.yaml (5 changes)

@@ -133,11 +133,6 @@ spec:
           -encryptVolumeData \
           {{- end }}
           -ip=${POD_IP} \
-          {{- if .Values.filer.enable_peers }}
-          {{- if gt (.Values.filer.replicas | int) 1 }}
-          -peers=$(echo -n "{{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}" | sed "s/$HOSTNAME.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}//" | sed 's/,$//; 's/^,//'; s/,,/,/;' ) \
-          {{- end }}
-          {{- end }}
           {{- if .Values.filer.s3.enabled }}
           -s3 \
           -s3.port={{ .Values.filer.s3.port }} \

k8s/helm_charts2/values.yaml (2 changes)

@@ -246,8 +246,6 @@ filer:
   maxMB: null
   # encrypt data on volume servers
   encryptVolumeData: false
-  # enable peers sync metadata, for leveldb (localdb for filer but with sync across)
-  enable_peers: false
   # Whether proxy or redirect to volume server during file GET request
   redirectOnRead: false

other/java/client/pom.xml (4 changes)

@@ -5,7 +5,7 @@
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-client</artifactId>
-  <version>1.6.9</version>
+  <version>1.7.0</version>

   <parent>
     <groupId>org.sonatype.oss</groupId>
@@ -60,7 +60,7 @@
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpmime</artifactId>
-      <version>4.5.6</version>
+      <version>4.5.13</version>
     </dependency>
     <dependency>
       <groupId>junit</groupId>

other/java/client/pom.xml.deploy (4 changes)

@@ -5,7 +5,7 @@
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-client</artifactId>
-  <version>1.6.9</version>
+  <version>1.7.0</version>

   <parent>
     <groupId>org.sonatype.oss</groupId>
@@ -60,7 +60,7 @@
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpmime</artifactId>
-      <version>4.5.6</version>
+      <version>4.5.13</version>
     </dependency>
     <dependency>
       <groupId>junit</groupId>

other/java/client/pom_debug.xml (4 changes)

@@ -5,7 +5,7 @@
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-client</artifactId>
-  <version>1.6.9</version>
+  <version>1.7.0</version>

   <parent>
     <groupId>org.sonatype.oss</groupId>
@@ -60,7 +60,7 @@
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpmime</artifactId>
-      <version>4.5.6</version>
+      <version>4.5.13</version>
     </dependency>
     <dependency>
       <groupId>junit</groupId>

other/java/client/src/main/java/seaweedfs/client/RemoteUtil.java (6 changes)

@@ -14,10 +14,10 @@ public class RemoteUtil {
         String dir = SeaweedOutputStream.getParentDirectory(fullpath);
         String name = SeaweedOutputStream.getFileName(fullpath);

-        final FilerProto.DownloadToLocalResponse downloadToLocalResponse = filerClient.getBlockingStub()
-                .downloadToLocal(FilerProto.DownloadToLocalRequest.newBuilder()
+        final FilerProto.CacheRemoteObjectToLocalClusterResponse response = filerClient.getBlockingStub()
+                .cacheRemoteObjectToLocalCluster(FilerProto.CacheRemoteObjectToLocalClusterRequest.newBuilder()
                         .setDirectory(dir).setName(name).build());

-        return downloadToLocalResponse.getEntry();
+        return response.getEntry();
     }
 }

other/java/client/src/main/proto/filer.proto (6 changes)

@@ -69,7 +69,7 @@ service SeaweedFiler {
     rpc KvPut (KvPutRequest) returns (KvPutResponse) {
     }

-    rpc DownloadToLocal (DownloadToLocalRequest) returns (DownloadToLocalResponse) {
+    rpc CacheRemoteObjectToLocalCluster (CacheRemoteObjectToLocalClusterRequest) returns (CacheRemoteObjectToLocalClusterResponse) {
     }
 }
@@ -403,10 +403,10 @@ message FilerConf {
 /////////////////////////
 // Remote Storage related
 /////////////////////////
-message DownloadToLocalRequest {
+message CacheRemoteObjectToLocalClusterRequest {
     string directory = 1;
     string name = 2;
 }
-message DownloadToLocalResponse {
+message CacheRemoteObjectToLocalClusterResponse {
     Entry entry = 1;
 }

other/java/examples/pom.xml (4 changes)

@@ -11,13 +11,13 @@
     <dependency>
       <groupId>com.github.chrislusf</groupId>
       <artifactId>seaweedfs-client</artifactId>
-      <version>1.6.9</version>
+      <version>1.7.0</version>
       <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>com.github.chrislusf</groupId>
       <artifactId>seaweedfs-hadoop2-client</artifactId>
-      <version>1.6.9</version>
+      <version>1.7.0</version>
       <scope>compile</scope>
     </dependency>
     <dependency>

other/java/hdfs2/dependency-reduced-pom.xml (2 changes)

@@ -301,7 +301,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.7.0</seaweedfs.client.version>
     <hadoop.version>2.9.2</hadoop.version>
   </properties>
 </project>

other/java/hdfs2/pom.xml (2 changes)

@@ -5,7 +5,7 @@
   <modelVersion>4.0.0</modelVersion>

   <properties>
-    <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.7.0</seaweedfs.client.version>
     <hadoop.version>2.9.2</hadoop.version>
   </properties>

other/java/hdfs3/dependency-reduced-pom.xml (2 changes)

@@ -309,7 +309,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.7.0</seaweedfs.client.version>
     <hadoop.version>3.1.1</hadoop.version>
   </properties>
 </project>

other/java/hdfs3/pom.xml (2 changes)

@@ -5,7 +5,7 @@
   <modelVersion>4.0.0</modelVersion>

   <properties>
-    <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.7.0</seaweedfs.client.version>
     <hadoop.version>3.1.1</hadoop.version>
   </properties>

weed/Makefile (5 changes)

@@ -2,10 +2,13 @@ BINARY = weed

 SOURCE_DIR = .

-all: debug_mount
+all: install

 .PHONY : clean debug_mount

+install:
+	go install
+
 clean:
 	go clean $(SOURCE_DIR)
 	rm -f $(BINARY)

weed/cluster/cluster.go (264 changes, new file)

@@ -0,0 +1,264 @@
package cluster
import (
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"math"
"sync"
"time"
)
const (
MasterType = "master"
FilerType = "filer"
BrokerType = "broker"
)
type ClusterNode struct {
Address pb.ServerAddress
Version string
counter int
createdTs time.Time
}
type Leaders struct {
leaders [3]pb.ServerAddress
}
type Cluster struct {
filers map[pb.ServerAddress]*ClusterNode
filersLock sync.RWMutex
filerLeaders *Leaders
brokers map[pb.ServerAddress]*ClusterNode
brokersLock sync.RWMutex
}
func NewCluster() *Cluster {
return &Cluster{
filers: make(map[pb.ServerAddress]*ClusterNode),
filerLeaders: &Leaders{},
brokers: make(map[pb.ServerAddress]*ClusterNode),
}
}
func (cluster *Cluster) AddClusterNode(nodeType string, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse {
switch nodeType {
case FilerType:
cluster.filersLock.Lock()
defer cluster.filersLock.Unlock()
if existingNode, found := cluster.filers[address]; found {
existingNode.counter++
return nil
}
cluster.filers[address] = &ClusterNode{
Address: address,
Version: version,
counter: 1,
createdTs: time.Now(),
}
return cluster.ensureFilerLeaders(true, nodeType, address)
case BrokerType:
cluster.brokersLock.Lock()
defer cluster.brokersLock.Unlock()
if existingNode, found := cluster.brokers[address]; found {
existingNode.counter++
return nil
}
cluster.brokers[address] = &ClusterNode{
Address: address,
Version: version,
counter: 1,
createdTs: time.Now(),
}
return []*master_pb.KeepConnectedResponse{
{
ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
NodeType: nodeType,
Address: string(address),
IsAdd: true,
},
},
}
case MasterType:
}
return nil
}
func (cluster *Cluster) RemoveClusterNode(nodeType string, address pb.ServerAddress) []*master_pb.KeepConnectedResponse {
switch nodeType {
case FilerType:
cluster.filersLock.Lock()
defer cluster.filersLock.Unlock()
if existingNode, found := cluster.filers[address]; !found {
return nil
} else {
existingNode.counter--
if existingNode.counter <= 0 {
delete(cluster.filers, address)
return cluster.ensureFilerLeaders(false, nodeType, address)
}
}
case BrokerType:
cluster.brokersLock.Lock()
defer cluster.brokersLock.Unlock()
if existingNode, found := cluster.brokers[address]; !found {
return nil
} else {
existingNode.counter--
if existingNode.counter <= 0 {
delete(cluster.brokers, address)
return []*master_pb.KeepConnectedResponse{
{
ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
NodeType: nodeType,
Address: string(address),
IsAdd: false,
},
},
}
}
}
case MasterType:
}
return nil
}
func (cluster *Cluster) ListClusterNode(nodeType string) (nodes []*ClusterNode) {
switch nodeType {
case FilerType:
cluster.filersLock.RLock()
defer cluster.filersLock.RUnlock()
for _, node := range cluster.filers {
nodes = append(nodes, node)
}
case BrokerType:
cluster.brokersLock.RLock()
defer cluster.brokersLock.RUnlock()
for _, node := range cluster.brokers {
nodes = append(nodes, node)
}
case MasterType:
}
return
}
func (cluster *Cluster) IsOneLeader(address pb.ServerAddress) bool {
return cluster.filerLeaders.isOneLeader(address)
}
func (cluster *Cluster) ensureFilerLeaders(isAdd bool, nodeType string, address pb.ServerAddress) (result []*master_pb.KeepConnectedResponse) {
if isAdd {
if cluster.filerLeaders.addLeaderIfVacant(address) {
// has added the address as one leader
result = append(result, &master_pb.KeepConnectedResponse{
ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
NodeType: nodeType,
Address: string(address),
IsLeader: true,
IsAdd: true,
},
})
} else {
result = append(result, &master_pb.KeepConnectedResponse{
ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
NodeType: nodeType,
Address: string(address),
IsLeader: false,
IsAdd: true,
},
})
}
} else {
if cluster.filerLeaders.removeLeaderIfExists(address) {
result = append(result, &master_pb.KeepConnectedResponse{
ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
NodeType: nodeType,
Address: string(address),
IsLeader: true,
IsAdd: false,
},
})
// pick the freshest one, since it is less likely to go away
var shortestDuration int64 = math.MaxInt64
now := time.Now()
var candidateAddress pb.ServerAddress
for _, node := range cluster.filers {
if cluster.filerLeaders.isOneLeader(node.Address) {
continue
}
duration := now.Sub(node.createdTs).Nanoseconds()
if duration < shortestDuration {
shortestDuration = duration
candidateAddress = node.Address
}
}
if candidateAddress != "" {
cluster.filerLeaders.addLeaderIfVacant(candidateAddress)
// added a new leader
result = append(result, &master_pb.KeepConnectedResponse{
ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
NodeType: nodeType,
Address: string(candidateAddress),
IsLeader: true,
IsAdd: true,
},
})
}
} else {
result = append(result, &master_pb.KeepConnectedResponse{
ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
NodeType: nodeType,
Address: string(address),
IsLeader: false,
IsAdd: false,
},
})
}
}
return
}
func (leaders *Leaders) addLeaderIfVacant(address pb.ServerAddress) (hasChanged bool) {
if leaders.isOneLeader(address) {
return
}
for i := 0; i < len(leaders.leaders); i++ {
if leaders.leaders[i] == "" {
leaders.leaders[i] = address
hasChanged = true
return
}
}
return
}
func (leaders *Leaders) removeLeaderIfExists(address pb.ServerAddress) (hasChanged bool) {
if !leaders.isOneLeader(address) {
return
}
for i := 0; i < len(leaders.leaders); i++ {
if leaders.leaders[i] == address {
leaders.leaders[i] = ""
hasChanged = true
return
}
}
return
}
func (leaders *Leaders) isOneLeader(address pb.ServerAddress) bool {
for i := 0; i < len(leaders.leaders); i++ {
if leaders.leaders[i] == address {
return true
}
}
return false
}
func (leaders *Leaders) GetLeaders() (addresses []pb.ServerAddress) {
for i := 0; i < len(leaders.leaders); i++ {
if leaders.leaders[i] != "" {
addresses = append(addresses, leaders.leaders[i])
}
}
return
}
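
The new cluster package gives the master a membership view of filers and brokers: each node carries a reference count (AddClusterNode / RemoveClusterNode), and up to three filers at a time occupy the fixed slots in Leaders. A small usage sketch with hypothetical addresses, exercising only the exported API above:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/cluster"
	"github.com/chrislusf/seaweedfs/weed/pb"
)

func main() {
	c := cluster.NewCluster()

	// the first three filers to register each take a leader slot
	c.AddClusterNode(cluster.FilerType, pb.ServerAddress("filer1:8888"), "2.77")
	c.AddClusterNode(cluster.FilerType, pb.ServerAddress("filer2:8888"), "2.77")
	c.AddClusterNode(cluster.FilerType, pb.ServerAddress("filer3:8888"), "2.77")
	c.AddClusterNode(cluster.FilerType, pb.ServerAddress("filer4:8888"), "2.77") // follower

	fmt.Println(c.IsOneLeader(pb.ServerAddress("filer1:8888"))) // true
	fmt.Println(c.IsOneLeader(pb.ServerAddress("filer4:8888"))) // false

	// removing a leader promotes the freshest remaining non-leader into the slot
	c.RemoveClusterNode(cluster.FilerType, pb.ServerAddress("filer1:8888"))
	fmt.Println(c.IsOneLeader(pb.ServerAddress("filer4:8888"))) // true
}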

weed/cluster/cluster_test.go (47 changes, new file)

@@ -0,0 +1,47 @@
package cluster
import (
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/stretchr/testify/assert"
"testing"
)
func TestClusterAddRemoveNodes(t *testing.T) {
c := NewCluster()
c.AddClusterNode("filer", pb.ServerAddress("111:1"), "23.45")
c.AddClusterNode("filer", pb.ServerAddress("111:2"), "23.45")
assert.Equal(t, []pb.ServerAddress{
pb.ServerAddress("111:1"),
pb.ServerAddress("111:2"),
}, c.filerLeaders.GetLeaders())
c.AddClusterNode("filer", pb.ServerAddress("111:3"), "23.45")
c.AddClusterNode("filer", pb.ServerAddress("111:4"), "23.45")
assert.Equal(t, []pb.ServerAddress{
pb.ServerAddress("111:1"),
pb.ServerAddress("111:2"),
pb.ServerAddress("111:3"),
}, c.filerLeaders.GetLeaders())
c.AddClusterNode("filer", pb.ServerAddress("111:5"), "23.45")
c.AddClusterNode("filer", pb.ServerAddress("111:6"), "23.45")
c.RemoveClusterNode("filer", pb.ServerAddress("111:4"))
assert.Equal(t, []pb.ServerAddress{
pb.ServerAddress("111:1"),
pb.ServerAddress("111:2"),
pb.ServerAddress("111:3"),
}, c.filerLeaders.GetLeaders())
// remove oldest
c.RemoveClusterNode("filer", pb.ServerAddress("111:1"))
assert.Equal(t, []pb.ServerAddress{
pb.ServerAddress("111:6"),
pb.ServerAddress("111:2"),
pb.ServerAddress("111:3"),
}, c.filerLeaders.GetLeaders())
// remove oldest
c.RemoveClusterNode("filer", pb.ServerAddress("111:1"))
}

weed/command/filer.go (9 changes)

@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"net/http"
 	"os"
-	"strings"
 	"time"

 	"google.golang.org/grpc/reflection"
@@ -46,7 +45,6 @@ type FilerOptions struct {
 	enableNotification *bool
 	disableHttp        *bool
 	cipher             *bool
-	peers              *string
 	metricsHttpPort    *int
 	saveToFilerLimit   *int
 	defaultLevelDbDirectory *string
@@ -72,7 +70,6 @@ func init() {
 	f.rack = cmdFiler.Flag.String("rack", "", "prefer to write to volumes in this rack")
 	f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
 	f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
-	f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
 	f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
 	f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
 	f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
@@ -186,11 +183,6 @@ func (fo *FilerOptions) startFiler() {

 	defaultLevelDbDirectory := util.ResolvePath(*fo.defaultLevelDbDirectory + "/filerldb2")

-	var peers []string
-	if *fo.peers != "" {
-		peers = strings.Split(*fo.peers, ",")
-	}
-
 	filerAddress := pb.NewServerAddress(*fo.ip, *fo.port, *fo.portGrpc)

 	fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{
@@ -207,7 +199,6 @@ func (fo *FilerOptions) startFiler() {
 		Host:                  filerAddress,
 		Cipher:                *fo.cipher,
 		SaveToFilerLimit:      int64(*fo.saveToFilerLimit),
-		Filers:                pb.FromAddressStrings(peers),
 		ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024,
 	})
 	if nfs_err != nil {
2
weed/command/filer_meta_tail.go

@ -30,6 +30,8 @@ var cmdFilerMetaTail = &Command{
weed filer.meta.tail -timeAgo=30h | jq . weed filer.meta.tail -timeAgo=30h | jq .
weed filer.meta.tail -timeAgo=30h | jq .eventNotification.newEntry.name weed filer.meta.tail -timeAgo=30h | jq .eventNotification.newEntry.name
weed filer.meta.tail -timeAgo=30h -es=http://<elasticSearchServerHost>:<port> -es.index=seaweedfs
`, `,
} }

weed/command/server.go (1 change)

@@ -106,7 +106,6 @@ func init() {
 	filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 4, "split files larger than the limit")
 	filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
 	filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
-	filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
 	filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
 	filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size")

weed/command/shell.go (7 changes)

@@ -37,7 +37,7 @@ func runShell(command *Command, args []string) bool {
 	util.LoadConfiguration("security", false)
 	shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

-	if *shellOptions.Masters == "" && *shellInitialFiler == "" {
+	if *shellOptions.Masters == "" {
 		util.LoadConfiguration("shell", false)
 		v := util.GetViper()
 		cluster := v.GetString("cluster.default")
@@ -45,15 +45,14 @@ func runShell(command *Command, args []string) bool {
 			cluster = *shellCluster
 		}
 		if cluster == "" {
-			*shellOptions.Masters, *shellInitialFiler = "localhost:9333", "localhost:8888"
+			*shellOptions.Masters = "localhost:9333"
 		} else {
 			*shellOptions.Masters = v.GetString("cluster." + cluster + ".master")
 			*shellInitialFiler = v.GetString("cluster." + cluster + ".filer")
-			fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)
 		}
 	}

+	fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)
 	shellOptions.FilerAddress = pb.ServerAddress(*shellInitialFiler)
 	shellOptions.Directory = "/"

weed/filer/filechunk_manifest.go (4 changes)

@@ -6,6 +6,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"io"
 	"math"
+	"math/rand"
 	"net/url"
 	"strings"
 	"time"
@@ -142,6 +143,9 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, cipherKe
 	var shouldRetry bool
 	var totalWritten int

+	rand.Shuffle(len(urlStrings), func(i, j int) {
+		urlStrings[i], urlStrings[j] = urlStrings[j], urlStrings[i]
+	})
 	for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
 		for _, urlString := range urlStrings {
 			var localProcesed int
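
The added rand.Shuffle randomizes the order of candidate replica URLs before the retry loop, so parallel readers spread load across replicas instead of all starting with the first URL. A standalone sketch of the same pattern, with hypothetical URLs in place of the volume-lookup results:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// hypothetical replica locations for one chunk
	urls := []string{
		"http://volume1:8080/3,0101",
		"http://volume2:8080/3,0101",
		"http://volume3:8080/3,0101",
	}

	// same call as in retriedStreamFetchChunkData: in-place random permutation
	rand.Shuffle(len(urls), func(i, j int) {
		urls[i], urls[j] = urls[j], urls[i]
	})

	for _, u := range urls {
		fmt.Println("would try:", u) // the retry loop walks this shuffled order
	}
}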

weed/filer/filechunks_read.go (2 changes)

@@ -40,7 +40,7 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterva
 	for _, point := range points {
 		if point.isStart {
 			if len(queue) > 0 {
-				lastIndex := len(queue) -1
+				lastIndex := len(queue) - 1
 				lastPoint := queue[lastIndex]
 				if point.x != prevX && lastPoint.ts < point.ts {
 					visibles = addToVisibles(visibles, prevX, lastPoint, point)

weed/filer/filechunks_read_test.go (8 changes)

@@ -52,7 +52,7 @@ func TestReadResolvedChunks(t *testing.T) {

 func TestRandomizedReadResolvedChunks(t *testing.T) {

-	var limit int64 = 1024*1024
+	var limit int64 = 1024 * 1024
 	array := make([]int64, limit)
 	var chunks []*filer_pb.FileChunk
 	for ts := int64(0); ts < 1024; ts++ {
@@ -75,7 +75,7 @@ func TestRandomizedReadResolvedChunks(t *testing.T) {
 	visibles := readResolvedChunks(chunks)

 	for _, visible := range visibles {
-		for i := visible.start; i<visible.stop;i++{
+		for i := visible.start; i < visible.stop; i++ {
 			if array[i] != visible.modifiedTime {
 				t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTime)
 			}
@@ -101,12 +101,12 @@ func randomWrite(array []int64, start int64, size int64, ts int64) *filer_pb.Fil

 func TestSequentialReadResolvedChunks(t *testing.T) {

-	var chunkSize int64 = 1024*1024*2
+	var chunkSize int64 = 1024 * 1024 * 2
 	var chunks []*filer_pb.FileChunk
 	for ts := int64(0); ts < 13; ts++ {
 		chunks = append(chunks, &filer_pb.FileChunk{
 			FileId: "",
-			Offset: chunkSize*ts,
+			Offset: chunkSize * ts,
 			Size:   uint64(chunkSize),
 			Mtime:  1,
 		})

weed/filer/filer.go (46 changes)

@@ -3,7 +3,9 @@ package filer
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/cluster"
 	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"os"
 	"strings"
 	"time"
@@ -50,7 +52,7 @@ type Filer struct {
 func NewFiler(masters []pb.ServerAddress, grpcDialOption grpc.DialOption,
 	filerHost pb.ServerAddress, collection string, replication string, dataCenter string, notifyFn func()) *Filer {
 	f := &Filer{
-		MasterClient:        wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, dataCenter, masters),
+		MasterClient:        wdclient.NewMasterClient(grpcDialOption, cluster.FilerType, filerHost, dataCenter, masters),
 		fileIdDeletionQueue: util.NewUnboundedQueue(),
 		GrpcDialOption:      grpcDialOption,
 		FilerConf:           NewFilerConf(),
@@ -66,22 +68,38 @@ func NewFiler(masters []pb.ServerAddress, grpcDialOption grpc.DialOption,
 	return f
 }

-func (f *Filer) AggregateFromPeers(self pb.ServerAddress, filers []pb.ServerAddress) {
-
-	// set peers
-	found := false
-	for _, peer := range filers {
-		if peer == self {
-			found = true
-		}
-	}
-	if !found {
-		filers = append(filers, self)
-	}
+func (f *Filer) AggregateFromPeers(self pb.ServerAddress) {

-	f.MetaAggregator = NewMetaAggregator(filers, f.GrpcDialOption)
-	f.MetaAggregator.StartLoopSubscribe(f, self)
+	f.MetaAggregator = NewMetaAggregator(f, self, f.GrpcDialOption)
+	f.MasterClient.OnPeerUpdate = f.MetaAggregator.OnPeerUpdate
+	for _, peerUpdate := range f.ListExistingPeerUpdates() {
+		f.MetaAggregator.OnPeerUpdate(peerUpdate)
+	}
+}
+
+func (f *Filer) ListExistingPeerUpdates() (existingNodes []*master_pb.ClusterNodeUpdate){
+
+	if grpcErr := pb.WithMasterClient(f.MasterClient.GetMaster(), f.GrpcDialOption, func(client master_pb.SeaweedClient) error {
+		resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
+			ClientType: cluster.FilerType,
+		})
+
+		glog.V(0).Infof("the cluster has %d filers\n", len(resp.ClusterNodes))
+		for _, node := range resp.ClusterNodes {
+			existingNodes = append(existingNodes, &master_pb.ClusterNodeUpdate{
+				NodeType: cluster.FilerType,
+				Address:  node.Address,
+				IsLeader: node.IsLeader,
+				IsAdd:    true,
+			})
+		}
+		return err
+	}); grpcErr != nil {
+		glog.V(0).Infof("connect to %s: %v", f.MasterClient.GetMaster(), grpcErr)
+	}
+	return
 }

 func (f *Filer) SetStore(store FilerStore) {
@@ -117,7 +135,7 @@ func (fs *Filer) GetMaster() pb.ServerAddress {
 	return fs.MasterClient.GetMaster()
 }

-func (fs *Filer) KeepConnectedToMaster() {
+func (fs *Filer) KeepMasterClientConnected() {
 	fs.MasterClient.KeepConnectedToMaster()
 }
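
With the -peers flag gone, a filer now learns about its peers from the master: AggregateFromPeers registers MetaAggregator.OnPeerUpdate as the MasterClient callback and replays the master's current filer list once at startup. A stripped-down sketch of that callback wiring, using stand-in types rather than the real SeaweedFS ones:

package main

import "fmt"

// stand-in for master_pb.ClusterNodeUpdate
type ClusterNodeUpdate struct {
	NodeType string
	Address  string
	IsAdd    bool
}

// stand-in for wdclient.MasterClient: it invokes OnPeerUpdate for each
// membership event streamed back on the KeepConnected call
type MasterClient struct {
	OnPeerUpdate func(*ClusterNodeUpdate)
}

// stand-in for filer.MetaAggregator
type MetaAggregator struct{ active map[string]bool }

func (ma *MetaAggregator) OnPeerUpdate(u *ClusterNodeUpdate) {
	if u.NodeType != "filer" {
		return
	}
	if u.IsAdd {
		ma.active[u.Address] = true
		fmt.Println("subscribe to", u.Address) // real code spawns subscribeToOneFiler
	} else {
		delete(ma.active, u.Address)
		fmt.Println("stop subscribing to", u.Address) // real loop checks isActive and exits
	}
}

func main() {
	mc := &MasterClient{}
	ma := &MetaAggregator{active: map[string]bool{}}
	mc.OnPeerUpdate = ma.OnPeerUpdate // same wiring as AggregateFromPeers

	mc.OnPeerUpdate(&ClusterNodeUpdate{NodeType: "filer", Address: "filer2:8888", IsAdd: true})
	mc.OnPeerUpdate(&ClusterNodeUpdate{NodeType: "filer", Address: "filer2:8888", IsAdd: false})
}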

weed/filer/filer_delete_entry.go (63 changes)

@@ -15,6 +15,9 @@ const (
 	MsgFailDelNonEmptyFolder = "fail to delete non-empty folder"
 )

+type OnChunksFunc func([]*filer_pb.FileChunk) error
+type OnHardLinkIdsFunc func([]HardLinkId) error
+
 func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (err error) {
 	if p == "/" {
 		return nil
@@ -27,20 +30,29 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR

 	isDeleteCollection := f.isBucket(entry)

-	var chunks []*filer_pb.FileChunk
-	var hardLinkIds []HardLinkId
-	chunks = append(chunks, entry.Chunks...)
 	if entry.IsDirectory() {
 		// delete the folder children, not including the folder itself
-		var dirChunks []*filer_pb.FileChunk
-		var dirHardLinkIds []HardLinkId
-		dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures)
+		err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures, func(chunks []*filer_pb.FileChunk) error {
+			if shouldDeleteChunks && !isDeleteCollection {
+				f.DirectDeleteChunks(chunks)
+			}
+			return nil
+		}, func(hardLinkIds []HardLinkId) error {
+			// A case not handled:
+			// what if the chunk is in a different collection?
+			if shouldDeleteChunks {
+				f.maybeDeleteHardLinks(hardLinkIds)
+			}
+			return nil
+		})
 		if err != nil {
 			glog.V(0).Infof("delete directory %s: %v", p, err)
 			return fmt.Errorf("delete directory %s: %v", p, err)
 		}
-		chunks = append(chunks, dirChunks...)
-		hardLinkIds = append(hardLinkIds, dirHardLinkIds...)
 	}
+
+	if shouldDeleteChunks && !isDeleteCollection {
+		f.DirectDeleteChunks(entry.Chunks)
+	}

 	// delete the file or folder
@@ -49,15 +61,6 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 		return fmt.Errorf("delete file %s: %v", p, err)
 	}

-	if shouldDeleteChunks && !isDeleteCollection {
-		f.DirectDeleteChunks(chunks)
-	}
-	// A case not handled:
-	// what if the chunk is in a different collection?
-	if shouldDeleteChunks {
-		f.maybeDeleteHardLinks(hardLinkIds)
-	}
-
 	if isDeleteCollection {
 		collectionName := entry.Name()
 		f.doDeleteCollection(collectionName)
@@ -67,7 +70,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 	return nil
 }

-func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {
+func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32, onChunksFn OnChunksFunc, onHardLinkIdsFn OnHardLinkIdsFunc) (err error) {

 	lastFileName := ""
 	includeLastFile := false
@@ -76,34 +79,30 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 		entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
 		if err != nil {
 			glog.Errorf("list folder %s: %v", entry.FullPath, err)
-			return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
+			return fmt.Errorf("list folder %s: %v", entry.FullPath, err)
 		}
 		if lastFileName == "" && !isRecursive && len(entries) > 0 {
 			// only for first iteration in the loop
-			glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
-			return nil, nil, fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
+			glog.V(0).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+			return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
 		}

 		for _, sub := range entries {
 			lastFileName = sub.Name()
-			var dirChunks []*filer_pb.FileChunk
-			var dirHardLinkIds []HardLinkId
 			if sub.IsDirectory() {
 				subIsDeletingBucket := f.isBucket(sub)
-				dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil)
-				chunks = append(chunks, dirChunks...)
-				hardlinkIds = append(hardlinkIds, dirHardLinkIds...)
+				err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil, onChunksFn, onHardLinkIdsFn)
 			} else {
 				f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster, nil)
 				if len(sub.HardLinkId) != 0 {
 					// hard link chunk data are deleted separately
-					hardlinkIds = append(hardlinkIds, sub.HardLinkId)
+					err = onHardLinkIdsFn([]HardLinkId{sub.HardLinkId})
 				} else {
-					chunks = append(chunks, sub.Chunks...)
+					err = onChunksFn(sub.Chunks)
 				}
 			}
 			if err != nil && !ignoreRecursiveError {
-				return nil, nil, err
+				return err
 			}
 		}

@@ -113,15 +112,15 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 		}
 	}

-	glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
+	glog.V(3).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)

 	if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
-		return nil, nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
+		return fmt.Errorf("filer store delete: %v", storeDeletionErr)
 	}

 	f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)

-	return chunks, hardlinkIds, nil
+	return nil
 }

 func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
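
The rewrite above turns folder deletion from "walk the tree, accumulate every chunk and hard-link id, delete at the end" into callbacks that fire during the walk, so deleting a large directory no longer buffers all chunk references in memory. A toy sketch of that inversion, with hypothetical types standing in for the filer's entries:

package main

import "fmt"

type node struct {
	chunks   []string
	children []*node
}

// old shape: recurse and return one accumulated slice
func collect(n *node) (all []string) {
	all = append(all, n.chunks...)
	for _, c := range n.children {
		all = append(all, collect(c)...)
	}
	return
}

// new shape: stream each batch to a callback as it is discovered,
// mirroring the OnChunksFunc parameter of doBatchDeleteFolderMetaAndData
func walk(n *node, onChunks func([]string) error) error {
	if err := onChunks(n.chunks); err != nil {
		return err
	}
	for _, c := range n.children {
		if err := walk(c, onChunks); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	root := &node{chunks: []string{"a"}, children: []*node{{chunks: []string{"b", "c"}}}}
	_ = walk(root, func(batch []string) error {
		fmt.Println("delete batch:", batch) // delete right away; memory stays flat
		return nil
	})
}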

weed/filer/meta_aggregator.go (53 changes)

@@ -3,6 +3,8 @@ package filer
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/cluster"
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"io"
 	"sync"
@@ -18,9 +20,13 @@ import (
 )

 type MetaAggregator struct {
-	filers         []pb.ServerAddress
-	grpcDialOption grpc.DialOption
-	MetaLogBuffer  *log_buffer.LogBuffer
+	filer           *Filer
+	self            pb.ServerAddress
+	isLeader        bool
+	grpcDialOption  grpc.DialOption
+	MetaLogBuffer   *log_buffer.LogBuffer
+	peerStatues     map[pb.ServerAddress]struct{}
+	peerStatuesLock sync.Mutex
 	// notifying clients
 	ListenersLock sync.Mutex
 	ListenersCond *sync.Cond
@@ -28,10 +34,12 @@ type MetaAggregator struct {

 // MetaAggregator only aggregates data "on the fly". The logs are not re-persisted to disk.
 // The old data comes from what each LocalMetadata persisted on disk.
-func NewMetaAggregator(filers []pb.ServerAddress, grpcDialOption grpc.DialOption) *MetaAggregator {
+func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc.DialOption) *MetaAggregator {
 	t := &MetaAggregator{
-		filers:         filers,
+		filer:          filer,
+		self:           self,
 		grpcDialOption: grpcDialOption,
+		peerStatues:    make(map[pb.ServerAddress]struct{}),
 	}
 	t.ListenersCond = sync.NewCond(&t.ListenersLock)
 	t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, func() {
@@ -40,10 +48,35 @@ func NewMetaAggregator(filers []pb.ServerAddress, grpcDialOption grpc.DialOption
 	return t
 }

-func (ma *MetaAggregator) StartLoopSubscribe(f *Filer, self pb.ServerAddress) {
-	for _, filer := range ma.filers {
-		go ma.subscribeToOneFiler(f, self, filer)
+func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate) {
+	if update.NodeType != cluster.FilerType {
+		return
 	}
+
+	address := pb.ServerAddress(update.Address)
+	if update.IsAdd {
+		// every filer should subscribe to a new filer
+		ma.setActive(address, true)
+		go ma.subscribeToOneFiler(ma.filer, ma.self, address)
+	} else {
+		ma.setActive(address, false)
+	}
+}
+
+func (ma *MetaAggregator) setActive(address pb.ServerAddress, isActive bool) {
+	ma.peerStatuesLock.Lock()
+	defer ma.peerStatuesLock.Unlock()
+	if isActive {
+		ma.peerStatues[address] = struct{}{}
+	} else {
+		delete(ma.peerStatues, address)
+	}
+}
+func (ma *MetaAggregator) isActive(address pb.ServerAddress)(isActive bool) {
+	ma.peerStatuesLock.Lock()
+	defer ma.peerStatuesLock.Unlock()
+	_, isActive = ma.peerStatues[address]
+	return
 }

 func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress) {
@@ -149,6 +182,10 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self pb.ServerAddress, p
 			}
 		})

+		if !ma.isActive(peer) {
+			glog.V(0).Infof("stop subscribing remote %s meta change", peer)
+			return
+		}
 		if err != nil {
 			glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err)
 			time.Sleep(1733 * time.Millisecond)

weed/filer/read_remote.go (4 changes)

@@ -25,9 +25,9 @@ func MapRemoteStorageLocationPathToFullPath(localMountedDir util.FullPath, remot
 	return localMountedDir.Child(remoteLocationPath[len(remoteMountedLocation.Path):])
 }

-func DownloadToLocal(filerClient filer_pb.FilerClient, remoteConf *remote_pb.RemoteConf, remoteLocation *remote_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
+func CacheRemoteObjectToLocalCluster(filerClient filer_pb.FilerClient, remoteConf *remote_pb.RemoteConf, remoteLocation *remote_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
 	return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-		_, err := client.DownloadToLocal(context.Background(), &filer_pb.DownloadToLocalRequest{
+		_, err := client.CacheRemoteObjectToLocalCluster(context.Background(), &filer_pb.CacheRemoteObjectToLocalClusterRequest{
 			Directory: string(parent),
 			Name:      entry.Name,
 		})

weed/filer/redis2/universal_redis_store.go (4 changes)

@@ -134,8 +134,8 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, ful
 	}

 	members, err := store.Client.ZRangeByLex(ctx, genDirectoryListKey(string(fullpath)), &redis.ZRangeBy{
-		Min:    "-",
-		Max:    "+",
+		Min: "-",
+		Max: "+",
 	}).Result()
 	if err != nil {
 		return fmt.Errorf("DeleteFolderChildren %s : %v", fullpath, err)

weed/filesys/dir_rename.go (3 changes)

@@ -67,7 +67,6 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector

 	return nil
 }
-
 func (dir *Dir) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamRenameEntryResponse) error {
 	// comes from filer StreamRenameEntry, can only be create or delete entry
@@ -119,7 +118,7 @@ func (dir *Dir) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamR
 		}
 		dir.wfs.handlesLock.Unlock()

-	}else if resp.EventNotification.OldEntry != nil {
+	} else if resp.EventNotification.OldEntry != nil {
 		// without new entry, only old entry name exists. This is the second step to delete old entry
 		if err := dir.wfs.metaCache.AtomicUpdateEntryFromFiler(ctx, util.NewFullPath(resp.Directory, resp.EventNotification.OldEntry.Name), nil); err != nil {
 			return err

weed/filesys/file.go (6 changes)

@@ -364,15 +364,15 @@ func (file *File) getEntry() *filer_pb.Entry {

 func (file *File) downloadRemoteEntry(entry *filer_pb.Entry) (*filer_pb.Entry, error) {

 	err := file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

-		request := &filer_pb.DownloadToLocalRequest{
+		request := &filer_pb.CacheRemoteObjectToLocalClusterRequest{
 			Directory: file.dir.FullPath(),
 			Name:      entry.Name,
 		}

 		glog.V(4).Infof("download entry: %v", request)
-		resp, err := client.DownloadToLocal(context.Background(), request)
+		resp, err := client.CacheRemoteObjectToLocalCluster(context.Background(), request)
 		if err != nil {
-			glog.Errorf("DownloadToLocal file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+			glog.Errorf("CacheRemoteObjectToLocalCluster file %s/%s: %v", file.dir.FullPath(), file.Name, err)
 			return fuse.EIO
 		}

weed/iamapi/iamapi_handlers.go (8 changes)

@@ -8,7 +8,7 @@ import (
 	"net/http"
 )

-func writeIamErrorResponse(w http.ResponseWriter, err error, object string, value string, msg error) {
+func writeIamErrorResponse(w http.ResponseWriter, r *http.Request, err error, object string, value string, msg error) {
 	errCode := err.Error()
 	errorResp := ErrorResponse{}
 	errorResp.Error.Type = "Sender"
@@ -22,10 +22,10 @@ func writeIamErrorResponse(w http.ResponseWriter, err error, object string, valu
 	case iam.ErrCodeNoSuchEntityException:
 		msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value)
 		errorResp.Error.Message = &msg
-		s3err.WriteXMLResponse(w, http.StatusNotFound, errorResp)
+		s3err.WriteXMLResponse(w, r, http.StatusNotFound, errorResp)
 	case iam.ErrCodeServiceFailureException:
-		s3err.WriteXMLResponse(w, http.StatusInternalServerError, errorResp)
+		s3err.WriteXMLResponse(w, r, http.StatusInternalServerError, errorResp)
 	default:
-		s3err.WriteXMLResponse(w, http.StatusInternalServerError, errorResp)
+		s3err.WriteXMLResponse(w, r, http.StatusInternalServerError, errorResp)
 	}
 }

22
weed/iamapi/iamapi_management_handlers.go

@ -362,7 +362,7 @@ func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, valu
func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
if err := r.ParseForm(); err != nil { if err := r.ParseForm(); err != nil {
s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
return return
} }
	values := r.PostForm
@@ -370,7 +370,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
	s3cfgLock.RLock()
	s3cfg := &iam_pb.S3ApiConfiguration{}
	if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}
	s3cfgLock.RUnlock()
@@ -392,7 +392,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
		userName := values.Get("UserName")
		response, err = iama.GetUser(s3cfg, userName)
		if err != nil {
-			writeIamErrorResponse(w, err, "user", userName, nil)
+			writeIamErrorResponse(w, r, err, "user", userName, nil)
			return
		}
		changed = false
@@ -400,7 +400,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
		userName := values.Get("UserName")
		response, err = iama.DeleteUser(s3cfg, userName)
		if err != nil {
-			writeIamErrorResponse(w, err, "user", userName, nil)
+			writeIamErrorResponse(w, r, err, "user", userName, nil)
			return
		}
	case "CreateAccessKey":
@@ -411,33 +411,33 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
		response, err = iama.CreatePolicy(s3cfg, values)
		if err != nil {
			glog.Errorf("CreatePolicy: %+v", err)
-			s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
			return
		}
	case "PutUserPolicy":
		response, err = iama.PutUserPolicy(s3cfg, values)
		if err != nil {
			glog.Errorf("PutUserPolicy: %+v", err)
-			s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
			return
		}
	case "GetUserPolicy":
		response, err = iama.GetUserPolicy(s3cfg, values)
		if err != nil {
-			writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
+			writeIamErrorResponse(w, r, err, "user", values.Get("UserName"), nil)
			return
		}
		changed = false
	case "DeleteUserPolicy":
		if response, err = iama.DeleteUserPolicy(s3cfg, values); err != nil {
-			writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
+			writeIamErrorResponse(w, r, err, "user", values.Get("UserName"), nil)
		}
	default:
		errNotImplemented := s3err.GetAPIError(s3err.ErrNotImplemented)
		errorResponse := ErrorResponse{}
		errorResponse.Error.Code = &errNotImplemented.Code
		errorResponse.Error.Message = &errNotImplemented.Description
-		s3err.WriteXMLResponse(w, errNotImplemented.HTTPStatusCode, errorResponse)
+		s3err.WriteXMLResponse(w, r, errNotImplemented.HTTPStatusCode, errorResponse)
		return
	}
	if changed {
@@ -445,9 +445,9 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
		err := iama.s3ApiConfig.PutS3ApiConfiguration(s3cfg)
		s3cfgLock.Unlock()
		if err != nil {
-			writeIamErrorResponse(w, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err)
+			writeIamErrorResponse(w, r, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err)
			return
		}
	}
-	s3err.WriteXMLResponse(w, http.StatusOK, response)
+	s3err.WriteXMLResponse(w, r, http.StatusOK, response)
}
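One mechanical change repeats through every hunk above and through the S3 handlers later in this diff: the response helpers now take the *http.Request immediately after the http.ResponseWriter. Judging from the other files this PR touches (weed/s3api/s3err/error_handler.go, weed/s3api/stats.go), the request is threaded in so the writers can do per-request logging and metrics, though that code is not shown in this section. The shape of the change, lifted from the hunks above:

	// before: the request trailed the other arguments
	s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)

	// after: conventional (w, r, ...) ordering throughout
	s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)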

2  weed/iamapi/iamapi_server.go

@@ -49,7 +49,7 @@ var s3ApiConfigure IamS3ApiConfig
func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) {
	s3ApiConfigure = IamS3ApiConfigure{
		option:       option,
-		masterClient: wdclient.NewMasterClient(option.GrpcDialOption, pb.AdminShellClient, "", "", option.Masters),
+		masterClient: wdclient.NewMasterClient(option.GrpcDialOption, "iam", "", "", option.Masters),
	}
	s3Option := s3api.S3ApiServerOption{Filer: option.Filer}
	iamApiServer = &IamApiServer{

9  weed/messaging/broker/broker_grpc_server_discovery.go

@@ -3,6 +3,7 @@ package broker
import (
	"context"
	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/cluster"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"time"
@@ -93,14 +94,16 @@ func (broker *MessageBroker) checkFilers() {
	for !found {
		for _, master := range masters {
			err := broker.withMasterClient(master, func(client master_pb.SeaweedClient) error {
-				resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{
-					ClientType: "filer",
+				resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
+					ClientType: cluster.FilerType,
				})
				if err != nil {
					return err
				}
-				filers = append(filers, pb.FromAddressStrings(resp.GrpcAddresses)...)
+				for _, clusterNode := range resp.ClusterNodes {
+					filers = append(filers, pb.ServerAddress(clusterNode.Address))
+				}
				return nil
			})
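The broker now discovers filers through the generic cluster-membership RPC rather than the removed ListMasterClients call. As a minimal standalone sketch of the same query (the gRPC connection setup is assumed and is not part of this patch; SeaweedFS itself goes through its withMasterClient-style helpers):

	import (
		"context"

		"github.com/chrislusf/seaweedfs/weed/cluster"
		"github.com/chrislusf/seaweedfs/weed/pb"
		"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
		"google.golang.org/grpc"
	)

	// listFilers asks one master for every known filer node.
	func listFilers(conn *grpc.ClientConn) ([]pb.ServerAddress, error) {
		client := master_pb.NewSeaweedClient(conn)
		resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
			ClientType: cluster.FilerType, // still "filer", but now a shared constant
		})
		if err != nil {
			return nil, err
		}
		var filers []pb.ServerAddress
		for _, node := range resp.ClusterNodes {
			// each ClusterNode also carries Version and IsLeader (see master.proto below)
			filers = append(filers, pb.ServerAddress(node.Address))
		}
		return filers, nil
	}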

6  weed/pb/filer.proto

@@ -69,7 +69,7 @@ service SeaweedFiler {
	rpc KvPut (KvPutRequest) returns (KvPutResponse) {
	}
-	rpc DownloadToLocal (DownloadToLocalRequest) returns (DownloadToLocalResponse) {
+	rpc CacheRemoteObjectToLocalCluster (CacheRemoteObjectToLocalClusterRequest) returns (CacheRemoteObjectToLocalClusterResponse) {
	}
}
@@ -403,10 +403,10 @@ message FilerConf {
/////////////////////////
// Remote Storage related
/////////////////////////
-message DownloadToLocalRequest {
+message CacheRemoteObjectToLocalClusterRequest {
	string directory = 1;
	string name = 2;
}
-message DownloadToLocalResponse {
+message CacheRemoteObjectToLocalClusterResponse {
	Entry entry = 1;
}
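Since gRPC routes by full method name, this rename changes the wire path to /filer_pb.SeaweedFiler/CacheRemoteObjectToLocalCluster, so an old client calling DownloadToLocal against a new filer fails with Unimplemented; both sides have to move together. A hedged usage sketch of the renamed call; the directory and file name below are made-up examples:

	import (
		"context"

		"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	)

	// cacheRemoteObject asks a filer to pull a remote-mounted object onto the
	// local cluster and returns the now-cached entry.
	func cacheRemoteObject(client filer_pb.SeaweedFilerClient) (*filer_pb.Entry, error) {
		resp, err := client.CacheRemoteObjectToLocalCluster(context.Background(),
			&filer_pb.CacheRemoteObjectToLocalClusterRequest{
				Directory: "/buckets/example", // hypothetical directory
				Name:      "example.dat",      // hypothetical file name
			})
		if err != nil {
			return nil, err
		}
		return resp.Entry, nil
	}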

221  weed/pb/filer_pb/filer.pb.go

@@ -3334,7 +3334,7 @@ func (x *FilerConf) GetLocations() []*FilerConf_PathConf {
/////////////////////////
// Remote Storage related
/////////////////////////
-type DownloadToLocalRequest struct {
+type CacheRemoteObjectToLocalClusterRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
@@ -3343,8 +3343,8 @@ type DownloadToLocalRequest struct {
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
}
-func (x *DownloadToLocalRequest) Reset() {
-	*x = DownloadToLocalRequest{}
+func (x *CacheRemoteObjectToLocalClusterRequest) Reset() {
+	*x = CacheRemoteObjectToLocalClusterRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_filer_proto_msgTypes[51]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -3352,13 +3352,13 @@ func (x *DownloadToLocalRequest) Reset() {
	}
}
-func (x *DownloadToLocalRequest) String() string {
+func (x *CacheRemoteObjectToLocalClusterRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}
-func (*DownloadToLocalRequest) ProtoMessage() {}
+func (*CacheRemoteObjectToLocalClusterRequest) ProtoMessage() {}
-func (x *DownloadToLocalRequest) ProtoReflect() protoreflect.Message {
+func (x *CacheRemoteObjectToLocalClusterRequest) ProtoReflect() protoreflect.Message {
	mi := &file_filer_proto_msgTypes[51]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -3370,26 +3370,26 @@ func (x *DownloadToLocalRequest) ProtoReflect() protoreflect.Message {
	return mi.MessageOf(x)
}
-// Deprecated: Use DownloadToLocalRequest.ProtoReflect.Descriptor instead.
-func (*DownloadToLocalRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use CacheRemoteObjectToLocalClusterRequest.ProtoReflect.Descriptor instead.
+func (*CacheRemoteObjectToLocalClusterRequest) Descriptor() ([]byte, []int) {
	return file_filer_proto_rawDescGZIP(), []int{51}
}
-func (x *DownloadToLocalRequest) GetDirectory() string {
+func (x *CacheRemoteObjectToLocalClusterRequest) GetDirectory() string {
	if x != nil {
		return x.Directory
	}
	return ""
}
-func (x *DownloadToLocalRequest) GetName() string {
+func (x *CacheRemoteObjectToLocalClusterRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
-type DownloadToLocalResponse struct {
+type CacheRemoteObjectToLocalClusterResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
@@ -3397,8 +3397,8 @@ type DownloadToLocalResponse struct {
	Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
}
-func (x *DownloadToLocalResponse) Reset() {
-	*x = DownloadToLocalResponse{}
+func (x *CacheRemoteObjectToLocalClusterResponse) Reset() {
+	*x = CacheRemoteObjectToLocalClusterResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_filer_proto_msgTypes[52]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -3406,13 +3406,13 @@ func (x *DownloadToLocalResponse) Reset() {
	}
}
-func (x *DownloadToLocalResponse) String() string {
+func (x *CacheRemoteObjectToLocalClusterResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}
-func (*DownloadToLocalResponse) ProtoMessage() {}
+func (*CacheRemoteObjectToLocalClusterResponse) ProtoMessage() {}
-func (x *DownloadToLocalResponse) ProtoReflect() protoreflect.Message {
+func (x *CacheRemoteObjectToLocalClusterResponse) ProtoReflect() protoreflect.Message {
	mi := &file_filer_proto_msgTypes[52]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -3424,12 +3424,12 @@ func (x *DownloadToLocalResponse) ProtoReflect() protoreflect.Message {
	return mi.MessageOf(x)
}
-// Deprecated: Use DownloadToLocalResponse.ProtoReflect.Descriptor instead.
-func (*DownloadToLocalResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use CacheRemoteObjectToLocalClusterResponse.ProtoReflect.Descriptor instead.
+func (*CacheRemoteObjectToLocalClusterResponse) Descriptor() ([]byte, []int) {
	return file_filer_proto_rawDescGZIP(), []int{52}
}
-func (x *DownloadToLocalResponse) GetEntry() *Entry {
+func (x *CacheRemoteObjectToLocalClusterResponse) GetEntry() *Entry {
	if x != nil {
		return x.Entry
	}
@@ -4042,16 +4042,18 @@ var file_filer_proto_rawDesc = []byte{
	0x68, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x76,
	0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x43, 0x6f, 0x75, 0x6e, 0x74,
	0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, 0x20,
-	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x4a, 0x0a,
-	0x16, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
+	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x5a, 0x0a,
+	0x26, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+	0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63,
	0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65,
	0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x40, 0x0a, 0x17, 0x44, 0x6f, 0x77,
-	0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70,
+	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x50, 0x0a, 0x27, 0x43, 0x61, 0x63,
+	0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f,
+	0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70,
	0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20,
	0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45,
-	0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x32, 0x98, 0x0e, 0x0a, 0x0c,
+	0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x32, 0xc9, 0x0e, 0x0a, 0x0c,
	0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14,
	0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45,
	0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
@@ -4159,18 +4161,21 @@ var file_filer_proto_rawDesc = []byte{
	0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65,
	0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
	0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50,
-	0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f,
-	0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12,
-	0x20, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c,
-	0x6f, 0x61, 0x64, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
-	0x74, 0x1a, 0x21, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x6f, 0x77,
-	0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70,
-	0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65,
-	0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
-	0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61,
-	0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66,
-	0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x88, 0x01, 0x0a,
+	0x1f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+	0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+	0x12, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68,
+	0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c,
+	0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
+	0x73, 0x74, 0x1a, 0x31, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x61,
+	0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54,
+	0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73,
+	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65,
+	0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c,
+	0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+	0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65,
+	0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f,
+	0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -4187,59 +4192,59 @@ func file_filer_proto_rawDescGZIP() []byte {
var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 57)
var file_filer_proto_goTypes = []interface{}{
	(*LookupDirectoryEntryRequest)(nil),   // 0: filer_pb.LookupDirectoryEntryRequest
	(*LookupDirectoryEntryResponse)(nil),  // 1: filer_pb.LookupDirectoryEntryResponse
	(*ListEntriesRequest)(nil),            // 2: filer_pb.ListEntriesRequest
	(*ListEntriesResponse)(nil),           // 3: filer_pb.ListEntriesResponse
	(*RemoteEntry)(nil),                   // 4: filer_pb.RemoteEntry
	(*Entry)(nil),                         // 5: filer_pb.Entry
	(*FullEntry)(nil),                     // 6: filer_pb.FullEntry
	(*EventNotification)(nil),             // 7: filer_pb.EventNotification
	(*FileChunk)(nil),                     // 8: filer_pb.FileChunk
	(*FileChunkManifest)(nil),             // 9: filer_pb.FileChunkManifest
	(*FileId)(nil),                        // 10: filer_pb.FileId
	(*FuseAttributes)(nil),                // 11: filer_pb.FuseAttributes
	(*CreateEntryRequest)(nil),            // 12: filer_pb.CreateEntryRequest
	(*CreateEntryResponse)(nil),           // 13: filer_pb.CreateEntryResponse
	(*UpdateEntryRequest)(nil),            // 14: filer_pb.UpdateEntryRequest
	(*UpdateEntryResponse)(nil),           // 15: filer_pb.UpdateEntryResponse
	(*AppendToEntryRequest)(nil),          // 16: filer_pb.AppendToEntryRequest
	(*AppendToEntryResponse)(nil),         // 17: filer_pb.AppendToEntryResponse
	(*DeleteEntryRequest)(nil),            // 18: filer_pb.DeleteEntryRequest
	(*DeleteEntryResponse)(nil),           // 19: filer_pb.DeleteEntryResponse
	(*AtomicRenameEntryRequest)(nil),      // 20: filer_pb.AtomicRenameEntryRequest
	(*AtomicRenameEntryResponse)(nil),     // 21: filer_pb.AtomicRenameEntryResponse
	(*StreamRenameEntryRequest)(nil),      // 22: filer_pb.StreamRenameEntryRequest
	(*StreamRenameEntryResponse)(nil),     // 23: filer_pb.StreamRenameEntryResponse
	(*AssignVolumeRequest)(nil),           // 24: filer_pb.AssignVolumeRequest
	(*AssignVolumeResponse)(nil),          // 25: filer_pb.AssignVolumeResponse
	(*LookupVolumeRequest)(nil),           // 26: filer_pb.LookupVolumeRequest
	(*Locations)(nil),                     // 27: filer_pb.Locations
	(*Location)(nil),                      // 28: filer_pb.Location
	(*LookupVolumeResponse)(nil),          // 29: filer_pb.LookupVolumeResponse
	(*Collection)(nil),                    // 30: filer_pb.Collection
	(*CollectionListRequest)(nil),         // 31: filer_pb.CollectionListRequest
	(*CollectionListResponse)(nil),        // 32: filer_pb.CollectionListResponse
	(*DeleteCollectionRequest)(nil),       // 33: filer_pb.DeleteCollectionRequest
	(*DeleteCollectionResponse)(nil),      // 34: filer_pb.DeleteCollectionResponse
	(*StatisticsRequest)(nil),             // 35: filer_pb.StatisticsRequest
	(*StatisticsResponse)(nil),            // 36: filer_pb.StatisticsResponse
	(*GetFilerConfigurationRequest)(nil),  // 37: filer_pb.GetFilerConfigurationRequest
	(*GetFilerConfigurationResponse)(nil), // 38: filer_pb.GetFilerConfigurationResponse
	(*SubscribeMetadataRequest)(nil),      // 39: filer_pb.SubscribeMetadataRequest
	(*SubscribeMetadataResponse)(nil),     // 40: filer_pb.SubscribeMetadataResponse
	(*LogEntry)(nil),                      // 41: filer_pb.LogEntry
	(*KeepConnectedRequest)(nil),          // 42: filer_pb.KeepConnectedRequest
	(*KeepConnectedResponse)(nil),         // 43: filer_pb.KeepConnectedResponse
	(*LocateBrokerRequest)(nil),           // 44: filer_pb.LocateBrokerRequest
	(*LocateBrokerResponse)(nil),          // 45: filer_pb.LocateBrokerResponse
	(*KvGetRequest)(nil),                  // 46: filer_pb.KvGetRequest
	(*KvGetResponse)(nil),                 // 47: filer_pb.KvGetResponse
	(*KvPutRequest)(nil),                  // 48: filer_pb.KvPutRequest
	(*KvPutResponse)(nil),                 // 49: filer_pb.KvPutResponse
	(*FilerConf)(nil),                     // 50: filer_pb.FilerConf
-	(*DownloadToLocalRequest)(nil),        // 51: filer_pb.DownloadToLocalRequest
-	(*DownloadToLocalResponse)(nil),       // 52: filer_pb.DownloadToLocalResponse
+	(*CacheRemoteObjectToLocalClusterRequest)(nil),  // 51: filer_pb.CacheRemoteObjectToLocalClusterRequest
+	(*CacheRemoteObjectToLocalClusterResponse)(nil), // 52: filer_pb.CacheRemoteObjectToLocalClusterResponse
	nil,                                   // 53: filer_pb.Entry.ExtendedEntry
	nil,                                   // 54: filer_pb.LookupVolumeResponse.LocationsMapEntry
	(*LocateBrokerResponse_Resource)(nil), // 55: filer_pb.LocateBrokerResponse.Resource
@@ -4269,7 +4274,7 @@ var file_filer_proto_depIdxs = []int32{
	7,  // 20: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification
	55, // 21: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource
	56, // 22: filer_pb.FilerConf.locations:type_name -> filer_pb.FilerConf.PathConf
-	5,  // 23: filer_pb.DownloadToLocalResponse.entry:type_name -> filer_pb.Entry
+	5,  // 23: filer_pb.CacheRemoteObjectToLocalClusterResponse.entry:type_name -> filer_pb.Entry
	27, // 24: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations
	0,  // 25: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest
	2,  // 26: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest
@@ -4291,7 +4296,7 @@ var file_filer_proto_depIdxs = []int32{
	44, // 42: filer_pb.SeaweedFiler.LocateBroker:input_type -> filer_pb.LocateBrokerRequest
	46, // 43: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest
	48, // 44: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest
-	51, // 45: filer_pb.SeaweedFiler.DownloadToLocal:input_type -> filer_pb.DownloadToLocalRequest
+	51, // 45: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:input_type -> filer_pb.CacheRemoteObjectToLocalClusterRequest
	1,  // 46: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse
	3,  // 47: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse
	13, // 48: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse
@@ -4312,7 +4317,7 @@ var file_filer_proto_depIdxs = []int32{
	45, // 63: filer_pb.SeaweedFiler.LocateBroker:output_type -> filer_pb.LocateBrokerResponse
	47, // 64: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse
	49, // 65: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse
-	52, // 66: filer_pb.SeaweedFiler.DownloadToLocal:output_type -> filer_pb.DownloadToLocalResponse
+	52, // 66: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:output_type -> filer_pb.CacheRemoteObjectToLocalClusterResponse
	46, // [46:67] is the sub-list for method output_type
	25, // [25:46] is the sub-list for method input_type
	25, // [25:25] is the sub-list for extension type_name
@@ -4939,7 +4944,7 @@ func file_filer_proto_init() {
		}
	}
	file_filer_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
-		switch v := v.(*DownloadToLocalRequest); i {
+		switch v := v.(*CacheRemoteObjectToLocalClusterRequest); i {
		case 0:
			return &v.state
		case 1:
@@ -4951,7 +4956,7 @@ func file_filer_proto_init() {
		}
	}
	file_filer_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
-		switch v := v.(*DownloadToLocalResponse); i {
+		switch v := v.(*CacheRemoteObjectToLocalClusterResponse); i {
		case 0:
			return &v.state
		case 1:
@@ -5039,7 +5044,7 @@ type SeaweedFilerClient interface {
	LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error)
	KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error)
	KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error)
-	DownloadToLocal(ctx context.Context, in *DownloadToLocalRequest, opts ...grpc.CallOption) (*DownloadToLocalResponse, error)
+	CacheRemoteObjectToLocalCluster(ctx context.Context, in *CacheRemoteObjectToLocalClusterRequest, opts ...grpc.CallOption) (*CacheRemoteObjectToLocalClusterResponse, error)
}
type seaweedFilerClient struct {
@@ -5344,9 +5349,9 @@ func (c *seaweedFilerClient) KvPut(ctx context.Context, in *KvPutRequest, opts .
	return out, nil
}
-func (c *seaweedFilerClient) DownloadToLocal(ctx context.Context, in *DownloadToLocalRequest, opts ...grpc.CallOption) (*DownloadToLocalResponse, error) {
-	out := new(DownloadToLocalResponse)
-	err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DownloadToLocal", in, out, opts...)
+func (c *seaweedFilerClient) CacheRemoteObjectToLocalCluster(ctx context.Context, in *CacheRemoteObjectToLocalClusterRequest, opts ...grpc.CallOption) (*CacheRemoteObjectToLocalClusterResponse, error) {
+	out := new(CacheRemoteObjectToLocalClusterResponse)
+	err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CacheRemoteObjectToLocalCluster", in, out, opts...)
	if err != nil {
		return nil, err
	}
@@ -5375,7 +5380,7 @@ type SeaweedFilerServer interface {
	LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error)
	KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error)
	KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error)
-	DownloadToLocal(context.Context, *DownloadToLocalRequest) (*DownloadToLocalResponse, error)
+	CacheRemoteObjectToLocalCluster(context.Context, *CacheRemoteObjectToLocalClusterRequest) (*CacheRemoteObjectToLocalClusterResponse, error)
}
// UnimplementedSeaweedFilerServer can be embedded to have forward compatible implementations.
@@ -5442,8 +5447,8 @@ func (*UnimplementedSeaweedFilerServer) KvGet(context.Context, *KvGetRequest) (*
func (*UnimplementedSeaweedFilerServer) KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method KvPut not implemented")
}
-func (*UnimplementedSeaweedFilerServer) DownloadToLocal(context.Context, *DownloadToLocalRequest) (*DownloadToLocalResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DownloadToLocal not implemented")
+func (*UnimplementedSeaweedFilerServer) CacheRemoteObjectToLocalCluster(context.Context, *CacheRemoteObjectToLocalClusterRequest) (*CacheRemoteObjectToLocalClusterResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CacheRemoteObjectToLocalCluster not implemented")
}
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -5830,20 +5835,20 @@ func _SeaweedFiler_KvPut_Handler(srv interface{}, ctx context.Context, dec func(
	return interceptor(ctx, in, info, handler)
}
-func _SeaweedFiler_DownloadToLocal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DownloadToLocalRequest)
+func _SeaweedFiler_CacheRemoteObjectToLocalCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CacheRemoteObjectToLocalClusterRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
-		return srv.(SeaweedFilerServer).DownloadToLocal(ctx, in)
+		return srv.(SeaweedFilerServer).CacheRemoteObjectToLocalCluster(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
-		FullMethod: "/filer_pb.SeaweedFiler/DownloadToLocal",
+		FullMethod: "/filer_pb.SeaweedFiler/CacheRemoteObjectToLocalCluster",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(SeaweedFilerServer).DownloadToLocal(ctx, req.(*DownloadToLocalRequest))
+		return srv.(SeaweedFilerServer).CacheRemoteObjectToLocalCluster(ctx, req.(*CacheRemoteObjectToLocalClusterRequest))
	}
	return interceptor(ctx, in, info, handler)
}
@@ -5913,8 +5918,8 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
			Handler: _SeaweedFiler_KvPut_Handler,
		},
		{
-			MethodName: "DownloadToLocal",
-			Handler:    _SeaweedFiler_DownloadToLocal_Handler,
+			MethodName: "CacheRemoteObjectToLocalCluster",
+			Handler:    _SeaweedFiler_CacheRemoteObjectToLocalCluster_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
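All of the filer.pb.go churn above is mechanical protoc output for the rename: the long byte-slice edits are simply the re-encoded file descriptor, which is why this file is meant to be regenerated from filer.proto rather than patched by hand; the hand-written call sites are updated in the other files of this PR.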

394  weed/pb/master.proto

@@ -7,209 +7,222 @@ option go_package = "github.com/chrislusf/seaweedfs/weed/pb/master_pb";
//////////////////////////////////////////////////
service Seaweed {
	rpc SendHeartbeat (stream Heartbeat) returns (stream HeartbeatResponse) {
	}
-	rpc KeepConnected (stream KeepConnectedRequest) returns (stream VolumeLocation) {
+	rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
	}
	rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
	}
	rpc Assign (AssignRequest) returns (AssignResponse) {
	}
	rpc Statistics (StatisticsRequest) returns (StatisticsResponse) {
	}
	rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) {
	}
	rpc CollectionDelete (CollectionDeleteRequest) returns (CollectionDeleteResponse) {
	}
	rpc VolumeList (VolumeListRequest) returns (VolumeListResponse) {
	}
	rpc LookupEcVolume (LookupEcVolumeRequest) returns (LookupEcVolumeResponse) {
	}
	rpc VacuumVolume (VacuumVolumeRequest) returns (VacuumVolumeResponse) {
	}
	rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) {
	}
-	rpc ListMasterClients (ListMasterClientsRequest) returns (ListMasterClientsResponse) {
+	rpc ListClusterNodes (ListClusterNodesRequest) returns (ListClusterNodesResponse) {
	}
	rpc LeaseAdminToken (LeaseAdminTokenRequest) returns (LeaseAdminTokenResponse) {
	}
	rpc ReleaseAdminToken (ReleaseAdminTokenRequest) returns (ReleaseAdminTokenResponse) {
	}
}
//////////////////////////////////////////////////
message Heartbeat {
	string ip = 1;
	uint32 port = 2;
	string public_url = 3;
	uint64 max_file_key = 5;
	string data_center = 6;
	string rack = 7;
	uint32 admin_port = 8;
	repeated VolumeInformationMessage volumes = 9;
	// delta volumes
	repeated VolumeShortInformationMessage new_volumes = 10;
	repeated VolumeShortInformationMessage deleted_volumes = 11;
	bool has_no_volumes = 12;
	// erasure coding
	repeated VolumeEcShardInformationMessage ec_shards = 16;
	// delta erasure coding shards
	repeated VolumeEcShardInformationMessage new_ec_shards = 17;
	repeated VolumeEcShardInformationMessage deleted_ec_shards = 18;
	bool has_no_ec_shards = 19;
	map<string, uint32> max_volume_counts = 4;
	uint32 grpc_port = 20;
}
message HeartbeatResponse {
	uint64 volume_size_limit = 1;
	string leader = 2;
	string metrics_address = 3;
	uint32 metrics_interval_seconds = 4;
	repeated StorageBackend storage_backends = 5;
}
message VolumeInformationMessage {
	uint32 id = 1;
	uint64 size = 2;
	string collection = 3;
	uint64 file_count = 4;
	uint64 delete_count = 5;
	uint64 deleted_byte_count = 6;
	bool read_only = 7;
	uint32 replica_placement = 8;
	uint32 version = 9;
	uint32 ttl = 10;
	uint32 compact_revision = 11;
	int64 modified_at_second = 12;
	string remote_storage_name = 13;
	string remote_storage_key = 14;
	string disk_type = 15;
}
message VolumeShortInformationMessage {
	uint32 id = 1;
	string collection = 3;
	uint32 replica_placement = 8;
	uint32 version = 9;
	uint32 ttl = 10;
	string disk_type = 15;
}
message VolumeEcShardInformationMessage {
	uint32 id = 1;
	string collection = 2;
	uint32 ec_index_bits = 3;
	string disk_type = 4;
}
message StorageBackend {
	string type = 1;
	string id = 2;
	map<string, string> properties = 3;
}
message Empty {
}
message SuperBlockExtra {
	message ErasureCoding {
		uint32 data = 1;
		uint32 parity = 2;
		repeated uint32 volume_ids = 3;
	}
	ErasureCoding erasure_coding = 1;
}
message KeepConnectedRequest {
-	string name = 1;
+	string client_type = 1;
	string client_address = 3;
+	string version = 4;
}
message VolumeLocation {
	string url = 1;
	string public_url = 2;
	repeated uint32 new_vids = 3;
	repeated uint32 deleted_vids = 4;
	string leader = 5; // optional when leader is not itself
	string data_center = 6; // optional when DataCenter is in use
	uint32 grpc_port = 7;
}
+message ClusterNodeUpdate {
+	string node_type = 1;
+	string address = 2;
+	bool is_leader = 3;
+	bool is_add = 4;
+}
+message KeepConnectedResponse {
+	VolumeLocation volume_location = 1;
+	ClusterNodeUpdate cluster_node_update = 2;
+}
message LookupVolumeRequest {
	repeated string volume_or_file_ids = 1;
	string collection = 2; // optional, a bit faster if provided.
}
message LookupVolumeResponse {
	message VolumeIdLocation {
		string volume_or_file_id = 1;
		repeated Location locations = 2;
		string error = 3;
		string auth = 4;
	}
	repeated VolumeIdLocation volume_id_locations = 1;
}
message Location {
	string url = 1;
	string public_url = 2;
	uint32 grpc_port = 3;
}
message AssignRequest {
	uint64 count = 1;
	string replication = 2;
	string collection = 3;
	string ttl = 4;
	string data_center = 5;
	string rack = 6;
	string data_node = 7;
	uint32 memory_map_max_size_mb = 8;
	uint32 Writable_volume_count = 9;
	string disk_type = 10;
}
message AssignResponse {
	string fid = 1;
	uint64 count = 4;
	string error = 5;
	string auth = 6;
	repeated Location replicas = 7;
	Location location = 8;
}
message StatisticsRequest {
	string replication = 1;
	string collection = 2;
	string ttl = 3;
	string disk_type = 4;
}
message StatisticsResponse {
	uint64 total_size = 4;
	uint64 used_size = 5;
	uint64 file_count = 6;
}
//
// collection related
//
message Collection {
	string name = 1;
}
message CollectionListRequest {
	bool include_normal_volumes = 1;
	bool include_ec_volumes = 2;
}
message CollectionListResponse {
	repeated Collection collections = 1;
}
message CollectionDeleteRequest {
	string name = 1;
}
message CollectionDeleteResponse {
}
@@ -218,56 +231,56 @@ message CollectionDeleteResponse {
//
// volume related
//
message DiskInfo {
	string type = 1;
	int64 volume_count = 2;
	int64 max_volume_count = 3;
	int64 free_volume_count = 4;
	int64 active_volume_count = 5;
	repeated VolumeInformationMessage volume_infos = 6;
	repeated VolumeEcShardInformationMessage ec_shard_infos = 7;
	int64 remote_volume_count = 8;
}
message DataNodeInfo {
	string id = 1;
	map<string, DiskInfo> diskInfos = 2;
	uint32 grpc_port = 3;
}
message RackInfo {
	string id = 1;
	repeated DataNodeInfo data_node_infos = 2;
	map<string, DiskInfo> diskInfos = 3;
}
message DataCenterInfo {
	string id = 1;
	repeated RackInfo rack_infos = 2;
	map<string, DiskInfo> diskInfos = 3;
}
message TopologyInfo {
	string id = 1;
	repeated DataCenterInfo data_center_infos = 2;
	map<string, DiskInfo> diskInfos = 3;
}
message VolumeListRequest {
}
message VolumeListResponse {
	TopologyInfo topology_info = 1;
	uint64 volume_size_limit_mb = 2;
}
message LookupEcVolumeRequest {
	uint32 volume_id = 1;
}
message LookupEcVolumeResponse {
	uint32 volume_id = 1;
	message EcShardIdLocation {
		uint32 shard_id = 1;
		repeated Location locations = 2;
	}
	repeated EcShardIdLocation shard_id_locations = 2;
}
message VacuumVolumeRequest {
	float garbage_threshold = 1;
}
message VacuumVolumeResponse {
}
@@ -275,37 +288,42 @@ message VacuumVolumeResponse {
message GetMasterConfigurationRequest {
}
message GetMasterConfigurationResponse {
	string metrics_address = 1;
	uint32 metrics_interval_seconds = 2;
	repeated StorageBackend storage_backends = 3;
	string default_replication = 4;
	string leader = 5;
	uint32 volume_size_limit_m_b = 6;
	bool volume_preallocate = 7;
}
-message ListMasterClientsRequest {
-	string client_type = 1;
+message ListClusterNodesRequest {
+	string client_type = 1;
}
-message ListMasterClientsResponse {
-	repeated string grpc_addresses = 1;
+message ListClusterNodesResponse {
+	message ClusterNode {
+		string address = 1;
+		string version = 2;
+		bool is_leader = 3;
+	}
+	repeated ClusterNode cluster_nodes = 1;
}
message LeaseAdminTokenRequest {
	int64 previous_token = 1;
	int64 previous_lock_time = 2;
	string lock_name = 3;
	string client_name = 4;
}
message LeaseAdminTokenResponse {
	int64 token = 1;
	int64 lock_ts_ns = 2;
}
message ReleaseAdminTokenRequest {
	int64 previous_token = 1;
	int64 previous_lock_time = 2;
	string lock_name = 3;
}
message ReleaseAdminTokenResponse {
}
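KeepConnected used to stream bare VolumeLocation messages; it now streams a wrapper that can carry either a volume-location delta or a cluster-membership event. A rough sketch of demultiplexing the stream on a subscriber (the stream is assumed to be established and the initial KeepConnectedRequest already sent; the handling is illustrative, not the shell's actual logic):

	import "github.com/chrislusf/seaweedfs/weed/pb/master_pb"

	// consume reads KeepConnected responses and splits the two payload kinds.
	func consume(stream master_pb.Seaweed_KeepConnectedClient) error {
		for {
			resp, err := stream.Recv()
			if err != nil {
				return err // io.EOF when the master closes the stream
			}
			if loc := resp.VolumeLocation; loc != nil {
				// volume delta for loc.Url: loc.NewVids appeared, loc.DeletedVids went away
				_ = loc.NewVids
			}
			if update := resp.ClusterNodeUpdate; update != nil {
				// membership event: an update.NodeType node at update.Address
				// joined (update.IsAdd) or left; update.IsLeader marks leaders
				_ = update.IsAdd
			}
		}
	}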

1533  weed/pb/master_pb/master.pb.go

File diff suppressed because it is too large

5  weed/remote_storage/s3/s3_storage_client.go

@@ -83,6 +83,7 @@ func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation
	}
	isLastPage := false
	for !isLastPage && err == nil {
+		var localErr error
		listErr := s.conn.ListObjectsV2Pages(listInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
			for _, content := range page.Contents {
				key := *content.Key
@@ -94,6 +95,7 @@ func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation
					RemoteETag:  *content.ETag,
					StorageName: s.conf.Name,
				}); err != nil {
+					localErr = err
					return false
				}
			}
@@ -104,6 +106,9 @@ func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation
		if listErr != nil {
			err = fmt.Errorf("list %v: %v", remote, listErr)
		}
+		if localErr != nil {
+			err = fmt.Errorf("process %v: %v", remote, localErr)
+		}
	}
	return
}
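The underlying pitfall is general to the AWS SDK for Go: returning false from a Pages callback only stops pagination, it does not make the Pages call itself return your error. The shape of the workaround, reduced out of this file (svc, input, and process are stand-ins):

	// Sketch: surface a callback error from a paginated AWS SDK call.
	var localErr error
	pageErr := svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		if err := process(page); err != nil {
			localErr = err // remember why we stopped
			return false   // stop paging
		}
		return true // keep paging
	})
	if pageErr != nil {
		return fmt.Errorf("list: %v", pageErr)
	}
	if localErr != nil {
		return fmt.Errorf("process: %v", localErr)
	}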

20  weed/s3api/auth_credentials.go

@@ -5,6 +5,7 @@ import (
	"net/http"
	"os"
	"strings"
+	"sync"
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -23,6 +24,8 @@ type Iam interface {
}
type IdentityAccessManagement struct {
+	m sync.RWMutex
	identities []*Identity
	domain     string
}
@@ -131,31 +134,38 @@ func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3Api
		}
		identities = append(identities, t)
	}
+	iam.m.Lock()
	// atomically switch
	iam.identities = identities
+	iam.m.Unlock()
	return nil
}
func (iam *IdentityAccessManagement) isEnabled() bool {
+	iam.m.RLock()
+	defer iam.m.RUnlock()
	return len(iam.identities) > 0
}
func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) {
+	iam.m.RLock()
+	defer iam.m.RUnlock()
	for _, ident := range iam.identities {
		for _, cred := range ident.Credentials {
-			// println("checking", ident.Name, cred.AccessKey)
			if cred.AccessKey == accessKey {
				return ident, cred, true
			}
		}
	}
+	glog.V(1).Infof("could not find accessKey %s", accessKey)
	return nil, nil, false
}
func (iam *IdentityAccessManagement) lookupAnonymous() (identity *Identity, found bool) {
+	iam.m.RLock()
+	defer iam.m.RUnlock()
	for _, ident := range iam.identities {
		if ident.Name == "anonymous" {
			return ident, true
@@ -177,12 +187,14 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt
			r.Header.Set(xhttp.AmzIdentityId, identity.Name)
			if identity.isAdmin() {
				r.Header.Set(xhttp.AmzIsAdmin, "true")
+			} else if _, ok := r.Header[xhttp.AmzIsAdmin]; ok {
+				r.Header.Del(xhttp.AmzIsAdmin)
			}
		}
		f(w, r)
		return
	}
-	s3err.WriteErrorResponse(w, errCode, r)
+	s3err.WriteErrorResponse(w, r, errCode)
}
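The locking scheme here is a copy-on-write reload: loadS3ApiConfiguration builds the full replacement slice outside the lock and takes the write lock only for the pointer swap, so readers are never blocked on config parsing. Reduced to its essentials, with the types simplified to stand-ins:

	import "sync"

	// Identity is a simplified stand-in for the real s3api type.
	type Identity struct{ Name string }

	type registry struct {
		m     sync.RWMutex
		items []*Identity
	}

	// reload swaps in a fully-built slice; the write lock covers only the swap.
	func (r *registry) reload(next []*Identity) {
		r.m.Lock()
		r.items = next
		r.m.Unlock()
	}

	// lookup takes the read lock, so it runs concurrently with other readers.
	func (r *registry) lookup(name string) (*Identity, bool) {
		r.m.RLock()
		defer r.m.RUnlock()
		for _, it := range r.items {
			if it.Name == name {
				return it, true
			}
		}
		return nil, false
	}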

47  weed/s3api/s3api_bucket_handlers.go

@@ -36,7 +36,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
	if s3a.iam.isEnabled() {
		identity, s3Err = s3a.iam.authUser(r)
		if s3Err != s3err.ErrNone {
-			s3err.WriteErrorResponse(w, s3Err, r)
+			s3err.WriteErrorResponse(w, r, s3Err)
			return
		}
	}
@@ -46,7 +46,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
	entries, _, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32)
	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}
@@ -73,7 +73,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
		Buckets: buckets,
	}
-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
}
func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
@@ -100,17 +100,24 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
		}
		return nil
	}); err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}
	if exist, err := s3a.exists(s3a.option.BucketsPath, bucket, true); err == nil && exist {
		errCode = s3err.ErrBucketAlreadyExists
	}
	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
		return
	}
+	if s3a.iam.isEnabled() {
+		if _, errCode = s3a.iam.authRequest(r, s3_constants.ACTION_ADMIN); errCode != s3err.ErrNone {
+			s3err.WriteErrorResponse(w, r, errCode)
+			return
+		}
+	}
	fn := func(entry *filer_pb.Entry) {
		if identityId := r.Header.Get(xhttp.AmzIdentityId); identityId != "" {
			if entry.Extended == nil {
@@ -123,11 +130,11 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
	// create the folder for bucket, but lazily create actual collection
	if err := s3a.mkdir(s3a.option.BucketsPath, bucket, fn); err != nil {
		glog.Errorf("PutBucketHandler mkdir: %v", err)
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}
-	writeSuccessResponseEmpty(w)
+	writeSuccessResponseEmpty(w, r)
}
func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
@@ -136,7 +143,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
	glog.V(3).Infof("DeleteBucketHandler %s", bucket)
	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, err, r)
+		s3err.WriteErrorResponse(w, r, err)
		return
	}
@@ -158,11 +165,11 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
	err = s3a.rm(s3a.option.BucketsPath, bucket, false, true)
	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}
-	s3err.WriteEmptyResponse(w, http.StatusNoContent)
+	s3err.WriteEmptyResponse(w, r, http.StatusNoContent)
}
func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
@@ -171,11 +178,11 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
	glog.V(3).Infof("HeadBucketHandler %s", bucket)
	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, err, r)
+		s3err.WriteErrorResponse(w, r, err)
		return
	}
-	writeSuccessResponseEmpty(w)
+	writeSuccessResponseEmpty(w, r)
}
func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorCode {
@@ -216,7 +223,7 @@ func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Reque
	glog.V(3).Infof("GetBucketAclHandler %s", bucket)
	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, err, r)
+		s3err.WriteErrorResponse(w, r, err)
		return
	}
@@ -245,7 +252,7 @@ func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Reque
			})
		}
	}
-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
}
// GetBucketLifecycleConfigurationHandler Get Bucket Lifecycle configuration
@@ -256,18 +263,18 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr
	glog.V(3).Infof("GetBucketAclHandler %s", bucket)
	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, err, r)
+		s3err.WriteErrorResponse(w, r, err)
		return
	}
	fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil)
	if err != nil {
		glog.Errorf("GetBucketLifecycleConfigurationHandler: %s", err)
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}
	ttls := fc.GetCollectionTtls(bucket)
	if len(ttls) == 0 {
-		s3err.WriteErrorResponse(w, s3err.ErrNoSuchLifecycleConfiguration, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchLifecycleConfiguration)
		return
	}
	response := Lifecycle{}
@@ -285,14 +292,14 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr
			Expiration: Expiration{Days: days, set: true},
		})
	}
-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
}
// PutBucketLifecycleConfigurationHandler Put Bucket Lifecycle configuration
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) {
-	s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
+	s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
}
@@ -300,6 +307,6 @@ func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWr
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
-	s3err.WriteEmptyResponse(w, http.StatusNoContent)
+	s3err.WriteEmptyResponse(w, r, http.StatusNoContent)
}

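A note on the recurring change in these S3 handler hunks: the s3err helper signatures are reordered so the *http.Request rides along with the ResponseWriter on every response path. The motivation becomes concrete in the weed/s3api/s3err/error_handler.go hunks further down, where setCommonHeaders starts consulting the request's Origin header. At call sites the migration is mechanical:

    // old call shape: the request trailed the error code
    s3err.WriteErrorResponse(w, errCode, r)

    // new call shape: the request moves next to the writer,
    // so shared header logic can inspect request headers
    s3err.WriteErrorResponse(w, r, errCode)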
weed/s3api/s3api_handlers.go (8 changed lines)

@@ -26,12 +26,12 @@ func (s3a *S3ApiServer) AdjustedUrl(location *filer_pb.Location) string {
 	return location.Url
 }

-func writeSuccessResponseXML(w http.ResponseWriter, response interface{}) {
-	s3err.WriteXMLResponse(w, http.StatusOK, response)
+func writeSuccessResponseXML(w http.ResponseWriter, r *http.Request, response interface{}) {
+	s3err.WriteXMLResponse(w, r, http.StatusOK, response)
 }

-func writeSuccessResponseEmpty(w http.ResponseWriter) {
-	s3err.WriteEmptyResponse(w, http.StatusOK)
+func writeSuccessResponseEmpty(w http.ResponseWriter, r *http.Request) {
+	s3err.WriteEmptyResponse(w, r, http.StatusOK)
 }

 func validateContentMd5(h http.Header) ([]byte, error) {

weed/s3api/s3api_object_copy_handlers.go (30 changed lines)

@@ -34,16 +34,16 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 		dir, name := fullPath.DirAndName()
 		entry, err := s3a.getEntry(dir, name)
 		if err != nil {
-			s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
 			return
 		}
 		entry.Extended = weed_server.SaveAmzMetaData(r, entry.Extended, isReplace(r))
 		err = s3a.touch(dir, name, entry)
 		if err != nil {
-			s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
 			return
 		}
-		writeSuccessResponseXML(w, CopyObjectResult{
+		writeSuccessResponseXML(w, r, CopyObjectResult{
 			ETag:         fmt.Sprintf("%x", entry.Attributes.Md5),
 			LastModified: time.Now().UTC(),
 		})
@@ -52,19 +52,19 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 	// If source object is empty or bucket is empty, reply back invalid copy source.
 	if srcObject == "" || srcBucket == "" {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
 		return
 	}
 	srcPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, srcBucket, srcObject))
 	dir, name := srcPath.DirAndName()
 	_, err = s3a.getEntry(dir, name)
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
 		return
 	}

 	if srcBucket == dstBucket && srcObject == dstObject {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidCopyDest, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopyDest)
 		return
 	}
@@ -75,7 +75,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 	_, _, resp, err := util.DownloadFile(srcUrl, "")
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
 		return
 	}
 	defer util.CloseResponse(resp)
@@ -84,7 +84,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 	etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body)
 	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}
@@ -95,7 +95,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 		LastModified: time.Now().UTC(),
 	}

-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
 }
@@ -128,7 +128,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 	srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)
 	// If source object is empty or bucket is empty, reply back invalid copy source.
 	if srcObject == "" || srcBucket == "" {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
 		return
 	}
@@ -137,7 +137,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 	partID, err := strconv.Atoi(partIDString)
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidPart, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
 		return
 	}
@@ -145,7 +145,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 	// check partID with maximum part ID for multipart objects
 	if partID > globalMaxPartID {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxParts, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
 		return
 	}
@@ -158,7 +158,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 	dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader)
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
 		return
 	}
 	defer dataReader.Close()
@@ -167,7 +167,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 	etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)
 	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}
@@ -178,7 +178,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 		LastModified: time.Now().UTC(),
 	}

-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
 }

weed/s3api/s3api_object_handlers.go (32 changed lines)

@@ -56,20 +56,20 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 	_, err := validateContentMd5(r.Header)
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidDigest, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
 		return
 	}

 	if r.Header.Get("Cache-Control") != "" {
 		if _, err = cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control")); err != nil {
-			s3err.WriteErrorResponse(w, s3err.ErrInvalidDigest, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
 			return
 		}
 	}

 	if r.Header.Get("Expires") != "" {
 		if _, err = time.Parse(http.TimeFormat, r.Header.Get("Expires")); err != nil {
-			s3err.WriteErrorResponse(w, s3err.ErrInvalidDigest, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
 			return
 		}
 	}
@@ -87,12 +87,12 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 			_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
 		}
 		if s3ErrCode != s3err.ErrNone {
-			s3err.WriteErrorResponse(w, s3ErrCode, r)
+			s3err.WriteErrorResponse(w, r, s3ErrCode)
 			return
 		}
 	} else {
 		if authTypeStreamingSigned == rAuthType {
-			s3err.WriteErrorResponse(w, s3err.ErrAuthNotSetup, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrAuthNotSetup)
 			return
 		}
 	}
@@ -100,7 +100,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 	if strings.HasSuffix(object, "/") {
 		if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
-			s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 			return
 		}
 	} else {
@@ -113,14 +113,14 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 		etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)

 		if errCode != s3err.ErrNone {
-			s3err.WriteErrorResponse(w, errCode, r)
+			s3err.WriteErrorResponse(w, r, errCode)
 			return
 		}

 		setEtag(w, etag)
 	}

-	writeSuccessResponseEmpty(w)
+	writeSuccessResponseEmpty(w, r)
 }

 func urlPathEscape(object string) string {
@@ -137,7 +137,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
 	glog.V(3).Infof("GetObjectHandler %s %s", bucket, object)

 	if strings.HasSuffix(r.URL.Path, "/") {
-		s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
 		return
 	}
@@ -215,13 +215,13 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
 	deleteXMLBytes, err := io.ReadAll(r.Body)
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		return
 	}

 	deleteObjects := &DeleteObjectsRequest{}
 	if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrMalformedXML, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
 		return
 	}
@@ -273,7 +273,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
 	}
 	deleteResp.Errors = deleteErrors

-	writeSuccessResponseXML(w, deleteResp)
+	writeSuccessResponseXML(w, r, deleteResp)
 }
@@ -317,7 +317,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 	if err != nil {
 		glog.Errorf("NewRequest %s: %v", destUrl, err)
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		return
 	}
@@ -346,19 +346,19 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 	if postErr != nil {
 		glog.Errorf("post to filer: %v", postErr)
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		return
 	}
 	defer util.CloseResponse(resp)

 	if resp.StatusCode == http.StatusPreconditionFailed {
-		s3err.WriteErrorResponse(w, s3err.ErrPreconditionFailed, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrPreconditionFailed)
 		return
 	}

 	if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 {
 		if r.Method != "DELETE" {
-			s3err.WriteErrorResponse(w, s3err.ErrNoSuchKey, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
 			return
 		}
 	}

weed/s3api/s3api_object_handlers_postpolicy.go (30 changed lines)

@@ -29,23 +29,23 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
 	reader, err := r.MultipartReader()
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest)
 		return
 	}

 	form, err := reader.ReadForm(int64(5 * humanize.MiByte))
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest)
 		return
 	}
 	defer form.RemoveAll()

 	fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form)
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest)
 		return
 	}
 	if fileBody == nil {
-		s3err.WriteErrorResponse(w, s3err.ErrPOSTFileRequired, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrPOSTFileRequired)
 		return
 	}
 	defer fileBody.Close()
@@ -63,7 +63,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
 	if successRedirect != "" {
 		redirectURL, err = url.Parse(successRedirect)
 		if err != nil {
-			s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest)
 			return
 		}
 	}
@@ -71,13 +71,13 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
 	// Verify policy signature.
 	errCode := s3a.iam.doesPolicySignatureMatch(formValues)
 	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}

 	policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest)
 		return
 	}
@@ -86,7 +86,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
 		postPolicyForm, err := policy.ParsePostPolicyForm(string(policyBytes))
 		if err != nil {
-			s3err.WriteErrorResponse(w, s3err.ErrPostPolicyConditionInvalidFormat, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrPostPolicyConditionInvalidFormat)
 			return
 		}
@@ -102,12 +102,12 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
 		lengthRange := postPolicyForm.Conditions.ContentLengthRange
 		if lengthRange.Valid {
 			if fileSize < lengthRange.Min {
-				s3err.WriteErrorResponse(w, s3err.ErrEntityTooSmall, r)
+				s3err.WriteErrorResponse(w, r, s3err.ErrEntityTooSmall)
 				return
 			}

 			if fileSize > lengthRange.Max {
-				s3err.WriteErrorResponse(w, s3err.ErrEntityTooLarge, r)
+				s3err.WriteErrorResponse(w, r, s3err.ErrEntityTooLarge)
 				return
 			}
 		}
@@ -118,7 +118,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
 	etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody)
 	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}
@@ -126,7 +126,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
 		// Replace raw query params..
 		redirectURL.RawQuery = getRedirectPostRawQuery(bucket, object, etag)
 		w.Header().Set("Location", redirectURL.String())
-		s3err.WriteEmptyResponse(w, http.StatusSeeOther)
+		s3err.WriteEmptyResponse(w, r, http.StatusSeeOther)
 		return
 	}
@@ -141,11 +141,11 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
 			ETag:     `"` + etag + `"`,
 			Location: w.Header().Get("Location"),
 		}
-		s3err.WriteXMLResponse(w, http.StatusCreated, resp)
+		s3err.WriteXMLResponse(w, r, http.StatusCreated, resp)
 	case "200":
-		s3err.WriteEmptyResponse(w, http.StatusOK)
+		s3err.WriteEmptyResponse(w, r, http.StatusOK)
 	default:
-		writeSuccessResponseEmpty(w)
+		writeSuccessResponseEmpty(w, r)
 	}
 }

weed/s3api/s3api_object_multipart_handlers.go (40 changed lines)

@@ -45,11 +45,11 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
 	glog.V(2).Info("NewMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode)

 	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}

-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
 }
@@ -69,11 +69,11 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
 	glog.V(2).Info("CompleteMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode)

 	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}

-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
 }
@@ -91,13 +91,13 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
 	})

 	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}

 	glog.V(2).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)))

-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
 }
@@ -107,13 +107,13 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
 	prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query())

 	if maxUploads < 0 {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxUploads, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxUploads)
 		return
 	}
 	if keyMarker != "" {
 		// Marker not common with prefix is not implemented.
 		if !strings.HasPrefix(keyMarker, prefix) {
-			s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
 			return
 		}
 	}
@@ -131,13 +131,13 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
 	glog.V(2).Infof("ListMultipartUploadsHandler %s errCode=%d", string(s3err.EncodeXMLResponse(response)), errCode)

 	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}

 	// TODO handle encodingType

-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
 }

 // ListObjectPartsHandler - Lists object parts in a multipart upload.
@@ -146,11 +146,11 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
 	uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query())
 	if partNumberMarker < 0 {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidPartNumberMarker, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPartNumberMarker)
 		return
 	}
 	if maxParts < 0 {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxParts, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
 		return
 	}
@@ -165,11 +165,11 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
 	glog.V(2).Infof("ListObjectPartsHandler %s count=%d", string(s3err.EncodeXMLResponse(response)), len(response.Part))

 	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}

-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
 }
@@ -180,18 +180,18 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
 	uploadID := r.URL.Query().Get("uploadId")
 	exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true)
 	if !exists {
-		s3err.WriteErrorResponse(w, s3err.ErrNoSuchUpload, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload)
 		return
 	}

 	partIDString := r.URL.Query().Get("partNumber")
 	partID, err := strconv.Atoi(partIDString)
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidPart, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
 		return
 	}
 	if partID > globalMaxPartID {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxParts, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
 		return
 	}
@@ -208,7 +208,7 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
 			_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
 		}
 		if s3ErrCode != s3err.ErrNone {
-			s3err.WriteErrorResponse(w, s3ErrCode, r)
+			s3err.WriteErrorResponse(w, r, s3ErrCode)
 			return
 		}
 	}
@@ -225,13 +225,13 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
 	etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)

 	if errCode != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, errCode, r)
+		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}

 	setEtag(w, etag)

-	writeSuccessResponseEmpty(w)
+	writeSuccessResponseEmpty(w, r)
 }

weed/s3api/s3api_object_tagging_handlers.go (24 changed lines)

@@ -26,15 +26,15 @@ func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.R
 	if err != nil {
 		if err == filer_pb.ErrNotFound {
 			glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
-			s3err.WriteErrorResponse(w, s3err.ErrNoSuchKey, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
 		} else {
 			glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
-			s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		}
 		return
 	}

-	writeSuccessResponseXML(w, FromTags(tags))
+	writeSuccessResponseXML(w, r, FromTags(tags))
 }
@@ -52,29 +52,29 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
 	input, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
 	if err != nil {
 		glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err)
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		return
 	}
 	if err = xml.Unmarshal(input, tagging); err != nil {
 		glog.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err)
-		s3err.WriteErrorResponse(w, s3err.ErrMalformedXML, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
 		return
 	}
 	tags := tagging.ToTags()
 	if len(tags) > 10 {
 		glog.Errorf("PutObjectTaggingHandler tags %s: %d tags more than 10", r.URL, len(tags))
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidTag, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag)
 		return
 	}
 	for k, v := range tags {
 		if len(k) > 128 {
 			glog.Errorf("PutObjectTaggingHandler tags %s: tag key %s longer than 128", r.URL, k)
-			s3err.WriteErrorResponse(w, s3err.ErrInvalidTag, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag)
 			return
 		}
 		if len(v) > 256 {
 			glog.Errorf("PutObjectTaggingHandler tags %s: tag value %s longer than 256", r.URL, v)
-			s3err.WriteErrorResponse(w, s3err.ErrInvalidTag, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag)
 			return
 		}
 	}
@@ -82,10 +82,10 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
 	if err = s3a.setTags(dir, name, tagging.ToTags()); err != nil {
 		if err == filer_pb.ErrNotFound {
 			glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
-			s3err.WriteErrorResponse(w, s3err.ErrNoSuchKey, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
 		} else {
 			glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
-			s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		}
 		return
 	}
@@ -108,10 +108,10 @@ func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *htt
 	if err != nil {
 		if err == filer_pb.ErrNotFound {
 			glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
-			s3err.WriteErrorResponse(w, s3err.ErrNoSuchKey, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
 		} else {
 			glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
-			s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		}
 		return
 	}

weed/s3api/s3api_objects_list_handlers.go (24 changed lines)

@@ -45,11 +45,11 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
 	originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())

 	if maxKeys < 0 {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxKeys, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
 		return
 	}
 	if delimiter != "" && delimiter != "/" {
-		s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
 		return
 	}
@@ -61,13 +61,13 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
 	response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		return
 	}

 	if len(response.Contents) == 0 {
 		if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
-			s3err.WriteErrorResponse(w, s3err.ErrNoSuchBucket, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
 			return
 		}
 	}
@@ -87,7 +87,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
 		StartAfter: startAfter,
 	}

-	writeSuccessResponseXML(w, responseV2)
+	writeSuccessResponseXML(w, r, responseV2)
 }

 func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
@@ -101,29 +101,29 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
 	originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())

 	if maxKeys < 0 {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxKeys, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
 		return
 	}
 	if delimiter != "" && delimiter != "/" {
-		s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
 		return
 	}

 	response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
 	if err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		return
 	}

 	if len(response.Contents) == 0 {
 		if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
-			s3err.WriteErrorResponse(w, s3err.ErrNoSuchBucket, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
 			return
 		}
 	}

-	writeSuccessResponseXML(w, response)
+	writeSuccessResponseXML(w, r, response)
 }

 func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, marker string, delimiter string) (response ListBucketResult, err error) {
@@ -220,12 +220,16 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 				err = subErr
 				return
 			}
+			counter += subCounter
 			isTruncated = isTruncated || subIsTruncated
 			maxKeys -= subCounter
 			nextMarker = subDir + "/" + subNextMarker
 			// finished processing this sub directory
 			marker = subDir
 		}
+	if maxKeys <= 0 {
+		return
+	}

 	// now marker is also a direct child of dir
 	request := &filer_pb.ListEntriesRequest{

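The doListFilerEntries hunk above fixes the recursion bookkeeping: counts from sub-directory listings were dropped from the running counter, and the scan did not stop once the maxKeys budget was spent. A minimal, self-contained sketch of the corrected pattern (simplified stand-in types, not the filer client API):

    package main

    import "fmt"

    type dir struct {
    	files []string
    	subs  []dir
    }

    // list returns up to maxKeys names, depth-first, mirroring
    // doListFilerEntries' counter/maxKeys handling after the fix.
    func list(d dir, maxKeys int) (names []string, counter int) {
    	for _, sub := range d.subs {
    		subNames, subCounter := list(sub, maxKeys)
    		names = append(names, subNames...)
    		counter += subCounter // the fix: fold sub-directory counts in
    		maxKeys -= subCounter
    		if maxKeys <= 0 { // the fix: stop once the budget is spent
    			return
    		}
    	}
    	for _, f := range d.files {
    		if maxKeys <= 0 {
    			return
    		}
    		names = append(names, f)
    		counter++
    		maxKeys--
    	}
    	return
    }

    func main() {
    	root := dir{files: []string{"c", "d"}, subs: []dir{{files: []string{"a", "b"}}}}
    	names, n := list(root, 3)
    	fmt.Println(names, n) // [a b c] 3
    }

Without the two added lines, the parent would keep listing past its budget because it never learned how many entries the sub-directories already produced.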
weed/s3api/s3api_server.go (4 changed lines)

@@ -66,7 +66,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 	// HeadObject
 	bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ), "GET"))
 	// HeadBucket
-	bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN), "GET"))
+	bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_READ), "GET"))

 	// CopyObjectPart
 	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
@@ -137,7 +137,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST"))
 	// PutBucket
-	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN), "PUT"))
+	bucket.Methods("PUT").HandlerFunc(track(s3a.PutBucketHandler, "PUT"))
 	// DeleteBucket
 	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE"))

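This route change pairs with the PutBucketHandler hunk near the top of this diff: bucket creation is no longer gated by the generic s3a.iam.Auth wrapper, and PutBucketHandler instead calls s3a.iam.authRequest(r, s3_constants.ACTION_ADMIN) itself when IAM is enabled, after its bucket-existence check. HeadBucket is also relaxed from ACTION_ADMIN to ACTION_READ, so read-only credentials can probe whether a bucket exists.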
weed/s3api/s3api_status_handlers.go (2 changed lines)

@@ -4,5 +4,5 @@ import "net/http"
 func (s3a *S3ApiServer) StatusHandler(w http.ResponseWriter, r *http.Request) {
 	// write out the response code and content type header
-	writeSuccessResponseEmpty(w)
+	writeSuccessResponseEmpty(w, r)
 }

weed/s3api/s3err/error_handler.go (24 changed lines)

@@ -19,15 +19,15 @@ const (
 	MimeXML mimeType = "application/xml"
 )

-func WriteXMLResponse(w http.ResponseWriter, statusCode int, response interface{}) {
-	WriteResponse(w, statusCode, EncodeXMLResponse(response), MimeXML)
+func WriteXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) {
+	WriteResponse(w, r, statusCode, EncodeXMLResponse(response), MimeXML)
 }

-func WriteEmptyResponse(w http.ResponseWriter, statusCode int) {
-	WriteResponse(w, statusCode, []byte{}, mimeNone)
+func WriteEmptyResponse(w http.ResponseWriter, r *http.Request, statusCode int) {
+	WriteResponse(w, r, statusCode, []byte{}, mimeNone)
 }

-func WriteErrorResponse(w http.ResponseWriter, errorCode ErrorCode, r *http.Request) {
+func WriteErrorResponse(w http.ResponseWriter, r *http.Request, errorCode ErrorCode) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
 	object := vars["object"]
@@ -38,7 +38,7 @@ func WriteErrorResponse(w http.ResponseWriter, errorCode ErrorCode, r *http.Requ
 	apiError := GetAPIError(errorCode)
 	errorResponse := getRESTErrorResponse(apiError, r.URL.Path, bucket, object)
 	encodedErrorResponse := EncodeXMLResponse(errorResponse)
-	WriteResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, MimeXML)
+	WriteResponse(w, r, apiError.HTTPStatusCode, encodedErrorResponse, MimeXML)
 }

 func getRESTErrorResponse(err APIError, resource string, bucket, object string) RESTErrorResponse {
@@ -61,13 +61,17 @@ func EncodeXMLResponse(response interface{}) []byte {
 	return bytesBuffer.Bytes()
 }

-func setCommonHeaders(w http.ResponseWriter) {
+func setCommonHeaders(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano()))
 	w.Header().Set("Accept-Ranges", "bytes")
+	if r.Header.Get("Origin") != "" {
+		w.Header().Set("Access-Control-Allow-Origin", "*")
+		w.Header().Set("Access-Control-Allow-Credentials", "true")
+	}
 }

-func WriteResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
-	setCommonHeaders(w)
+func WriteResponse(w http.ResponseWriter, r *http.Request, statusCode int, response []byte, mType mimeType) {
+	setCommonHeaders(w, r)
 	if response != nil {
 		w.Header().Set("Content-Length", strconv.Itoa(len(response)))
 	}
@@ -88,5 +92,5 @@ func WriteResponse(w http.ResponseWriter, statusCode int, response []byte, mType
 // If none of the http routes match respond with MethodNotAllowed
 func NotFoundHandler(w http.ResponseWriter, r *http.Request) {
 	glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI)
-	WriteErrorResponse(w, ErrMethodNotAllowed, r)
+	WriteErrorResponse(w, r, ErrMethodNotAllowed)
 }

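Threading r into setCommonHeaders is what enables the new conditional CORS headers above: only requests that carry an Origin header get the Access-Control-Allow-* response headers. A runnable, self-contained sketch of the behavior (stdlib only; the helper here mirrors the shape of s3err.setCommonHeaders but is not a SeaweedFS export):

    package main

    import (
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    )

    // mirrors the shape of setCommonHeaders after this change
    func setCommonHeaders(w http.ResponseWriter, r *http.Request) {
    	w.Header().Set("Accept-Ranges", "bytes")
    	if r.Header.Get("Origin") != "" {
    		w.Header().Set("Access-Control-Allow-Origin", "*")
    		w.Header().Set("Access-Control-Allow-Credentials", "true")
    	}
    }

    func main() {
    	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		setCommonHeaders(w, r)
    		w.WriteHeader(http.StatusOK)
    	})

    	// browser-style request carrying an Origin header gets CORS headers
    	req := httptest.NewRequest("GET", "/bucket/key", nil)
    	req.Header.Set("Origin", "http://example.com")
    	rec := httptest.NewRecorder()
    	h.ServeHTTP(rec, req)
    	fmt.Println(rec.Header().Get("Access-Control-Allow-Origin")) // "*"

    	// non-browser request: no CORS headers are added
    	rec2 := httptest.NewRecorder()
    	h.ServeHTTP(rec2, httptest.NewRequest("GET", "/bucket/key", nil))
    	fmt.Printf("%q\n", rec2.Header().Get("Access-Control-Allow-Origin")) // ""
    }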
weed/s3api/stats.go (3 changed lines)

@@ -2,7 +2,6 @@ package s3api
 import (
 	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
-	"github.com/chrislusf/seaweedfs/weed/util"
 	"net/http"
 	"strconv"
 	"time"
@@ -28,7 +27,7 @@ func (r *StatusRecorder) Flush() {
 func track(f http.HandlerFunc, action string) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Set("Server", "SeaweedFS S3 "+util.VERSION)
+		w.Header().Set("Server", "SeaweedFS S3")
 		recorder := NewStatusResponseWriter(w)
 		start := time.Now()
 		f(recorder, r)

weed/server/filer_grpc_server_remote.go (4 changed lines)

@@ -17,7 +17,7 @@ import (
 	"time"
 )

-func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.DownloadToLocalRequest) (*filer_pb.DownloadToLocalResponse, error) {
+func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req *filer_pb.CacheRemoteObjectToLocalClusterRequest) (*filer_pb.CacheRemoteObjectToLocalClusterResponse, error) {

 	// load all mappings
 	mappingEntry, err := fs.filer.FindEntry(ctx, util.JoinPath(filer.DirectoryEtcRemote, filer.REMOTE_STORAGE_MOUNT_FILE))
@@ -57,7 +57,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
 		return nil, err
 	}

-	resp := &filer_pb.DownloadToLocalResponse{}
+	resp := &filer_pb.CacheRemoteObjectToLocalClusterResponse{}
 	if entry.Remote == nil || entry.Remote.RemoteSize == 0 {
 		return resp, nil
 	}

weed/server/filer_grpc_server_rename.go (2 changed lines, whitespace only)

@@ -161,7 +161,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee
 	if err := stream.Send(&filer_pb.StreamRenameEntryResponse{
 		Directory: string(newParent),
 		EventNotification: &filer_pb.EventNotification{
-			OldEntry:  &filer_pb.Entry{
+			OldEntry: &filer_pb.Entry{
 				Name: entry.Name(),
 			},
 			NewEntry: newEntry.ToProtoEntry(),

weed/server/filer_server.go (5 changed lines)

@@ -61,7 +61,6 @@ type FilerOption struct {
 	recursiveDelete       bool
 	Cipher                bool
 	SaveToFilerLimit      int64
-	Filers                []pb.ServerAddress
 	ConcurrentUploadLimit int64
 }
@@ -108,7 +107,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
 	fs.checkWithMaster()

 	go stats.LoopPushingMetric("filer", string(fs.option.Host), fs.metricsAddress, fs.metricsIntervalSec)
-	go fs.filer.KeepConnectedToMaster()
+	go fs.filer.KeepMasterClientConnected()

 	v := util.GetViper()
 	if !util.LoadConfiguration("filer", false) {
@@ -143,7 +142,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
 		readonlyMux.HandleFunc("/", fs.readonlyFilerHandler)
 	}

-	fs.filer.AggregateFromPeers(option.Host, option.Filers)
+	fs.filer.AggregateFromPeers(option.Host)

 	fs.filer.LoadBuckets()

weed/server/filer_server_handlers_read.go (4 changed lines)

@@ -169,11 +169,11 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	chunks := entry.Chunks
 	if entry.IsInRemoteOnly() {
 		dir, name := entry.FullPath.DirAndName()
-		if resp, err := fs.DownloadToLocal(context.Background(), &filer_pb.DownloadToLocalRequest{
+		if resp, err := fs.CacheRemoteObjectToLocalCluster(context.Background(), &filer_pb.CacheRemoteObjectToLocalClusterRequest{
 			Directory: dir,
 			Name:      name,
 		}); err != nil {
-			glog.Errorf("DownloadToLocal %s: %v", entry.FullPath, err)
+			glog.Errorf("CacheRemoteObjectToLocalCluster %s: %v", entry.FullPath, err)
 			return fmt.Errorf("cache %s: %v", entry.FullPath, err)
 		} else {
 			chunks = resp.Entry.Chunks

weed/server/master_grpc_server.go (59 changed lines)

@@ -6,7 +6,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"net"
-	"strings"
 	"time"

 	"github.com/chrislusf/raft"
@@ -45,11 +44,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
 			}
 			if len(message.DeletedVids) > 0 {
-				ms.clientChansLock.RLock()
-				for _, ch := range ms.clientChans {
-					ch <- message
-				}
-				ms.clientChansLock.RUnlock()
+				ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: message})
 			}
 		}
 	}()
@@ -154,12 +149,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
 		}

 		if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 {
-			ms.clientChansLock.RLock()
-			for host, ch := range ms.clientChans {
-				glog.V(0).Infof("master send to %s: %s", host, message.String())
-				ch <- message
-			}
-			ms.clientChansLock.RUnlock()
+			ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: message})
 		}

 		// tell the volume servers about the leader
@@ -195,12 +185,20 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
 	// buffer by 1 so we don't end up getting stuck writing to stopChan forever
 	stopChan := make(chan bool, 1)

-	clientName, messageChan := ms.addClient(req.Name, peerAddress)
+	clientName, messageChan := ms.addClient(req.ClientType, peerAddress)
+	for _, update := range ms.Cluster.AddClusterNode(req.ClientType, peerAddress, req.Version) {
+		ms.broadcastToClients(update)
+	}

-	defer ms.deleteClient(clientName)
+	defer func() {
+		for _, update := range ms.Cluster.RemoveClusterNode(req.ClientType, peerAddress) {
+			ms.broadcastToClients(update)
+		}
+		ms.deleteClient(clientName)
+	}()

 	for _, message := range ms.Topo.ToVolumeLocations() {
-		if sendErr := stream.Send(message); sendErr != nil {
+		if sendErr := stream.Send(&master_pb.KeepConnectedResponse{VolumeLocation: message}); sendErr != nil {
 			return sendErr
 		}
 	}
@@ -235,21 +233,31 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
 }

+func (ms *MasterServer) broadcastToClients(message *master_pb.KeepConnectedResponse) {
+	ms.clientChansLock.RLock()
+	for _, ch := range ms.clientChans {
+		ch <- message
+	}
+	ms.clientChansLock.RUnlock()
+}

 func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error {
 	leader, err := ms.Topo.Leader()
 	if err != nil {
 		glog.Errorf("topo leader: %v", err)
 		return raft.NotLeaderError
 	}
-	if err := stream.Send(&master_pb.VolumeLocation{
-		Leader: string(leader),
+	if err := stream.Send(&master_pb.KeepConnectedResponse{
+		VolumeLocation: &master_pb.VolumeLocation{
+			Leader: string(leader),
+		},
 	}); err != nil {
 		return err
 	}
 	return nil
 }

-func (ms *MasterServer) addClient(clientType string, clientAddress pb.ServerAddress) (clientName string, messageChan chan *master_pb.VolumeLocation) {
+func (ms *MasterServer) addClient(clientType string, clientAddress pb.ServerAddress) (clientName string, messageChan chan *master_pb.KeepConnectedResponse) {
 	clientName = clientType + "@" + string(clientAddress)
 	glog.V(0).Infof("+ client %v", clientName)
@@ -258,7 +266,7 @@ func (ms *MasterServer) addClient(clientType string, clientAddress pb.ServerAddr
 	// trying to send to it in SendHeartbeat and so we can't lock the
 	// clientChansLock to remove the channel and we're stuck writing to it
 	// 100 is probably overkill
-	messageChan = make(chan *master_pb.VolumeLocation, 100)
+	messageChan = make(chan *master_pb.KeepConnectedResponse, 100)

 	ms.clientChansLock.Lock()
 	ms.clientChans[clientName] = messageChan
@@ -295,19 +303,6 @@ func findClientAddress(ctx context.Context, grpcPort uint32) string {
 }

-func (ms *MasterServer) ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) {
-	resp := &master_pb.ListMasterClientsResponse{}
-
-	ms.clientChansLock.RLock()
-	defer ms.clientChansLock.RUnlock()
-
-	for k := range ms.clientChans {
-		if strings.HasPrefix(k, req.ClientType+"@") {
-			resp.GrpcAddresses = append(resp.GrpcAddresses, k[len(req.ClientType)+1:])
-		}
-	}
-	return resp, nil
-}

 func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) {
 	// tell the volume servers about the leader

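The scattered clientChans loops collapse here into one broadcastToClients helper, and the per-client channel element widens from *master_pb.VolumeLocation to the *master_pb.KeepConnectedResponse envelope, so cluster-membership updates and volume-location updates share the same fan-out path. A self-contained sketch of the pattern (simplified stand-in types, not the master_pb messages):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type keepConnectedResponse struct{ payload string }

    type broadcaster struct {
    	mu    sync.RWMutex
    	chans map[string]chan *keepConnectedResponse
    }

    func (b *broadcaster) add(name string) chan *keepConnectedResponse {
    	b.mu.Lock()
    	defer b.mu.Unlock()
    	// buffered, so a briefly slow client does not stall the broadcaster
    	ch := make(chan *keepConnectedResponse, 100)
    	b.chans[name] = ch
    	return ch
    }

    // one write path for every kind of update, under a read lock
    func (b *broadcaster) broadcast(msg *keepConnectedResponse) {
    	b.mu.RLock()
    	defer b.mu.RUnlock()
    	for _, ch := range b.chans {
    		ch <- msg
    	}
    }

    func main() {
    	b := &broadcaster{chans: map[string]chan *keepConnectedResponse{}}
    	ch := b.add("filer@localhost:8888")
    	b.broadcast(&keepConnectedResponse{payload: "volume location update"})
    	fmt.Println((<-ch).payload)
    }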
weed/server/master_grpc_server_cluster.go (new file, 21 lines)

@@ -0,0 +1,21 @@
+package weed_server
+
+import (
+	"context"
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+func (ms *MasterServer) ListClusterNodes(ctx context.Context, req *master_pb.ListClusterNodesRequest) (*master_pb.ListClusterNodesResponse, error) {
+	resp := &master_pb.ListClusterNodesResponse{}
+
+	clusterNodes := ms.Cluster.ListClusterNode(req.ClientType)
+
+	for _, node := range clusterNodes {
+		resp.ClusterNodes = append(resp.ClusterNodes, &master_pb.ListClusterNodesResponse_ClusterNode{
+			Address:  string(node.Address),
+			Version:  node.Version,
+			IsLeader: ms.Cluster.IsOneLeader(node.Address),
+		})
+	}
+	return resp, nil
+}

weed/server/master_server.go (10 changed lines)

@@ -2,6 +2,7 @@ package weed_server
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/cluster"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"net/http"
 	"net/http/httputil"
@@ -60,13 +61,15 @@ type MasterServer struct {
 	// notifying clients
 	clientChansLock sync.RWMutex
-	clientChans     map[string]chan *master_pb.VolumeLocation
+	clientChans     map[string]chan *master_pb.KeepConnectedResponse

 	grpcDialOption grpc.DialOption

 	MasterClient *wdclient.MasterClient

 	adminLocks *AdminLocks
+
+	Cluster *cluster.Cluster
 }

 func NewMasterServer(r *mux.Router, option *MasterOption, peers []pb.ServerAddress) *MasterServer {
@@ -99,10 +102,11 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []pb.ServerAddre
 		option:          option,
 		preallocateSize: preallocateSize,
 		vgCh:            make(chan *topology.VolumeGrowRequest, 1<<6),
-		clientChans:     make(map[string]chan *master_pb.VolumeLocation),
+		clientChans:     make(map[string]chan *master_pb.KeepConnectedResponse),
 		grpcDialOption:  grpcDialOption,
-		MasterClient:    wdclient.NewMasterClient(grpcDialOption, "master", option.Master, "", peers),
+		MasterClient:    wdclient.NewMasterClient(grpcDialOption, cluster.MasterType, option.Master, "", peers),
 		adminLocks:      NewAdminLocks(),
+		Cluster:         cluster.NewCluster(),
 	}
 	ms.boundedLeaderChan = make(chan int, 16)

weed/server/volume_grpc_copy.go (2 changed lines)

@@ -80,7 +80,7 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre
 	// println("source:", volFileInfoResp.String())
 	copyResponse := &volume_server_pb.VolumeCopyResponse{}
-	reportInterval := int64(1024*1024*128)
+	reportInterval := int64(1024 * 1024 * 128)
 	nextReportTarget := reportInterval
 	var modifiedTsNs int64
 	var sendErr error

weed/server/volume_grpc_tier_upload.go (7 changed lines)

@@ -62,13 +62,8 @@ func (vs *VolumeServer) VolumeTierMoveDatToRemote(req *volume_server_pb.VolumeTi
 		})
 	}

-	// remember the file original source
-	attributes := make(map[string]string)
-	attributes["volumeId"] = v.Id.String()
-	attributes["collection"] = v.Collection
-	attributes["ext"] = ".dat"
-
 	// copy the data file
-	key, size, err := backendStorage.CopyFile(diskFile.File, attributes, fn)
+	key, size, err := backendStorage.CopyFile(diskFile.File, fn)
 	if err != nil {
 		return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err)
 	}

weed/server/volume_grpc_vacuum.go (2 changed lines)

@@ -27,7 +27,7 @@ func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_serve
 func (vs *VolumeServer) VacuumVolumeCompact(req *volume_server_pb.VacuumVolumeCompactRequest, stream volume_server_pb.VolumeServer_VacuumVolumeCompactServer) error {
 	resp := &volume_server_pb.VacuumVolumeCompactResponse{}
-	reportInterval := int64(1024*1024*128)
+	reportInterval := int64(1024 * 1024 * 128)
 	nextReportTarget := reportInterval
 	var sendErr error

55
weed/shell/command_cluster_ps.go

@@ -0,0 +1,55 @@
+package shell
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/cluster"
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+	"io"
+)
+
+func init() {
+	Commands = append(Commands, &commandClusterPs{})
+}
+
+type commandClusterPs struct {
+}
+
+func (c *commandClusterPs) Name() string {
+	return "cluster.ps"
+}
+
+func (c *commandClusterPs) Help() string {
+	return `check current cluster process status
+
+	cluster.ps
+
+`
+}
+
+func (c *commandClusterPs) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+	clusterPsCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	if err = clusterPsCommand.Parse(args); err != nil {
+		return nil
+	}
+
+	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+		resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
+			ClientType: cluster.FilerType,
+		})
+		if err != nil {
+			// check the error before reading resp, which is nil when the call fails
+			return err
+		}
+
+		fmt.Fprintf(writer, "the cluster has %d filers\n", len(resp.ClusterNodes))
+		for _, node := range resp.ClusterNodes {
+			fmt.Fprintf(writer, "  * %s (%v)\n", node.Address, node.Version)
+		}
+		return nil
+	})
+
+	return err
+}
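For orientation, the new command runs inside weed shell and prints one line per filer registered with the master. A hypothetical session, matching the Fprintf format above (addresses and version strings are illustrative):

	> cluster.ps
	the cluster has 2 filers
	  * 192.168.1.10:8888 (30GB 2.77)
	  * 192.168.1.11:8888 (30GB 2.77)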

5
weed/shell/command_ec_encode.go

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"io"
+	"math/rand"
 	"sync"
 	"time"

@@ -33,7 +34,7 @@ func (c *commandEcEncode) Name() string {
 func (c *commandEcEncode) Help() string {
 	return `apply erasure coding to a volume

-	ec.encode [-collection=""] [-fullPercent=95] [-quietFor=1h]
+	ec.encode [-collection=""] [-fullPercent=95 -quietFor=1h]
 	ec.encode [-collection=""] [-volumeId=<volume_id>]

	This command will:

@@ -248,7 +249,7 @@ func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServer
 func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) {
 	allocated = make([][]uint32, len(servers))
 	allocatedShardIdIndex := uint32(0)
-	serverIndex := 0
+	serverIndex := rand.Intn(len(servers))
 	for allocatedShardIdIndex < erasure_coding.TotalShardsCount {
 		if servers[serverIndex].freeEcSlot > 0 {
 			allocated[serverIndex] = append(allocated[serverIndex], allocatedShardIdIndex)
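The balancedEcDistribution change replaces the fixed starting index with a random one, so the first shards of successive encodings do not always land on servers[0]. A minimal standalone sketch of the same round-robin-with-random-start idea (the freeSlots input and distribute helper are illustrative, not SeaweedFS APIs):

	package main

	import (
		"fmt"
		"math/rand"
	)

	// distribute hands out totalShards shard ids round-robin across servers,
	// starting at a random server so repeated allocations spread the load.
	// As in balancedEcDistribution, the caller must ensure total free slots
	// cover totalShards, or the loop never terminates.
	func distribute(freeSlots []int, totalShards int) [][]int {
		allocated := make([][]int, len(freeSlots))
		serverIndex := rand.Intn(len(freeSlots)) // the changed line: random start instead of 0
		for shardId := 0; shardId < totalShards; {
			if freeSlots[serverIndex] > 0 {
				allocated[serverIndex] = append(allocated[serverIndex], shardId)
				freeSlots[serverIndex]--
				shardId++
			}
			serverIndex = (serverIndex + 1) % len(freeSlots)
		}
		return allocated
	}

	func main() {
		// 14 = 10 data + 4 parity shards, SeaweedFS's default EC layout
		fmt.Println(distribute([]int{6, 6, 6}, 14))
	}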

8
weed/shell/command_remote_cache.go

@@ -26,7 +26,7 @@ func (c *commandRemoteCache) Help() string {
 	return `cache the file content for mounted directories or files

	# assume a remote storage is configured to name "cloud1"
-	remote.configure -name=cloud1 -type=s3 -access_key=xxx -secret_key=yyy
+	remote.configure -name=cloud1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy

	# mount and pull one bucket
	remote.mount -dir=/xxx -remote=cloud1/bucket

@@ -163,10 +163,10 @@ func (c *commandRemoteCache) cacheContentData(commandEnv *CommandEnv, writer io.
 		remoteLocation := filer.MapFullPathToRemoteStorageLocation(localMountedDir, remoteMountedLocation, dir.Child(entry.Name))

-		if err := filer.DownloadToLocal(commandEnv, remoteConf, remoteLocation, dir, entry); err != nil {
-			fmt.Fprintf(writer, "DownloadToLocal %+v: %v\n", remoteLocation, err)
+		if err := filer.CacheRemoteObjectToLocalCluster(commandEnv, remoteConf, remoteLocation, dir, entry); err != nil {
+			fmt.Fprintf(writer, "CacheRemoteObjectToLocalCluster %+v: %v\n", remoteLocation, err)
 			if executionErr == nil {
-				executionErr = fmt.Errorf("DownloadToLocal %+v: %v\n", remoteLocation, err)
+				executionErr = fmt.Errorf("CacheRemoteObjectToLocalCluster %+v: %v\n", remoteLocation, err)
 			}
 			return
 		}

2
weed/shell/command_remote_meta_sync.go

@@ -27,7 +27,7 @@ func (c *commandRemoteMetaSync) Help() string {
 	return `synchronize the local file meta data with the remote file metadata

	# assume a remote storage is configured to name "cloud1"
-	remote.configure -name=cloud1 -type=s3 -access_key=xxx -secret_key=yyy
+	remote.configure -name=cloud1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy

	# mount and pull one bucket
	remote.mount -dir=/xxx -remote=cloud1/bucket

2
weed/shell/command_remote_mount.go

@@ -32,7 +32,7 @@ func (c *commandRemoteMount) Help() string {
 	return `mount remote storage and pull its metadata

	# assume a remote storage is configured to name "cloud1"
-	remote.configure -name=cloud1 -type=s3 -access_key=xxx -secret_key=yyy
+	remote.configure -name=cloud1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy

	# mount and pull one bucket
	remote.mount -dir=/xxx -remote=cloud1/bucket

2
weed/shell/command_remote_mount_buckets.go

@@ -27,7 +27,7 @@ func (c *commandRemoteMountBuckets) Help() string {
 	return `mount all buckets in remote storage and pull its metadata

	# assume a remote storage is configured to name "cloud1"
-	remote.configure -name=cloud1 -type=s3 -access_key=xxx -secret_key=yyy
+	remote.configure -name=cloud1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy

	# mount all buckets
	remote.mount.buckets -remote=cloud1

2
weed/shell/command_remote_unmount.go

@@ -27,7 +27,7 @@ func (c *commandRemoteUnmount) Help() string {
 	return `unmount remote storage

	# assume a remote storage is configured to name "s3_1"
-	remote.configure -name=s3_1 -type=s3 -access_key=xxx -secret_key=yyy
+	remote.configure -name=s3_1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy

	# mount and pull one bucket
	remote.mount -dir=/xxx -remote=s3_1/bucket

23
weed/shell/command_volume_fix_replication_test.go

@@ -261,6 +261,29 @@ func TestSatisfyReplicaPlacement00x(t *testing.T) {
 }

+func TestSatisfyReplicaPlacement100(t *testing.T) {
+
+	var tests = []testcase{
+		{
+			name:        "test 100",
+			replication: "100",
+			replicas: []*VolumeReplica{
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+				},
+				{
+					location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+				},
+			},
+			possibleLocation: location{"dc2", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+			expected:         true,
+		},
+	}
+
+	runTests(tests, t)
+
+}
+
 func runTests(tests []testcase, t *testing.T) {
 	for _, tt := range tests {
 		replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)
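For context on the new case: the three digits of a SeaweedFS replication string count extra copies on different data centers, racks, and servers respectively, roughly:

	replication "100" → 1 copy on another data center, 0 on another rack, 0 on another server

With the existing replicas both in dc1, a candidate location in dc2 supplies the cross-data-center copy, which is why the test expects true.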

27
weed/shell/shell_liner.go

@@ -3,9 +3,13 @@ package shell
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/cluster"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/util/grace"
 	"io"
+	"math/rand"
 	"os"
 	"path"
 	"regexp"

@@ -47,6 +51,29 @@ func RunShell(options ShellOptions) {
 	go commandEnv.MasterClient.KeepConnectedToMaster()
 	commandEnv.MasterClient.WaitUntilConnected()

+	if commandEnv.option.FilerAddress == "" {
+		var filers []pb.ServerAddress
+		commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+			resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
+				ClientType: cluster.FilerType,
+			})
+			if err != nil {
+				return err
+			}
+			for _, clusterNode := range resp.ClusterNodes {
+				filers = append(filers, pb.ServerAddress(clusterNode.Address))
+			}
+			return nil
+		})
+		fmt.Printf("master: %s ", *options.Masters)
+		if len(filers) > 0 {
+			fmt.Printf("filers: %v", filers)
+			commandEnv.option.FilerAddress = filers[rand.Intn(len(filers))]
+		}
+		fmt.Println()
+	}
+
 	if commandEnv.option.FilerAddress != "" {
 		commandEnv.WithFilerClient(func(filerClient filer_pb.SeaweedFilerClient) error {
 			resp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
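With this change, weed shell no longer needs an explicit filer: it asks the master for the registered filers and picks one at random, spreading shell sessions across filers. A hypothetical invocation (addresses are placeholders):

	$ weed shell -master=localhost:9333
	master: localhost:9333 filers: [localhost:8888]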

2
weed/storage/backend/backend.go

@@ -25,7 +25,7 @@ type BackendStorageFile interface {
 type BackendStorage interface {
 	ToProperties() map[string]string
 	NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) BackendStorageFile
-	CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error)
+	CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error)
 	DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error)
 	DeleteFile(key string) (err error)
 }
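The interface now takes only the source file and a progress callback; the per-file attributes (and the S3 tagging they fed, removed further below) are gone. A self-contained sketch of the new shape with a mock backend, showing how the callback can observe or abort a copy (everything here is illustrative, not SeaweedFS code):

	package main

	import (
		"fmt"
		"os"
	)

	// mockBackend mimics the trimmed CopyFile(f, fn) signature.
	type mockBackend struct{}

	func (mockBackend) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
		st, err := f.Stat()
		if err != nil {
			return "", 0, err
		}
		// a real backend reports repeatedly while streaming; report once here
		if err := fn(st.Size(), 100); err != nil {
			return "", 0, err // a non-nil callback result aborts the copy
		}
		return "mock-key", st.Size(), nil
	}

	func main() {
		f, err := os.Open("/etc/hosts")
		if err != nil {
			panic(err)
		}
		defer f.Close()
		key, size, err := mockBackend{}.CopyFile(f, func(p int64, pct float32) error {
			fmt.Printf("copied %d bytes (%.0f%%)\n", p, pct)
			return nil
		})
		fmt.Println(key, size, err)
	}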

4
weed/storage/backend/s3_backend/s3_backend.go

@@ -79,13 +79,13 @@ func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb
 	return f
 }

-func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
+func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
 	randomUuid, _ := uuid.NewRandom()
 	key = randomUuid.String()

 	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)

-	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn)
+	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn)

 	return
 }

5
weed/storage/backend/s3_backend/s3_sessions.go

@@ -2,6 +2,8 @@ package s3_backend
 import (
 	"fmt"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/chrislusf/seaweedfs/weed/util"
 	"sync"

 	"github.com/aws/aws-sdk-go/aws"

@@ -47,6 +49,9 @@ func createSession(awsAccessKeyId, awsSecretAccessKey, region, endpoint string)
 	if err != nil {
 		return nil, fmt.Errorf("create aws session in region %s: %v", region, err)
 	}
+	sess.Handlers.Build.PushBack(func(r *request.Request) {
+		r.HTTPRequest.Header.Set("User-Agent", "SeaweedFS/"+util.VERSION_NUMBER)
+	})

 	t := s3.New(sess)
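The hook above registers a build-phase handler on the session, so every request the SDK constructs carries a SeaweedFS User-Agent. A standalone sketch of the same aws-sdk-go v1 pattern (region and header value are placeholders):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/request"
		"github.com/aws/aws-sdk-go/aws/session"
	)

	func main() {
		sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
		if err != nil {
			panic(err)
		}
		// Build handlers run while each HTTP request is assembled, so the
		// header appears on every call made through this session.
		sess.Handlers.Build.PushBack(func(r *request.Request) {
			r.HTTPRequest.Header.Set("User-Agent", "SeaweedFS/2.77")
		})
		fmt.Println("session configured with custom User-Agent")
	}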

24
weed/storage/backend/s3_backend/s3_upload.go

@@ -12,9 +12,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 )

-func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string,
-	attributes map[string]string,
-	fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
+func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {

 	//open the file
 	f, err := os.Open(filename)

@@ -48,25 +46,13 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey
 		fn: fn,
 	}

-	// process tagging
-	tags := ""
-	for k, v := range attributes {
-		if len(tags) > 0 {
-			tags = tags + "&"
-		}
-		tags = tags + k + "=" + v
-	}
-
 	// Upload the file to S3.
 	var result *s3manager.UploadOutput
 	result, err = uploader.Upload(&s3manager.UploadInput{
-		Bucket:               aws.String(destBucket),
-		Key:                  aws.String(destKey),
-		Body:                 fileReader,
-		ACL:                  aws.String("private"),
-		ServerSideEncryption: aws.String("AES256"),
-		StorageClass:         aws.String("STANDARD_IA"),
-		Tagging:              aws.String(tags),
+		Bucket:       aws.String(destBucket),
+		Key:          aws.String(destKey),
+		Body:         fileReader,
+		StorageClass: aws.String("STANDARD_IA"),
	})

 	//in case it fails to upload

2
weed/util/constants.go

@@ -5,7 +5,7 @@ import (
 )

 var (
-	VERSION_NUMBER = fmt.Sprintf("%.02f", 2.75)
+	VERSION_NUMBER = fmt.Sprintf("%.02f", 2.77)
 	VERSION        = sizeLimit + " " + VERSION_NUMBER
 	COMMIT         = ""
 )

65
weed/wdclient/masterclient.go

@@ -21,6 +21,8 @@ type MasterClient struct {
 	grpcDialOption grpc.DialOption

 	vidMap
+
+	OnPeerUpdate func(update *master_pb.ClusterNodeUpdate)
 }

 func NewMasterClient(grpcDialOption grpc.DialOption, clientType string, clientHost pb.ServerAddress, clientDataCenter string, masters []pb.ServerAddress) *MasterClient {

@@ -93,7 +95,7 @@ func (mc *MasterClient) tryAllMasters() {
 }

 func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedLeader pb.ServerAddress) {
-	glog.V(0).Infof("%s masterClient Connecting to master %v", mc.clientType, master)
+	glog.V(1).Infof("%s masterClient Connecting to master %v", mc.clientType, master)
 	gprcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {

 		ctx, cancel := context.WithCancel(context.Background())

@@ -105,7 +107,11 @@ func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedL
 			return err
 		}

-		if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.clientType, ClientAddress: string(mc.clientHost)}); err != nil {
+		if err = stream.Send(&master_pb.KeepConnectedRequest{
+			ClientType:    mc.clientType,
+			ClientAddress: string(mc.clientHost),
+			Version:       util.Version(),
+		}); err != nil {
 			glog.V(0).Infof("%s masterClient failed to send to %s: %v", mc.clientType, master, err)
 			return err
 		}

@@ -114,34 +120,49 @@ func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedL
 		mc.currentMaster = master

 		for {
-			volumeLocation, err := stream.Recv()
+			resp, err := stream.Recv()
 			if err != nil {
 				glog.V(0).Infof("%s masterClient failed to receive from %s: %v", mc.clientType, master, err)
 				return err
 			}

-			// maybe the leader is changed
-			if volumeLocation.Leader != "" {
-				glog.V(0).Infof("redirected to leader %v", volumeLocation.Leader)
-				nextHintedLeader = pb.ServerAddress(volumeLocation.Leader)
-				return nil
+			if resp.VolumeLocation != nil {
+				// maybe the leader is changed
+				if resp.VolumeLocation.Leader != "" {
+					glog.V(0).Infof("redirected to leader %v", resp.VolumeLocation.Leader)
+					nextHintedLeader = pb.ServerAddress(resp.VolumeLocation.Leader)
+					return nil
+				}
+
+				// process new volume location
+				loc := Location{
+					Url:        resp.VolumeLocation.Url,
+					PublicUrl:  resp.VolumeLocation.PublicUrl,
+					DataCenter: resp.VolumeLocation.DataCenter,
+					GrpcPort:   int(resp.VolumeLocation.GrpcPort),
+				}
+				for _, newVid := range resp.VolumeLocation.NewVids {
+					glog.V(1).Infof("%s: %s masterClient adds volume %d", mc.clientType, loc.Url, newVid)
+					mc.addLocation(newVid, loc)
+				}
+				for _, deletedVid := range resp.VolumeLocation.DeletedVids {
+					glog.V(1).Infof("%s: %s masterClient removes volume %d", mc.clientType, loc.Url, deletedVid)
+					mc.deleteLocation(deletedVid, loc)
+				}
 			}

-			// process new volume location
-			loc := Location{
-				Url:        volumeLocation.Url,
-				PublicUrl:  volumeLocation.PublicUrl,
-				DataCenter: volumeLocation.DataCenter,
-				GrpcPort:   int(volumeLocation.GrpcPort),
-			}
-			for _, newVid := range volumeLocation.NewVids {
-				glog.V(1).Infof("%s: %s masterClient adds volume %d", mc.clientType, loc.Url, newVid)
-				mc.addLocation(newVid, loc)
-			}
-			for _, deletedVid := range volumeLocation.DeletedVids {
-				glog.V(1).Infof("%s: %s masterClient removes volume %d", mc.clientType, loc.Url, deletedVid)
-				mc.deleteLocation(deletedVid, loc)
+			if resp.ClusterNodeUpdate != nil {
+				update := resp.ClusterNodeUpdate
+				if mc.OnPeerUpdate != nil {
+					if update.IsAdd {
+						glog.V(0).Infof("+ %s %s leader:%v\n", update.NodeType, update.Address, update.IsLeader)
+					} else {
+						glog.V(0).Infof("- %s %s leader:%v\n", update.NodeType, update.Address, update.IsLeader)
+					}
+					mc.OnPeerUpdate(update)
+				}
 			}
 		}
 	})
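Since the KeepConnected stream now multiplexes volume-location updates with cluster-membership events, any component holding a MasterClient can watch peers come and go by setting the new hook before starting the keep-alive loop. A hedged fragment, assuming the surrounding variables already exist (this is not code from the patch):

	mc := wdclient.NewMasterClient(grpcDialOption, cluster.FilerType, selfAddress, "", masters)
	mc.OnPeerUpdate = func(update *master_pb.ClusterNodeUpdate) {
		// called for every ClusterNodeUpdate the master streams back
		if update.IsAdd {
			fmt.Printf("peer joined: %s %s (leader: %v)\n", update.NodeType, update.Address, update.IsLeader)
		} else {
			fmt.Printf("peer left: %s %s\n", update.NodeType, update.Address)
		}
	}
	go mc.KeepConnectedToMaster()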
