
refactor: separating out remote.proto

pull/2283/head
Chris Lu, 3 years ago
commit 05a648bb96
1. other/java/client/src/main/proto/filer.proto (49 changed lines)
2. weed/command/filer_remote_sync.go (7 changed lines)
3. weed/filer/filer_remote_storage.go (41 changed lines)
4. weed/filer/filer_remote_storage_test.go (4 changed lines)
5. weed/filer/read_remote.go (9 changed lines)
6. weed/pb/Makefile (1 changed line)
7. weed/pb/filer.proto (49 changed lines)
8. weed/pb/filer_pb/filer.pb.go (905 changed lines)
9. weed/pb/filer_pb/filer_pb_helper.go (4 changed lines)
10. weed/pb/remote.proto (59 changed lines)
11. weed/pb/remote_pb/remote.pb.go (619 changed lines)
12. weed/pb/remote_pb/remote_pb_helper.go (8 changed lines)
13. weed/pb/volume_server.proto (12 changed lines)
14. weed/pb/volume_server_pb/volume_server.pb.go (1899 changed lines)
15. weed/remote_storage/azure/azure_storage_client.go (19 changed lines)
16. weed/remote_storage/gcs/gcs_storage_client.go (19 changed lines)
17. weed/remote_storage/remote_storage.go (25 changed lines)
18. weed/remote_storage/s3/aliyun.go (4 changed lines)
19. weed/remote_storage/s3/backblaze.go (4 changed lines)
20. weed/remote_storage/s3/baidu.go (4 changed lines)
21. weed/remote_storage/s3/s3_storage_client.go (19 changed lines)
22. weed/remote_storage/s3/tencent.go (4 changed lines)
23. weed/remote_storage/s3/wasabi.go (4 changed lines)
24. weed/server/filer_grpc_server_remote.go (21 changed lines)
25. weed/server/volume_grpc_erasure_coding.go (4 changed lines)
26. weed/server/volume_grpc_remote.go (17 changed lines)
27. weed/shell/command_remote_cache.go (8 changed lines)
28. weed/shell/command_remote_configure.go (7 changed lines)
29. weed/shell/command_remote_meta_sync.go (5 changed lines)
30. weed/shell/command_remote_mount.go (9 changed lines)
31. weed/shell/command_remote_uncache.go (4 changed lines)
32. weed/shell/command_remote_unmount.go (3 changed lines)
33. weed/storage/erasure_coding/ec_volume.go (6 changed lines)
34. weed/storage/store.go (6 changed lines)
35. weed/storage/volume_info/volume_info.go (2 changed lines)
36. weed/storage/volume_tier.go (6 changed lines)

other/java/client/src/main/proto/filer.proto (49 changed lines)

@@ -336,6 +336,7 @@ message KeepConnectedResponse {
message LocateBrokerRequest {
string resource = 1;
}
message LocateBrokerResponse {
bool found = 1;
// if found, send the exact address
@@ -386,54 +387,6 @@ message FilerConf {
/////////////////////////
// Remote Storage related
/////////////////////////
- message RemoteConf {
- string type = 1;
- string name = 2;
- string s3_access_key = 4;
- string s3_secret_key = 5;
- string s3_region = 6;
- string s3_endpoint = 7;
- string s3_storage_class = 8;
- bool s3_force_path_style = 9;
- string gcs_google_application_credentials = 10;
- string azure_account_name = 15;
- string azure_account_key = 16;
- string backblaze_key_id = 20;
- string backblaze_application_key = 21;
- string backblaze_endpoint = 22;
- string aliyun_access_key = 25;
- string aliyun_secret_key = 26;
- string aliyun_endpoint = 27;
- string aliyun_region = 28;
- string tencent_secret_id = 30;
- string tencent_secret_key = 31;
- string tencent_endpoint = 32;
- string tencent_region = 33;
- string baidu_access_key = 35;
- string baidu_secret_key = 36;
- string baidu_endpoint = 37;
- string baidu_region = 38;
- string wasabi_access_key = 40;
- string wasabi_secret_key = 41;
- string wasabi_endpoint = 42;
- string wasabi_region = 43;
- }
- message RemoteStorageMapping {
- map<string,RemoteStorageLocation> mappings = 1;
- }
- message RemoteStorageLocation {
- string name = 1;
- string bucket = 2;
- string path = 3;
- }
message DownloadToLocalRequest {
string directory = 1;
string name = 2;

weed/command/filer_remote_sync.go (7 changed lines)

@@ -7,6 +7,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -100,7 +101,7 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
return true
}
- func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string, remoteStorage *filer_pb.RemoteConf, remoteStorageMountLocation *filer_pb.RemoteStorageLocation) error {
+ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string, remoteStorage *remote_pb.RemoteConf, remoteStorageMountLocation *remote_pb.RemoteStorageLocation) error {
dirHash := util.HashStringToLong(mountedDir)
@@ -206,10 +207,10 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
"filer.remote.sync", mountedDir, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
}
- func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *filer_pb.RemoteStorageLocation) *filer_pb.RemoteStorageLocation {
+ func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *remote_pb.RemoteStorageLocation) *remote_pb.RemoteStorageLocation {
source := string(sourcePath[len(mountDir):])
dest := util.FullPath(remoteMountLocation.Path).Child(source)
- return &filer_pb.RemoteStorageLocation{
+ return &remote_pb.RemoteStorageLocation{
Name: remoteMountLocation.Name,
Bucket: remoteMountLocation.Bucket,
Path: string(dest),

weed/filer/filer_remote_storage.go (41 changed lines)

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@@ -21,13 +22,13 @@ const REMOTE_STORAGE_MOUNT_FILE = "mount.mapping"
type FilerRemoteStorage struct {
rules ptrie.Trie
- storageNameToConf map[string]*filer_pb.RemoteConf
+ storageNameToConf map[string]*remote_pb.RemoteConf
}
func NewFilerRemoteStorage() (rs *FilerRemoteStorage) {
rs = &FilerRemoteStorage{
rules: ptrie.New(),
- storageNameToConf: make(map[string]*filer_pb.RemoteConf),
+ storageNameToConf: make(map[string]*remote_pb.RemoteConf),
}
return rs
}
@@ -56,7 +57,7 @@ func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *F
if !strings.HasSuffix(entry.Name(), REMOTE_STORAGE_CONF_SUFFIX) {
return nil
}
- conf := &filer_pb.RemoteConf{}
+ conf := &remote_pb.RemoteConf{}
if err := proto.Unmarshal(entry.Content, conf); err != nil {
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, entry.Name(), err)
}
@@ -66,7 +67,7 @@ func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *F
}
func (rs *FilerRemoteStorage) loadRemoteStorageMountMapping(data []byte) (err error) {
- mappings := &filer_pb.RemoteStorageMapping{}
+ mappings := &remote_pb.RemoteStorageMapping{}
if err := proto.Unmarshal(data, mappings); err != nil {
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, err)
}
@@ -76,23 +77,23 @@ func (rs *FilerRemoteStorage) loadRemoteStorageMountMapping(data []byte) (err er
return nil
}
- func (rs *FilerRemoteStorage) mapDirectoryToRemoteStorage(dir util.FullPath, loc *filer_pb.RemoteStorageLocation) {
+ func (rs *FilerRemoteStorage) mapDirectoryToRemoteStorage(dir util.FullPath, loc *remote_pb.RemoteStorageLocation) {
rs.rules.Put([]byte(dir+"/"), loc)
}
- func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util.FullPath, remoteLocation *filer_pb.RemoteStorageLocation) {
+ func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util.FullPath, remoteLocation *remote_pb.RemoteStorageLocation) {
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
mountDir = util.FullPath(string(key[:len(key)-1]))
- remoteLocation = value.(*filer_pb.RemoteStorageLocation)
+ remoteLocation = value.(*remote_pb.RemoteStorageLocation)
return true
})
return
}
- func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client remote_storage.RemoteStorageClient, remoteConf *filer_pb.RemoteConf, found bool) {
- var storageLocation *filer_pb.RemoteStorageLocation
+ func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client remote_storage.RemoteStorageClient, remoteConf *remote_pb.RemoteConf, found bool) {
+ var storageLocation *remote_pb.RemoteStorageLocation
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
- storageLocation = value.(*filer_pb.RemoteStorageLocation)
+ storageLocation = value.(*remote_pb.RemoteStorageLocation)
return true
})
@@ -104,7 +105,7 @@ func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client r
return rs.GetRemoteStorageClient(storageLocation.Name)
}
- func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client remote_storage.RemoteStorageClient, remoteConf *filer_pb.RemoteConf, found bool) {
+ func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client remote_storage.RemoteStorageClient, remoteConf *remote_pb.RemoteConf, found bool) {
remoteConf, found = rs.storageNameToConf[storageName]
if !found {
return
@@ -118,9 +119,9 @@ func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client
return
}
- func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *filer_pb.RemoteStorageMapping, err error) {
- mappings = &filer_pb.RemoteStorageMapping{
- Mappings: make(map[string]*filer_pb.RemoteStorageLocation),
+ func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *remote_pb.RemoteStorageMapping, err error) {
+ mappings = &remote_pb.RemoteStorageMapping{
+ Mappings: make(map[string]*remote_pb.RemoteStorageLocation),
}
if len(oldContent) > 0 {
if err = proto.Unmarshal(oldContent, mappings); err != nil {
@@ -130,7 +131,7 @@ func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *filer_pb.Remot
return
}
- func AddRemoteStorageMapping(oldContent []byte, dir string, storageLocation *filer_pb.RemoteStorageLocation) (newContent []byte, err error) {
+ func AddRemoteStorageMapping(oldContent []byte, dir string, storageLocation *remote_pb.RemoteStorageLocation) (newContent []byte, err error) {
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
if unmarshalErr != nil {
// skip
@@ -162,7 +163,7 @@ func RemoveRemoteStorageMapping(oldContent []byte, dir string) (newContent []byt
return
}
- func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *filer_pb.RemoteStorageMapping, readErr error) {
+ func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *remote_pb.RemoteStorageMapping, readErr error) {
var oldContent []byte
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE)
@@ -179,7 +180,7 @@ func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (map
return
}
- func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string, storageName string) (conf *filer_pb.RemoteConf, readErr error) {
+ func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string, storageName string) (conf *remote_pb.RemoteConf, readErr error) {
var oldContent []byte
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX)
@@ -189,7 +190,7 @@ func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string,
}
// unmarshal storage configuration
- conf = &filer_pb.RemoteConf{}
+ conf = &remote_pb.RemoteConf{}
if unMarshalErr := proto.Unmarshal(oldContent, conf); unMarshalErr != nil {
readErr = fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX, unMarshalErr)
return
@@ -198,7 +199,7 @@ func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string,
return
}
- func DetectMountInfo(grpcDialOption grpc.DialOption, filerAddress string, dir string) (*filer_pb.RemoteStorageMapping, string, *filer_pb.RemoteStorageLocation, *filer_pb.RemoteConf, error) {
+ func DetectMountInfo(grpcDialOption grpc.DialOption, filerAddress string, dir string) (*remote_pb.RemoteStorageMapping, string, *remote_pb.RemoteStorageLocation, *remote_pb.RemoteConf, error) {
mappings, listErr := ReadMountMappings(grpcDialOption, filerAddress)
if listErr != nil {
@@ -209,7 +210,7 @@ func DetectMountInfo(grpcDialOption grpc.DialOption, filerAddress string, dir st
}
var localMountedDir string
- var remoteStorageMountedLocation *filer_pb.RemoteStorageLocation
+ var remoteStorageMountedLocation *remote_pb.RemoteStorageLocation
for k, loc := range mappings.Mappings {
if strings.HasPrefix(dir, k) {
localMountedDir, remoteStorageMountedLocation = k, loc
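
As an aside on how the mapping helpers above compose after this change, here is a minimal Go sketch (not part of the commit; the "cloud1" storage name and "/buckets/b1" directory are made-up examples) that adds one mount mapping and reads it back through the new remote_pb types:

package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
)

func main() {
	// start from empty mount.mapping content and add one mapping;
	// the directory and location values are illustrative only
	newContent, err := filer.AddRemoteStorageMapping(nil, "/buckets/b1", &remote_pb.RemoteStorageLocation{
		Name:   "cloud1",
		Bucket: "some-bucket",
		Path:   "/dir",
	})
	if err != nil {
		log.Fatal(err)
	}

	// read the serialized mapping back with the remote_pb-based helper
	mappings, err := filer.UnmarshalRemoteStorageMappings(newContent)
	if err != nil {
		log.Fatal(err)
	}
	for dir, loc := range mappings.Mappings {
		fmt.Println(dir, "->", loc.Name, loc.Bucket, loc.Path)
	}
}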

weed/filer/filer_remote_storage_test.go (4 changed lines)

@@ -7,14 +7,14 @@ import (
)
func TestFilerRemoteStorage_FindRemoteStorageClient(t *testing.T) {
- conf := &filer_pb.RemoteConf{
+ conf := &remote_pb.RemoteConf{
Name: "s7",
Type: "s3",
}
rs := NewFilerRemoteStorage()
rs.storageNameToConf[conf.Name] = conf
- rs.mapDirectoryToRemoteStorage("/a/b/c", &filer_pb.RemoteStorageLocation{
+ rs.mapDirectoryToRemoteStorage("/a/b/c", &remote_pb.RemoteStorageLocation{
Name: "s7",
Bucket: "some",
Path: "/dir",

weed/filer/read_remote.go (9 changed lines)

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -24,8 +25,8 @@ func (f *Filer) ReadRemote(entry *Entry, offset int64, size int64) (data []byte,
return client.ReadFile(sourceLoc, offset, size)
}
- func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, fp util.FullPath) *filer_pb.RemoteStorageLocation {
- remoteLocation := &filer_pb.RemoteStorageLocation{
+ func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, fp util.FullPath) *remote_pb.RemoteStorageLocation {
+ remoteLocation := &remote_pb.RemoteStorageLocation{
Name: remoteMountedLocation.Name,
Bucket: remoteMountedLocation.Bucket,
Path: remoteMountedLocation.Path,
@@ -34,11 +35,11 @@ func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMou
return remoteLocation
}
- func MapRemoteStorageLocationPathToFullPath(localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, remoteLocationPath string) (fp util.FullPath) {
+ func MapRemoteStorageLocationPathToFullPath(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, remoteLocationPath string) (fp util.FullPath) {
return localMountedDir.Child(remoteLocationPath[len(remoteMountedLocation.Path):])
}
- func DownloadToLocal(filerClient filer_pb.FilerClient, remoteConf *filer_pb.RemoteConf, remoteLocation *filer_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
+ func DownloadToLocal(filerClient filer_pb.FilerClient, remoteConf *remote_pb.RemoteConf, remoteLocation *remote_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
_, err := client.DownloadToLocal(context.Background(), &filer_pb.DownloadToLocalRequest{
Directory: string(parent),
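
To illustrate the two path-mapping helpers above, a small sketch (assuming a hypothetical mount of /buckets/b1 onto a remote bucket prefix; not from the commit):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	// a local directory mounted against a remote bucket prefix (example values)
	localMountedDir := util.FullPath("/buckets/b1")
	remoteMounted := &remote_pb.RemoteStorageLocation{Name: "cloud1", Bucket: "some-bucket", Path: "/dir"}

	// local file -> remote location: the remote path is the mount's remote path
	// plus the part of the local path below the mount point
	remoteLoc := filer.MapFullPathToRemoteStorageLocation(localMountedDir, remoteMounted, util.FullPath("/buckets/b1/sub/file.txt"))
	fmt.Println(remoteLoc.Bucket, remoteLoc.Path)

	// remote path -> local full path: the reverse mapping
	localPath := filer.MapRemoteStorageLocationPathToFullPath(localMountedDir, remoteMounted, remoteLoc.Path)
	fmt.Println(localPath)
}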

weed/pb/Makefile (1 changed line)

@@ -6,6 +6,7 @@ gen:
protoc master.proto --go_out=plugins=grpc:./master_pb --go_opt=paths=source_relative
protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb --go_opt=paths=source_relative
protoc filer.proto --go_out=plugins=grpc:./filer_pb --go_opt=paths=source_relative
+ protoc remote.proto --go_out=plugins=grpc:./remote_pb --go_opt=paths=source_relative
protoc iam.proto --go_out=plugins=grpc:./iam_pb --go_opt=paths=source_relative
protoc messaging.proto --go_out=plugins=grpc:./messaging_pb --go_opt=paths=source_relative
# protoc filer.proto --java_out=../../other/java/client/src/main/java

weed/pb/filer.proto (49 changed lines)

@@ -336,6 +336,7 @@ message KeepConnectedResponse {
message LocateBrokerRequest {
string resource = 1;
}
message LocateBrokerResponse {
bool found = 1;
// if found, send the exact address
@@ -386,54 +387,6 @@ message FilerConf {
/////////////////////////
// Remote Storage related
/////////////////////////
- message RemoteConf {
- string type = 1;
- string name = 2;
- string s3_access_key = 4;
- string s3_secret_key = 5;
- string s3_region = 6;
- string s3_endpoint = 7;
- string s3_storage_class = 8;
- bool s3_force_path_style = 9;
- string gcs_google_application_credentials = 10;
- string azure_account_name = 15;
- string azure_account_key = 16;
- string backblaze_key_id = 20;
- string backblaze_application_key = 21;
- string backblaze_endpoint = 22;
- string aliyun_access_key = 25;
- string aliyun_secret_key = 26;
- string aliyun_endpoint = 27;
- string aliyun_region = 28;
- string tencent_secret_id = 30;
- string tencent_secret_key = 31;
- string tencent_endpoint = 32;
- string tencent_region = 33;
- string baidu_access_key = 35;
- string baidu_secret_key = 36;
- string baidu_endpoint = 37;
- string baidu_region = 38;
- string wasabi_access_key = 40;
- string wasabi_secret_key = 41;
- string wasabi_endpoint = 42;
- string wasabi_region = 43;
- }
- message RemoteStorageMapping {
- map<string,RemoteStorageLocation> mappings = 1;
- }
- message RemoteStorageLocation {
- string name = 1;
- string bucket = 2;
- string path = 3;
- }
message DownloadToLocalRequest {
string directory = 1;
string name = 2;

weed/pb/filer_pb/filer.pb.go (905 changed lines)

File diff suppressed because it is too large.

weed/pb/filer_pb/filer_pb_helper.go (4 changed lines)

@@ -151,7 +151,3 @@ func (fp *FilerConf_PathConf) Key() interface{} {
key, _ := proto.Marshal(fp)
return string(key)
}
- func (fp *RemoteStorageLocation) Key() interface{} {
- key, _ := proto.Marshal(fp)
- return string(key)
- }

weed/pb/remote.proto (59 added lines)

@@ -0,0 +1,59 @@
syntax = "proto3";
package remote_pb;
option go_package = "github.com/chrislusf/seaweedfs/weed/pb/remote_pb";
option java_package = "seaweedfs.client";
option java_outer_classname = "FilerProto";
/////////////////////////
// Remote Storage related
/////////////////////////
message RemoteConf {
string type = 1;
string name = 2;
string s3_access_key = 4;
string s3_secret_key = 5;
string s3_region = 6;
string s3_endpoint = 7;
string s3_storage_class = 8;
bool s3_force_path_style = 9;
string gcs_google_application_credentials = 10;
string azure_account_name = 15;
string azure_account_key = 16;
string backblaze_key_id = 20;
string backblaze_application_key = 21;
string backblaze_endpoint = 22;
string aliyun_access_key = 25;
string aliyun_secret_key = 26;
string aliyun_endpoint = 27;
string aliyun_region = 28;
string tencent_secret_id = 30;
string tencent_secret_key = 31;
string tencent_endpoint = 32;
string tencent_region = 33;
string baidu_access_key = 35;
string baidu_secret_key = 36;
string baidu_endpoint = 37;
string baidu_region = 38;
string wasabi_access_key = 40;
string wasabi_secret_key = 41;
string wasabi_endpoint = 42;
string wasabi_region = 43;
}
message RemoteStorageMapping {
map<string,RemoteStorageLocation> mappings = 1;
}
message RemoteStorageLocation {
string name = 1;
string bucket = 2;
string path = 3;
}

weed/pb/remote_pb/remote.pb.go (619 added lines)

@@ -0,0 +1,619 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// source: remote.proto
package remote_pb
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
/////////////////////////
// Remote Storage related
/////////////////////////
type RemoteConf struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
S3AccessKey string `protobuf:"bytes,4,opt,name=s3_access_key,json=s3AccessKey,proto3" json:"s3_access_key,omitempty"`
S3SecretKey string `protobuf:"bytes,5,opt,name=s3_secret_key,json=s3SecretKey,proto3" json:"s3_secret_key,omitempty"`
S3Region string `protobuf:"bytes,6,opt,name=s3_region,json=s3Region,proto3" json:"s3_region,omitempty"`
S3Endpoint string `protobuf:"bytes,7,opt,name=s3_endpoint,json=s3Endpoint,proto3" json:"s3_endpoint,omitempty"`
S3StorageClass string `protobuf:"bytes,8,opt,name=s3_storage_class,json=s3StorageClass,proto3" json:"s3_storage_class,omitempty"`
S3ForcePathStyle bool `protobuf:"varint,9,opt,name=s3_force_path_style,json=s3ForcePathStyle,proto3" json:"s3_force_path_style,omitempty"`
GcsGoogleApplicationCredentials string `protobuf:"bytes,10,opt,name=gcs_google_application_credentials,json=gcsGoogleApplicationCredentials,proto3" json:"gcs_google_application_credentials,omitempty"`
AzureAccountName string `protobuf:"bytes,15,opt,name=azure_account_name,json=azureAccountName,proto3" json:"azure_account_name,omitempty"`
AzureAccountKey string `protobuf:"bytes,16,opt,name=azure_account_key,json=azureAccountKey,proto3" json:"azure_account_key,omitempty"`
BackblazeKeyId string `protobuf:"bytes,20,opt,name=backblaze_key_id,json=backblazeKeyId,proto3" json:"backblaze_key_id,omitempty"`
BackblazeApplicationKey string `protobuf:"bytes,21,opt,name=backblaze_application_key,json=backblazeApplicationKey,proto3" json:"backblaze_application_key,omitempty"`
BackblazeEndpoint string `protobuf:"bytes,22,opt,name=backblaze_endpoint,json=backblazeEndpoint,proto3" json:"backblaze_endpoint,omitempty"`
AliyunAccessKey string `protobuf:"bytes,25,opt,name=aliyun_access_key,json=aliyunAccessKey,proto3" json:"aliyun_access_key,omitempty"`
AliyunSecretKey string `protobuf:"bytes,26,opt,name=aliyun_secret_key,json=aliyunSecretKey,proto3" json:"aliyun_secret_key,omitempty"`
AliyunEndpoint string `protobuf:"bytes,27,opt,name=aliyun_endpoint,json=aliyunEndpoint,proto3" json:"aliyun_endpoint,omitempty"`
AliyunRegion string `protobuf:"bytes,28,opt,name=aliyun_region,json=aliyunRegion,proto3" json:"aliyun_region,omitempty"`
TencentSecretId string `protobuf:"bytes,30,opt,name=tencent_secret_id,json=tencentSecretId,proto3" json:"tencent_secret_id,omitempty"`
TencentSecretKey string `protobuf:"bytes,31,opt,name=tencent_secret_key,json=tencentSecretKey,proto3" json:"tencent_secret_key,omitempty"`
TencentEndpoint string `protobuf:"bytes,32,opt,name=tencent_endpoint,json=tencentEndpoint,proto3" json:"tencent_endpoint,omitempty"`
TencentRegion string `protobuf:"bytes,33,opt,name=tencent_region,json=tencentRegion,proto3" json:"tencent_region,omitempty"`
BaiduAccessKey string `protobuf:"bytes,35,opt,name=baidu_access_key,json=baiduAccessKey,proto3" json:"baidu_access_key,omitempty"`
BaiduSecretKey string `protobuf:"bytes,36,opt,name=baidu_secret_key,json=baiduSecretKey,proto3" json:"baidu_secret_key,omitempty"`
BaiduEndpoint string `protobuf:"bytes,37,opt,name=baidu_endpoint,json=baiduEndpoint,proto3" json:"baidu_endpoint,omitempty"`
BaiduRegion string `protobuf:"bytes,38,opt,name=baidu_region,json=baiduRegion,proto3" json:"baidu_region,omitempty"`
WasabiAccessKey string `protobuf:"bytes,40,opt,name=wasabi_access_key,json=wasabiAccessKey,proto3" json:"wasabi_access_key,omitempty"`
WasabiSecretKey string `protobuf:"bytes,41,opt,name=wasabi_secret_key,json=wasabiSecretKey,proto3" json:"wasabi_secret_key,omitempty"`
WasabiEndpoint string `protobuf:"bytes,42,opt,name=wasabi_endpoint,json=wasabiEndpoint,proto3" json:"wasabi_endpoint,omitempty"`
WasabiRegion string `protobuf:"bytes,43,opt,name=wasabi_region,json=wasabiRegion,proto3" json:"wasabi_region,omitempty"`
}
func (x *RemoteConf) Reset() {
*x = RemoteConf{}
if protoimpl.UnsafeEnabled {
mi := &file_remote_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RemoteConf) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RemoteConf) ProtoMessage() {}
func (x *RemoteConf) ProtoReflect() protoreflect.Message {
mi := &file_remote_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RemoteConf.ProtoReflect.Descriptor instead.
func (*RemoteConf) Descriptor() ([]byte, []int) {
return file_remote_proto_rawDescGZIP(), []int{0}
}
func (x *RemoteConf) GetType() string {
if x != nil {
return x.Type
}
return ""
}
func (x *RemoteConf) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *RemoteConf) GetS3AccessKey() string {
if x != nil {
return x.S3AccessKey
}
return ""
}
func (x *RemoteConf) GetS3SecretKey() string {
if x != nil {
return x.S3SecretKey
}
return ""
}
func (x *RemoteConf) GetS3Region() string {
if x != nil {
return x.S3Region
}
return ""
}
func (x *RemoteConf) GetS3Endpoint() string {
if x != nil {
return x.S3Endpoint
}
return ""
}
func (x *RemoteConf) GetS3StorageClass() string {
if x != nil {
return x.S3StorageClass
}
return ""
}
func (x *RemoteConf) GetS3ForcePathStyle() bool {
if x != nil {
return x.S3ForcePathStyle
}
return false
}
func (x *RemoteConf) GetGcsGoogleApplicationCredentials() string {
if x != nil {
return x.GcsGoogleApplicationCredentials
}
return ""
}
func (x *RemoteConf) GetAzureAccountName() string {
if x != nil {
return x.AzureAccountName
}
return ""
}
func (x *RemoteConf) GetAzureAccountKey() string {
if x != nil {
return x.AzureAccountKey
}
return ""
}
func (x *RemoteConf) GetBackblazeKeyId() string {
if x != nil {
return x.BackblazeKeyId
}
return ""
}
func (x *RemoteConf) GetBackblazeApplicationKey() string {
if x != nil {
return x.BackblazeApplicationKey
}
return ""
}
func (x *RemoteConf) GetBackblazeEndpoint() string {
if x != nil {
return x.BackblazeEndpoint
}
return ""
}
func (x *RemoteConf) GetAliyunAccessKey() string {
if x != nil {
return x.AliyunAccessKey
}
return ""
}
func (x *RemoteConf) GetAliyunSecretKey() string {
if x != nil {
return x.AliyunSecretKey
}
return ""
}
func (x *RemoteConf) GetAliyunEndpoint() string {
if x != nil {
return x.AliyunEndpoint
}
return ""
}
func (x *RemoteConf) GetAliyunRegion() string {
if x != nil {
return x.AliyunRegion
}
return ""
}
func (x *RemoteConf) GetTencentSecretId() string {
if x != nil {
return x.TencentSecretId
}
return ""
}
func (x *RemoteConf) GetTencentSecretKey() string {
if x != nil {
return x.TencentSecretKey
}
return ""
}
func (x *RemoteConf) GetTencentEndpoint() string {
if x != nil {
return x.TencentEndpoint
}
return ""
}
func (x *RemoteConf) GetTencentRegion() string {
if x != nil {
return x.TencentRegion
}
return ""
}
func (x *RemoteConf) GetBaiduAccessKey() string {
if x != nil {
return x.BaiduAccessKey
}
return ""
}
func (x *RemoteConf) GetBaiduSecretKey() string {
if x != nil {
return x.BaiduSecretKey
}
return ""
}
func (x *RemoteConf) GetBaiduEndpoint() string {
if x != nil {
return x.BaiduEndpoint
}
return ""
}
func (x *RemoteConf) GetBaiduRegion() string {
if x != nil {
return x.BaiduRegion
}
return ""
}
func (x *RemoteConf) GetWasabiAccessKey() string {
if x != nil {
return x.WasabiAccessKey
}
return ""
}
func (x *RemoteConf) GetWasabiSecretKey() string {
if x != nil {
return x.WasabiSecretKey
}
return ""
}
func (x *RemoteConf) GetWasabiEndpoint() string {
if x != nil {
return x.WasabiEndpoint
}
return ""
}
func (x *RemoteConf) GetWasabiRegion() string {
if x != nil {
return x.WasabiRegion
}
return ""
}
type RemoteStorageMapping struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Mappings map[string]*RemoteStorageLocation `protobuf:"bytes,1,rep,name=mappings,proto3" json:"mappings,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *RemoteStorageMapping) Reset() {
*x = RemoteStorageMapping{}
if protoimpl.UnsafeEnabled {
mi := &file_remote_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RemoteStorageMapping) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RemoteStorageMapping) ProtoMessage() {}
func (x *RemoteStorageMapping) ProtoReflect() protoreflect.Message {
mi := &file_remote_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RemoteStorageMapping.ProtoReflect.Descriptor instead.
func (*RemoteStorageMapping) Descriptor() ([]byte, []int) {
return file_remote_proto_rawDescGZIP(), []int{1}
}
func (x *RemoteStorageMapping) GetMappings() map[string]*RemoteStorageLocation {
if x != nil {
return x.Mappings
}
return nil
}
type RemoteStorageLocation struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
}
func (x *RemoteStorageLocation) Reset() {
*x = RemoteStorageLocation{}
if protoimpl.UnsafeEnabled {
mi := &file_remote_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RemoteStorageLocation) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RemoteStorageLocation) ProtoMessage() {}
func (x *RemoteStorageLocation) ProtoReflect() protoreflect.Message {
mi := &file_remote_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RemoteStorageLocation.ProtoReflect.Descriptor instead.
func (*RemoteStorageLocation) Descriptor() ([]byte, []int) {
return file_remote_proto_rawDescGZIP(), []int{2}
}
func (x *RemoteStorageLocation) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *RemoteStorageLocation) GetBucket() string {
if x != nil {
return x.Bucket
}
return ""
}
func (x *RemoteStorageLocation) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
var File_remote_proto protoreflect.FileDescriptor
var file_remote_proto_rawDesc = []byte{
0x0a, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09,
0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x22, 0xe5, 0x09, 0x0a, 0x0a, 0x52, 0x65,
0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x12, 0x22, 0x0a, 0x0d, 0x73, 0x33, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65,
0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x33, 0x41, 0x63, 0x63, 0x65, 0x73,
0x73, 0x4b, 0x65, 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x33, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65,
0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x33, 0x53,
0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x33, 0x5f, 0x72,
0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x33, 0x52,
0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x33, 0x5f, 0x65, 0x6e, 0x64, 0x70,
0x6f, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x33, 0x45, 0x6e,
0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x33, 0x5f, 0x73, 0x74, 0x6f,
0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0e, 0x73, 0x33, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73,
0x12, 0x2d, 0x0a, 0x13, 0x73, 0x33, 0x5f, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74,
0x68, 0x5f, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73,
0x33, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12,
0x4b, 0x0a, 0x22, 0x67, 0x63, 0x73, 0x5f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x61, 0x70,
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e,
0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x67, 0x63, 0x73,
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12,
0x61, 0x7a, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x41,
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x7a,
0x75, 0x72, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18,
0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x41, 0x63, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c,
0x61, 0x7a, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x4b, 0x65, 0x79, 0x49, 0x64,
0x12, 0x3a, 0x0a, 0x19, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x5f, 0x61, 0x70,
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x15, 0x20,
0x01, 0x28, 0x09, 0x52, 0x17, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x41, 0x70,
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x12,
0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69,
0x6e, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c,
0x61, 0x7a, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x61,
0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79,
0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x41, 0x63,
0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x69, 0x79, 0x75,
0x6e, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1a, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74,
0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x65, 0x6e,
0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x6c,
0x69, 0x79, 0x75, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d,
0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x1c, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x52, 0x65, 0x67, 0x69, 0x6f,
0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63,
0x72, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65,
0x6e, 0x63, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a,
0x12, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f,
0x6b, 0x65, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x6e, 0x63, 0x65,
0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x74,
0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18,
0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x45, 0x6e,
0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e,
0x74, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x21, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a,
0x10, 0x62, 0x61, 0x69, 0x64, 0x75, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65,
0x79, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x61, 0x69, 0x64, 0x75, 0x41, 0x63,
0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x69, 0x64, 0x75,
0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x24, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0e, 0x62, 0x61, 0x69, 0x64, 0x75, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65,
0x79, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x61, 0x69, 0x64, 0x75, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f,
0x69, 0x6e, 0x74, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x62, 0x61, 0x69, 0x64, 0x75,
0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x69, 0x64,
0x75, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x26, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
0x62, 0x61, 0x69, 0x64, 0x75, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x77,
0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79,
0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x41, 0x63,
0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x61, 0x73, 0x61, 0x62,
0x69, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x29, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0f, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74,
0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x65, 0x6e,
0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x61,
0x73, 0x61, 0x62, 0x69, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d,
0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x2b, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x52, 0x65, 0x67, 0x69, 0x6f,
0x6e, 0x22, 0xc0, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72,
0x61, 0x67, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x61,
0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x72,
0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53,
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x61,
0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x61, 0x70,
0x70, 0x69, 0x6e, 0x67, 0x73, 0x1a, 0x5d, 0x0a, 0x0d, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67,
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x15, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74,
0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74,
0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x50, 0x0a,
0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x30, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c,
0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65,
0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_remote_proto_rawDescOnce sync.Once
file_remote_proto_rawDescData = file_remote_proto_rawDesc
)
func file_remote_proto_rawDescGZIP() []byte {
file_remote_proto_rawDescOnce.Do(func() {
file_remote_proto_rawDescData = protoimpl.X.CompressGZIP(file_remote_proto_rawDescData)
})
return file_remote_proto_rawDescData
}
var file_remote_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_remote_proto_goTypes = []interface{}{
(*RemoteConf)(nil), // 0: remote_pb.RemoteConf
(*RemoteStorageMapping)(nil), // 1: remote_pb.RemoteStorageMapping
(*RemoteStorageLocation)(nil), // 2: remote_pb.RemoteStorageLocation
nil, // 3: remote_pb.RemoteStorageMapping.MappingsEntry
}
var file_remote_proto_depIdxs = []int32{
3, // 0: remote_pb.RemoteStorageMapping.mappings:type_name -> remote_pb.RemoteStorageMapping.MappingsEntry
2, // 1: remote_pb.RemoteStorageMapping.MappingsEntry.value:type_name -> remote_pb.RemoteStorageLocation
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_remote_proto_init() }
func file_remote_proto_init() {
if File_remote_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_remote_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RemoteConf); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_remote_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RemoteStorageMapping); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_remote_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RemoteStorageLocation); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_remote_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_remote_proto_goTypes,
DependencyIndexes: file_remote_proto_depIdxs,
MessageInfos: file_remote_proto_msgTypes,
}.Build()
File_remote_proto = out.File
file_remote_proto_rawDesc = nil
file_remote_proto_goTypes = nil
file_remote_proto_depIdxs = nil
}

weed/pb/remote_pb/remote_pb_helper.go (8 added lines)

@@ -0,0 +1,8 @@
package remote_pb
import "github.com/golang/protobuf/proto"
func (fp *RemoteStorageLocation) Key() interface{} {
key, _ := proto.Marshal(fp)
return string(key)
}
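
The relocated Key() helper returns the marshaled message as a string, which makes a location usable as a map key. A tiny illustrative sketch (not from the commit):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
)

func main() {
	// Key() serializes the location, so two equal locations yield the same string
	// and can be used to group or deduplicate work per remote location
	seen := make(map[interface{}]bool)

	a := &remote_pb.RemoteStorageLocation{Name: "cloud1", Bucket: "b", Path: "/dir"}
	b := &remote_pb.RemoteStorageLocation{Name: "cloud1", Bucket: "b", Path: "/dir"}

	seen[a.Key()] = true
	fmt.Println(seen[b.Key()]) // true: same serialized key
}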

weed/pb/volume_server.proto (12 changed lines)

@@ -3,6 +3,8 @@ syntax = "proto3";
package volume_server_pb;
option go_package = "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb";
+ import "remote.proto";
//////////////////////////////////////////////////
service VolumeServer {
@@ -473,14 +475,8 @@ message FetchAndWriteNeedleRequest {
int64 offset = 4;
int64 size = 5;
// remote conf
- string remote_type = 6;
- string remote_name = 7;
- string s3_access_key = 8;
- string s3_secret_key = 9;
- string s3_region = 10;
- string s3_endpoint = 11;
- string remote_bucket = 12;
- string remote_path = 13;
+ remote_pb.RemoteConf remote_conf = 15;
+ remote_pb.RemoteStorageLocation remote_location = 16;
}
message FetchAndWriteNeedleResponse {
}
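
The request now carries the whole remote configuration and location as embedded messages instead of flattened string fields. A hedged Go sketch of what a caller might build (the RemoteConf and RemoteLocation field names assume the usual protoc-gen-go casing of remote_conf and remote_location; other request fields are omitted and all values are placeholders):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	// build the request with the embedded remote_pb messages rather than
	// individual remote_type/s3_* string fields
	req := &volume_server_pb.FetchAndWriteNeedleRequest{
		Offset: 0,
		Size:   1024,
		RemoteConf: &remote_pb.RemoteConf{
			Type: "s3",
			Name: "cloud1",
		},
		RemoteLocation: &remote_pb.RemoteStorageLocation{
			Name:   "cloud1",
			Bucket: "some-bucket",
			Path:   "/dir/file.txt",
		},
	}
	fmt.Println(req.RemoteConf.Name, req.RemoteLocation.Bucket)
}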

weed/pb/volume_server_pb/volume_server.pb.go (1899 changed lines)

File diff suppressed because it is too large.

weed/remote_storage/azure/azure_storage_client.go (19 changed lines)

@@ -6,6 +6,7 @@ import (
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/util"
"io"
@@ -21,7 +22,7 @@ func init() {
type azureRemoteStorageMaker struct{}
- func (s azureRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
+ func (s azureRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
client := &azureRemoteStorageClient{
conf: conf,
@@ -52,13 +53,13 @@ func (s azureRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage
}
type azureRemoteStorageClient struct {
- conf *filer_pb.RemoteConf
+ conf *remote_pb.RemoteConf
serviceURL azblob.ServiceURL
}
var _ = remote_storage.RemoteStorageClient(&azureRemoteStorageClient{})
- func (az *azureRemoteStorageClient) Traverse(loc *filer_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
+ func (az *azureRemoteStorageClient) Traverse(loc *remote_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
pathKey := loc.Path[1:]
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
@@ -96,7 +97,7 @@ func (az *azureRemoteStorageClient) Traverse(loc *filer_pb.RemoteStorageLocation
return
}
- func (az *azureRemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
+ func (az *azureRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
key := loc.Path[1:]
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
@@ -119,11 +120,11 @@ func (az *azureRemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation
return
}
- func (az *azureRemoteStorageClient) WriteDirectory(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
+ func (az *azureRemoteStorageClient) WriteDirectory(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
return nil
}
- func (az *azureRemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
+ func (az *azureRemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
key := loc.Path[1:]
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
@@ -155,7 +156,7 @@ func (az *azureRemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocatio
}
- func (az *azureRemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
+ func (az *azureRemoteStorageClient) readFileRemoteEntry(loc *remote_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
key := loc.Path[1:]
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
blobURL := containerURL.NewBlockBlobURL(key)
@@ -183,7 +184,7 @@ func toMetadata(attributes map[string][]byte) map[string]string {
return metadata
}
- func (az *azureRemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
+ func (az *azureRemoteStorageClient) UpdateFileMetadata(loc *remote_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
if reflect.DeepEqual(oldEntry.Extended, newEntry.Extended) {
return nil
}
@@ -196,7 +197,7 @@ func (az *azureRemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStora
return
}
- func (az *azureRemoteStorageClient) DeleteFile(loc *filer_pb.RemoteStorageLocation) (err error) {
+ func (az *azureRemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error) {
key := loc.Path[1:]
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
if _, err = containerURL.NewBlobURL(key).Delete(context.Background(),

weed/remote_storage/gcs/gcs_storage_client.go (19 changed lines)

@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/api/iterator"
@@ -21,7 +22,7 @@ func init() {
type gcsRemoteStorageMaker struct{}
- func (s gcsRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
+ func (s gcsRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
client := &gcsRemoteStorageClient{
conf: conf,
}
@@ -48,13 +49,13 @@ func (s gcsRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.R
}
type gcsRemoteStorageClient struct {
- conf *filer_pb.RemoteConf
+ conf *remote_pb.RemoteConf
client *storage.Client
}
var _ = remote_storage.RemoteStorageClient(&gcsRemoteStorageClient{})
- func (gcs *gcsRemoteStorageClient) Traverse(loc *filer_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
+ func (gcs *gcsRemoteStorageClient) Traverse(loc *remote_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
pathKey := loc.Path[1:]
@@ -86,7 +87,7 @@ func (gcs *gcsRemoteStorageClient) Traverse(loc *filer_pb.RemoteStorageLocation,
}
return
}
- func (gcs *gcsRemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
+ func (gcs *gcsRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
key := loc.Path[1:]
rangeReader, readErr := gcs.client.Bucket(loc.Bucket).Object(key).NewRangeReader(context.Background(), offset, size)
@@ -102,11 +103,11 @@ func (gcs *gcsRemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation,
return
}
- func (gcs *gcsRemoteStorageClient) WriteDirectory(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
+ func (gcs *gcsRemoteStorageClient) WriteDirectory(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
return nil
}
- func (gcs *gcsRemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
+ func (gcs *gcsRemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
key := loc.Path[1:]
@@ -125,7 +126,7 @@ func (gcs *gcsRemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation
}
- func (gcs *gcsRemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
+ func (gcs *gcsRemoteStorageClient) readFileRemoteEntry(loc *remote_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
key := loc.Path[1:]
attr, err := gcs.client.Bucket(loc.Bucket).Object(key).Attrs(context.Background())
@@ -150,7 +151,7 @@ func toMetadata(attributes map[string][]byte) map[string]string {
return metadata
}
- func (gcs *gcsRemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
+ func (gcs *gcsRemoteStorageClient) UpdateFileMetadata(loc *remote_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
if reflect.DeepEqual(oldEntry.Extended, newEntry.Extended) {
return nil
}
@@ -168,7 +169,7 @@ func (gcs *gcsRemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorag
return
}
- func (gcs *gcsRemoteStorageClient) DeleteFile(loc *filer_pb.RemoteStorageLocation) (err error) {
+ func (gcs *gcsRemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error) {
key := loc.Path[1:]
if err = gcs.client.Bucket(loc.Bucket).Object(key).Delete(context.Background()); err != nil {
return fmt.Errorf("gcs delete %s%s: %v", loc.Bucket, key, err)
25  weed/remote_storage/remote_storage.go

@@ -3,13 +3,14 @@ package remote_storage
 import (
   "fmt"
   "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "io"
   "strings"
   "sync"
 )
-func ParseLocation(remote string) (loc *filer_pb.RemoteStorageLocation) {
-  loc = &filer_pb.RemoteStorageLocation{}
+func ParseLocation(remote string) (loc *remote_pb.RemoteStorageLocation) {
+  loc = &remote_pb.RemoteStorageLocation{}
   if strings.HasSuffix(string(remote), "/") {
     remote = remote[:len(remote)-1]
   }
@@ -27,23 +28,23 @@ func ParseLocation(remote string) (loc *filer_pb.RemoteStorageLocation) {
   return
 }
-func FormatLocation(loc *filer_pb.RemoteStorageLocation) string {
+func FormatLocation(loc *remote_pb.RemoteStorageLocation) string {
   return fmt.Sprintf("%s/%s%s", loc.Name, loc.Bucket, loc.Path)
 }
 type VisitFunc func(dir string, name string, isDirectory bool, remoteEntry *filer_pb.RemoteEntry) error
 type RemoteStorageClient interface {
-  Traverse(loc *filer_pb.RemoteStorageLocation, visitFn VisitFunc) error
-  ReadFile(loc *filer_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error)
-  WriteDirectory(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error)
-  WriteFile(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error)
-  UpdateFileMetadata(loc *filer_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error)
-  DeleteFile(loc *filer_pb.RemoteStorageLocation) (err error)
+  Traverse(loc *remote_pb.RemoteStorageLocation, visitFn VisitFunc) error
+  ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error)
+  WriteDirectory(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error)
+  WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error)
+  UpdateFileMetadata(loc *remote_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error)
+  DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error)
 }
 type RemoteStorageClientMaker interface {
-  Make(remoteConf *filer_pb.RemoteConf) (RemoteStorageClient, error)
+  Make(remoteConf *remote_pb.RemoteConf) (RemoteStorageClient, error)
 }
 var (
@@ -52,7 +53,7 @@ var (
   remoteStorageClientsLock sync.Mutex
 )
-func makeRemoteStorageClient(remoteConf *filer_pb.RemoteConf) (RemoteStorageClient, error) {
+func makeRemoteStorageClient(remoteConf *remote_pb.RemoteConf) (RemoteStorageClient, error) {
   maker, found := RemoteStorageClientMakers[remoteConf.Type]
   if !found {
     return nil, fmt.Errorf("remote storage type %s not found", remoteConf.Type)
@@ -60,7 +61,7 @@ func makeRemoteStorageClient(remoteConf *filer_pb.RemoteConf) (RemoteStorageClie
   return maker.Make(remoteConf)
 }
-func GetRemoteStorage(remoteConf *filer_pb.RemoteConf) (RemoteStorageClient, error) {
+func GetRemoteStorage(remoteConf *remote_pb.RemoteConf) (RemoteStorageClient, error) {
   remoteStorageClientsLock.Lock()
   defer remoteStorageClientsLock.Unlock()
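
Editor's note: with the client contract now expressed in `remote_pb` types, a caller resolves a backend through the maker registry keyed by `RemoteConf.Type`, and the concrete makers still register themselves from their packages' `init()` functions. The following is a minimal usage sketch, not code from this commit: the remote name "cloud1", the bucket, and the credential values are placeholders, and it assumes `ParseLocation` takes the usual `<name>/<bucket>/<path>` string and that the S3 maker registers under type "s3".

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
	"github.com/chrislusf/seaweedfs/weed/remote_storage"
	// blank import so the s3 maker's init() registers itself in
	// remote_storage.RemoteStorageClientMakers
	_ "github.com/chrislusf/seaweedfs/weed/remote_storage/s3"
)

func main() {
	// Parse "cloud1/some-bucket/backup/2021" into a remote_pb.RemoteStorageLocation
	// (Name, Bucket, Path) and print it back with FormatLocation.
	loc := remote_storage.ParseLocation("cloud1/some-bucket/backup/2021")
	fmt.Println(remote_storage.FormatLocation(loc))

	// The whole configuration travels as one remote_pb.RemoteConf message;
	// Type selects the registered maker, the remaining fields are backend-specific.
	conf := &remote_pb.RemoteConf{
		Type:        "s3",
		Name:        "cloud1",
		S3Region:    "us-east-2",          // placeholder values; normally read back
		S3AccessKey: "EXAMPLE_ACCESS_KEY", // from the configuration saved by remote.configure
		S3SecretKey: "EXAMPLE_SECRET_KEY",
	}

	client, err := remote_storage.GetRemoteStorage(conf)
	if err != nil {
		fmt.Println("get remote client:", err)
		return
	}

	// List what is under the remote prefix.
	err = client.Traverse(loc, func(dir string, name string, isDirectory bool, remoteEntry *filer_pb.RemoteEntry) error {
		fmt.Println(dir, name, isDirectory)
		return nil
	})
	if err != nil {
		fmt.Println("traverse:", err)
	}
}
```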

4  weed/remote_storage/s3/aliyun.go

@@ -6,7 +6,7 @@ import (
   "github.com/aws/aws-sdk-go/aws/credentials"
   "github.com/aws/aws-sdk-go/aws/session"
   "github.com/aws/aws-sdk-go/service/s3"
-  "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/remote_storage"
   "github.com/chrislusf/seaweedfs/weed/util"
   "os"
@@ -18,7 +18,7 @@ func init() {
 type AliyunRemoteStorageMaker struct{}
-func (s AliyunRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
+func (s AliyunRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
   client := &s3RemoteStorageClient{
     conf: conf,
   }

4  weed/remote_storage/s3/backblaze.go

@@ -6,7 +6,7 @@ import (
   "github.com/aws/aws-sdk-go/aws/credentials"
   "github.com/aws/aws-sdk-go/aws/session"
   "github.com/aws/aws-sdk-go/service/s3"
-  "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/remote_storage"
 )
@@ -16,7 +16,7 @@ func init() {
 type BackBlazeRemoteStorageMaker struct{}
-func (s BackBlazeRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
+func (s BackBlazeRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
   client := &s3RemoteStorageClient{
     conf: conf,
   }

4  weed/remote_storage/s3/baidu.go

@@ -6,7 +6,7 @@ import (
   "github.com/aws/aws-sdk-go/aws/credentials"
   "github.com/aws/aws-sdk-go/aws/session"
   "github.com/aws/aws-sdk-go/service/s3"
-  "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/remote_storage"
   "github.com/chrislusf/seaweedfs/weed/util"
   "os"
@@ -18,7 +18,7 @@ func init() {
 type BaiduRemoteStorageMaker struct{}
-func (s BaiduRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
+func (s BaiduRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
   client := &s3RemoteStorageClient{
     conf: conf,
   }

19  weed/remote_storage/s3/s3_storage_client.go

@@ -10,6 +10,7 @@ import (
   "github.com/aws/aws-sdk-go/service/s3/s3manager"
   "github.com/chrislusf/seaweedfs/weed/filer"
   "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/remote_storage"
   "github.com/chrislusf/seaweedfs/weed/util"
   "io"
@@ -22,7 +23,7 @@ func init() {
 type s3RemoteStorageMaker struct{}
-func (s s3RemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
+func (s s3RemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
   client := &s3RemoteStorageClient{
     conf: conf,
   }
@@ -46,13 +47,13 @@ func (s s3RemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.Re
 }
 type s3RemoteStorageClient struct {
-  conf *filer_pb.RemoteConf
+  conf *remote_pb.RemoteConf
   conn s3iface.S3API
 }
 var _ = remote_storage.RemoteStorageClient(&s3RemoteStorageClient{})
-func (s *s3RemoteStorageClient) Traverse(remote *filer_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
+func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
   pathKey := remote.Path[1:]
@@ -94,7 +95,7 @@ func (s *s3RemoteStorageClient) Traverse(remote *filer_pb.RemoteStorageLocation,
   }
   return
 }
-func (s *s3RemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
+func (s *s3RemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
   downloader := s3manager.NewDownloaderWithClient(s.conn, func(u *s3manager.Downloader) {
     u.PartSize = int64(4 * 1024 * 1024)
     u.Concurrency = 1
@@ -115,11 +116,11 @@ func (s *s3RemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation, of
   return writerAt.Bytes(), nil
 }
-func (s *s3RemoteStorageClient) WriteDirectory(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
+func (s *s3RemoteStorageClient) WriteDirectory(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
   return nil
 }
-func (s *s3RemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
+func (s *s3RemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
   fileSize := int64(filer.FileSize(entry))
@@ -173,7 +174,7 @@ func toTagging(attributes map[string][]byte) *s3.Tagging {
   return tagging
 }
-func (s *s3RemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
+func (s *s3RemoteStorageClient) readFileRemoteEntry(loc *remote_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
   resp, err := s.conn.HeadObject(&s3.HeadObjectInput{
     Bucket: aws.String(loc.Bucket),
     Key: aws.String(loc.Path[1:]),
@@ -191,7 +192,7 @@ func (s *s3RemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageL
 }
-func (s *s3RemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
+func (s *s3RemoteStorageClient) UpdateFileMetadata(loc *remote_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
   if reflect.DeepEqual(oldEntry.Extended, newEntry.Extended) {
     return nil
   }
@@ -210,7 +211,7 @@ func (s *s3RemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLo
   }
   return
 }
-func (s *s3RemoteStorageClient) DeleteFile(loc *filer_pb.RemoteStorageLocation) (err error) {
+func (s *s3RemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error) {
   _, err = s.conn.DeleteObject(&s3.DeleteObjectInput{
     Bucket: aws.String(loc.Bucket),
     Key: aws.String(loc.Path[1:]),

4  weed/remote_storage/s3/tencent.go

@@ -6,7 +6,7 @@ import (
   "github.com/aws/aws-sdk-go/aws/credentials"
   "github.com/aws/aws-sdk-go/aws/session"
   "github.com/aws/aws-sdk-go/service/s3"
-  "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/remote_storage"
   "github.com/chrislusf/seaweedfs/weed/util"
   "os"
@@ -18,7 +18,7 @@ func init() {
 type TencentRemoteStorageMaker struct{}
-func (s TencentRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
+func (s TencentRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
   client := &s3RemoteStorageClient{
     conf: conf,
   }

4  weed/remote_storage/s3/wasabi.go

@@ -7,7 +7,7 @@ import (
   "github.com/aws/aws-sdk-go/aws/request"
   "github.com/aws/aws-sdk-go/aws/session"
   "github.com/aws/aws-sdk-go/service/s3"
-  "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/remote_storage"
   "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -18,7 +18,7 @@ func init() {
 type WasabiRemoteStorageMaker struct{}
-func (s WasabiRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
+func (s WasabiRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
   client := &s3RemoteStorageClient{
     conf: conf,
   }

21  weed/server/filer_grpc_server_remote.go

@@ -6,6 +6,7 @@ import (
   "github.com/chrislusf/seaweedfs/weed/filer"
   "github.com/chrislusf/seaweedfs/weed/operation"
   "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
   "github.com/chrislusf/seaweedfs/weed/storage/needle"
   "github.com/chrislusf/seaweedfs/weed/util"
@@ -27,7 +28,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
   }
   // find mapping
-  var remoteStorageMountedLocation *filer_pb.RemoteStorageLocation
+  var remoteStorageMountedLocation *remote_pb.RemoteStorageLocation
   var localMountedDir string
   for k, loc := range mappings.Mappings {
     if strings.HasPrefix(req.Directory, k) {
@@ -43,7 +44,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
   if err != nil {
     return nil, err
   }
-  storageConf := &filer_pb.RemoteConf{}
+  storageConf := &remote_pb.RemoteConf{}
   if unMarshalErr := proto.Unmarshal(storageConfEntry.Content, storageConf); unMarshalErr != nil {
     return nil, fmt.Errorf("unmarshal remote storage conf %s/%s: %v", filer.DirectoryEtcRemote, remoteStorageMountedLocation.Name+filer.REMOTE_STORAGE_CONF_SUFFIX, unMarshalErr)
   }
@@ -114,14 +115,12 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
     Cookie: uint32(fileId.Cookie),
     Offset: localOffset,
     Size: size,
-    RemoteType: storageConf.Type,
-    RemoteName: storageConf.Name,
-    S3AccessKey: storageConf.S3AccessKey,
-    S3SecretKey: storageConf.S3SecretKey,
-    S3Region: storageConf.S3Region,
-    S3Endpoint: storageConf.S3Endpoint,
-    RemoteBucket: remoteStorageMountedLocation.Bucket,
-    RemotePath: string(dest),
+    RemoteConf: storageConf,
+    RemoteLocation: &remote_pb.RemoteStorageLocation{
+      Name: remoteStorageMountedLocation.Name,
+      Bucket: remoteStorageMountedLocation.Bucket,
+      Path: string(dest),
+    },
   })
   if fetchAndWriteErr != nil {
     return fmt.Errorf("volume server %s fetchAndWrite %s: %v", assignResult.Url, dest, fetchAndWriteErr)
@@ -129,7 +128,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
     return nil
   })
-  if err != nil {
+  if err != nil && fetchAndWriteErr == nil {
    fetchAndWriteErr = err
    return
   }
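
Editor's note: the mount mapping is keyed by the local mounted directory, and DownloadToLocal picks the mapping whose key prefixes the requested directory. A minimal sketch of that lookup follows; it is illustrative only, and choosing the longest matching prefix is an assumption, since the hunk above only shows the HasPrefix check.

```go
package remoteexample

import (
	"strings"

	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
)

// lookupMount returns the local mounted directory and its remote location for
// a requested directory by prefix-matching against the mount mappings.
// (Helper name and longest-prefix tie-breaking are illustrative assumptions.)
func lookupMount(mappings *remote_pb.RemoteStorageMapping, requestedDir string) (localMountedDir string, loc *remote_pb.RemoteStorageLocation) {
	for k, candidate := range mappings.Mappings {
		if strings.HasPrefix(requestedDir, k) && len(k) > len(localMountedDir) {
			localMountedDir, loc = k, candidate
		}
	}
	return
}
```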

4  weed/server/volume_grpc_erasure_coding.go

@@ -3,6 +3,7 @@ package weed_server
 import (
   "context"
   "fmt"
+  "github.com/chrislusf/seaweedfs/weed/storage/volume_info"
   "io"
   "io/ioutil"
   "math"
@@ -12,7 +13,6 @@ import (
   "github.com/chrislusf/seaweedfs/weed/glog"
   "github.com/chrislusf/seaweedfs/weed/operation"
-  "github.com/chrislusf/seaweedfs/weed/pb"
   "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
   "github.com/chrislusf/seaweedfs/weed/storage"
   "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
@@ -60,7 +60,7 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_
   }
   // write .vif files
-  if err := pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(v.Version())}); err != nil {
+  if err := volume_info.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(v.Version())}); err != nil {
     return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err)
   }

17  weed/server/volume_grpc_remote.go

@@ -3,7 +3,6 @@ package weed_server
 import (
   "context"
   "fmt"
-  "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
   "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
   "github.com/chrislusf/seaweedfs/weed/remote_storage"
   "github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -17,25 +16,15 @@ func (vs *VolumeServer) FetchAndWriteNeedle(ctx context.Context, req *volume_ser
   return nil, fmt.Errorf("not found volume id %d", req.VolumeId)
   }
-  remoteConf := &filer_pb.RemoteConf{
-    Type: req.RemoteType,
-    Name: req.RemoteName,
-    S3AccessKey: req.S3AccessKey,
-    S3SecretKey: req.S3SecretKey,
-    S3Region: req.S3Region,
-    S3Endpoint: req.S3Endpoint,
-  }
+  remoteConf := req.RemoteConf
   client, getClientErr := remote_storage.GetRemoteStorage(remoteConf)
   if getClientErr != nil {
     return nil, fmt.Errorf("get remote client: %v", getClientErr)
   }
-  remoteStorageLocation := &filer_pb.RemoteStorageLocation{
-    Name: req.RemoteName,
-    Bucket: req.RemoteBucket,
-    Path: req.RemotePath,
-  }
+  remoteStorageLocation := req.RemoteLocation
   data, ReadRemoteErr := client.ReadFile(remoteStorageLocation, req.Offset, req.Size)
   if ReadRemoteErr != nil {
     return nil, fmt.Errorf("read from remote %+v: %v", remoteStorageLocation, ReadRemoteErr)

8  weed/shell/command_remote_cache.go

@@ -5,6 +5,7 @@ import (
   "fmt"
   "github.com/chrislusf/seaweedfs/weed/filer"
   "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/util"
   "io"
 )
@@ -110,13 +111,13 @@ func mayHaveCachedToLocal(entry *filer_pb.Entry) bool {
   if entry.RemoteEntry == nil {
     return false // should not uncache an entry that is not in remote
   }
-  if entry.RemoteEntry.LastLocalSyncTsNs > 0 && len(entry.Chunks) > 0 {
+  if entry.RemoteEntry.LastLocalSyncTsNs > 0 {
     return true
   }
   return false
 }
-func (c *commandRemoteCache) cacheContentData(commandEnv *CommandEnv, writer io.Writer, localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, dirToCache util.FullPath, fileFilter *FileFilter, remoteConf *filer_pb.RemoteConf) error {
+func (c *commandRemoteCache) cacheContentData(commandEnv *CommandEnv, writer io.Writer, localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, dirToCache util.FullPath, fileFilter *FileFilter, remoteConf *remote_pb.RemoteConf) error {
   return recursivelyTraverseDirectory(commandEnv, dirToCache, func(dir util.FullPath, entry *filer_pb.Entry) bool {
     if !shouldCacheToLocal(entry) {
@@ -127,7 +128,7 @@ func (c *commandRemoteCache) cacheContentData(commandEnv *CommandEnv, writer io.
       return true
     }
-    println(dir, entry.Name)
+    fmt.Fprintf(writer, "Cache %+v ... ", dir.Child(entry.Name))
     remoteLocation := filer.MapFullPathToRemoteStorageLocation(localMountedDir, remoteMountedLocation, dir.Child(entry.Name))
@@ -135,6 +136,7 @@ func (c *commandRemoteCache) cacheContentData(commandEnv *CommandEnv, writer io.
       fmt.Fprintf(writer, "DownloadToLocal %+v: %v\n", remoteLocation, err)
       return false
     }
+    fmt.Fprintf(writer, "Done\n")
     return true
   })

7  weed/shell/command_remote_configure.go

@@ -6,6 +6,7 @@ import (
   "fmt"
   "github.com/chrislusf/seaweedfs/weed/filer"
   "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/util"
   "github.com/golang/protobuf/jsonpb"
   "github.com/golang/protobuf/proto"
@@ -48,7 +49,7 @@ var (
 func (c *commandRemoteConfigure) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
-  conf := &filer_pb.RemoteConf{}
+  conf := &remote_pb.RemoteConf{}
   remoteConfigureCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
   isDelete := remoteConfigureCommand.Bool("delete", false, "delete one remote storage by its name")
@@ -122,7 +123,7 @@ func (c *commandRemoteConfigure) listExistingRemoteStorages(commandEnv *CommandE
   if !strings.HasSuffix(entry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) {
     return nil
   }
-  conf := &filer_pb.RemoteConf{}
+  conf := &remote_pb.RemoteConf{}
   if err := proto.Unmarshal(entry.Content, conf); err != nil {
     return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, entry.Name, err)
@@ -168,7 +169,7 @@ func (c *commandRemoteConfigure) deleteRemoteStorage(commandEnv *CommandEnv, wri
 }
-func (c *commandRemoteConfigure) saveRemoteStorage(commandEnv *CommandEnv, writer io.Writer, conf *filer_pb.RemoteConf) error {
+func (c *commandRemoteConfigure) saveRemoteStorage(commandEnv *CommandEnv, writer io.Writer, conf *remote_pb.RemoteConf) error {
   data, err := proto.Marshal(conf)
   if err != nil {
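
Editor's note: `remote.configure` keeps persisting the configuration with proto.Marshal and reads it back with proto.Unmarshal from the saved entry's content; only the message type moved to `remote_pb`. A minimal in-memory round-trip sketch follows (the actual command stores the bytes in a filer entry named after the remote plus filer.REMOTE_STORAGE_CONF_SUFFIX under filer.DirectoryEtcRemote; the name and credential values here are placeholders).

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
	"github.com/golang/protobuf/proto"
)

func main() {
	// What remote.configure persists: one RemoteConf message per remote name.
	conf := &remote_pb.RemoteConf{
		Type:        "s3",
		Name:        "cloud1",
		S3AccessKey: "EXAMPLE_ACCESS_KEY", // placeholder credentials
		S3SecretKey: "EXAMPLE_SECRET_KEY",
	}

	// Serialize the same way saveRemoteStorage does ...
	data, err := proto.Marshal(conf)
	if err != nil {
		panic(err)
	}

	// ... and read it back the way listExistingRemoteStorages does from
	// the stored entry content.
	loaded := &remote_pb.RemoteConf{}
	if err := proto.Unmarshal(data, loaded); err != nil {
		panic(err)
	}
	fmt.Println(loaded.Name, loaded.Type)
}
```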

5  weed/shell/command_remote_meta_sync.go

@@ -6,6 +6,7 @@ import (
   "fmt"
   "github.com/chrislusf/seaweedfs/weed/filer"
   "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/remote_storage"
   "github.com/chrislusf/seaweedfs/weed/util"
   "io"
@@ -67,7 +68,7 @@ func (c *commandRemoteMetaSync) Do(args []string, commandEnv *CommandEnv, writer
   return nil
 }
-func detectMountInfo(commandEnv *CommandEnv, writer io.Writer, dir string) (*filer_pb.RemoteStorageMapping, string, *filer_pb.RemoteStorageLocation, *filer_pb.RemoteConf, error) {
+func detectMountInfo(commandEnv *CommandEnv, writer io.Writer, dir string) (*remote_pb.RemoteStorageMapping, string, *remote_pb.RemoteStorageLocation, *remote_pb.RemoteConf, error) {
   return filer.DetectMountInfo(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress, dir)
 }
@@ -106,7 +107,7 @@ func detectMountInfo(commandEnv *CommandEnv, writer io.Writer, dir string) (*fil
   the remote version is updated, need to pull meta
 }
 */
-func pullMetadata(commandEnv *CommandEnv, writer io.Writer, localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, dirToCache util.FullPath, remoteConf *filer_pb.RemoteConf) error {
+func pullMetadata(commandEnv *CommandEnv, writer io.Writer, localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, dirToCache util.FullPath, remoteConf *remote_pb.RemoteConf) error {
   // visit remote storage
   remoteStorage, err := remote_storage.GetRemoteStorage(remoteConf)

9  weed/shell/command_remote_mount.go

@@ -6,6 +6,7 @@ import (
   "fmt"
   "github.com/chrislusf/seaweedfs/weed/filer"
   "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/remote_storage"
   "github.com/chrislusf/seaweedfs/weed/util"
   "github.com/golang/protobuf/jsonpb"
@@ -81,7 +82,7 @@ func (c *commandRemoteMount) Do(args []string, commandEnv *CommandEnv, writer io
   return nil
 }
-func listExistingRemoteStorageMounts(commandEnv *CommandEnv, writer io.Writer) (mappings *filer_pb.RemoteStorageMapping, err error) {
+func listExistingRemoteStorageMounts(commandEnv *CommandEnv, writer io.Writer) (mappings *remote_pb.RemoteStorageMapping, err error) {
   // read current mapping
   mappings, err = filer.ReadMountMappings(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress)
@@ -109,13 +110,13 @@ func jsonPrintln(writer io.Writer, message proto.Message) error {
   return err
 }
-func (c *commandRemoteMount) findRemoteStorageConfiguration(commandEnv *CommandEnv, writer io.Writer, remote *filer_pb.RemoteStorageLocation) (conf *filer_pb.RemoteConf, err error) {
+func (c *commandRemoteMount) findRemoteStorageConfiguration(commandEnv *CommandEnv, writer io.Writer, remote *remote_pb.RemoteStorageLocation) (conf *remote_pb.RemoteConf, err error) {
   return filer.ReadRemoteStorageConf(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress, remote.Name)
 }
-func (c *commandRemoteMount) syncMetadata(commandEnv *CommandEnv, writer io.Writer, dir string, nonEmpty bool, remoteConf *filer_pb.RemoteConf, remote *filer_pb.RemoteStorageLocation) error {
+func (c *commandRemoteMount) syncMetadata(commandEnv *CommandEnv, writer io.Writer, dir string, nonEmpty bool, remoteConf *remote_pb.RemoteConf, remote *remote_pb.RemoteStorageLocation) error {
   // find existing directory, and ensure the directory is empty
   err := commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -160,7 +161,7 @@ func (c *commandRemoteMount) syncMetadata(commandEnv *CommandEnv, writer io.Writ
   return nil
 }
-func (c *commandRemoteMount) saveMountMapping(commandEnv *CommandEnv, writer io.Writer, dir string, remoteStorageLocation *filer_pb.RemoteStorageLocation) (err error) {
+func (c *commandRemoteMount) saveMountMapping(commandEnv *CommandEnv, writer io.Writer, dir string, remoteStorageLocation *remote_pb.RemoteStorageLocation) (err error) {
   // read current mapping
   var oldContent, newContent []byte

4  weed/shell/command_remote_uncache.go

@@ -83,6 +83,7 @@ func (c *commandRemoteUncache) Do(args []string, commandEnv *CommandEnv, writer
 func (c *commandRemoteUncache) uncacheContentData(commandEnv *CommandEnv, writer io.Writer, dirToCache util.FullPath, fileFilter *FileFilter) error {
   return recursivelyTraverseDirectory(commandEnv, dirToCache, func(dir util.FullPath, entry *filer_pb.Entry) bool {
     if !mayHaveCachedToLocal(entry) {
       return true // true means recursive traversal should continue
     }
@@ -98,7 +99,7 @@ func (c *commandRemoteUncache) uncacheContentData(commandEnv *CommandEnv, writer
     entry.RemoteEntry.LastLocalSyncTsNs = 0
     entry.Chunks = nil
-    println(dir, entry.Name)
+    fmt.Fprintf(writer, "Uncache %+v ... ", dir.Child(entry.Name))
     err := commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
       _, updateErr := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
@@ -111,6 +112,7 @@ func (c *commandRemoteUncache) uncacheContentData(commandEnv *CommandEnv, writer
       fmt.Fprintf(writer, "uncache %+v: %v\n", dir.Child(entry.Name), err)
       return false
     }
+    fmt.Fprintf(writer, "Done\n")
     return true
   })

3  weed/shell/command_remote_unmount.go

@@ -6,6 +6,7 @@ import (
   "fmt"
   "github.com/chrislusf/seaweedfs/weed/filer"
   "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+  "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
   "github.com/chrislusf/seaweedfs/weed/util"
   "io"
 )
@@ -71,7 +72,7 @@ func (c *commandRemoteUnmount) Do(args []string, commandEnv *CommandEnv, writer
   return nil
 }
-func (c *commandRemoteUnmount) findRemoteStorageConfiguration(commandEnv *CommandEnv, writer io.Writer, remote *filer_pb.RemoteStorageLocation) (conf *filer_pb.RemoteConf, err error) {
+func (c *commandRemoteUnmount) findRemoteStorageConfiguration(commandEnv *CommandEnv, writer io.Writer, remote *remote_pb.RemoteStorageLocation) (conf *remote_pb.RemoteConf, err error) {
   return filer.ReadRemoteStorageConf(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress, remote.Name)

6  weed/storage/erasure_coding/ec_volume.go

@@ -3,13 +3,13 @@ package erasure_coding
 import (
   "errors"
   "fmt"
+  "github.com/chrislusf/seaweedfs/weed/storage/volume_info"
   "math"
   "os"
   "sort"
   "sync"
   "time"
-  "github.com/chrislusf/seaweedfs/weed/pb"
   "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
   "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
   "github.com/chrislusf/seaweedfs/weed/storage/idx"
@@ -63,10 +63,10 @@ func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection
   // read volume info
   ev.Version = needle.Version3
-  if volumeInfo, _, found, _ := pb.MaybeLoadVolumeInfo(dataBaseFileName + ".vif"); found {
+  if volumeInfo, _, found, _ := volume_info.MaybeLoadVolumeInfo(dataBaseFileName + ".vif"); found {
     ev.Version = needle.Version(volumeInfo.Version)
   } else {
-    pb.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)})
+    volume_info.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)})
   }
   ev.ShardLocations = make(map[ShardId][]string)

6  weed/storage/store.go

@@ -2,6 +2,7 @@ package storage
 import (
   "fmt"
+  "github.com/chrislusf/seaweedfs/weed/storage/volume_info"
   "github.com/chrislusf/seaweedfs/weed/util"
   "path/filepath"
   "strings"
@@ -10,7 +11,6 @@ import (
   "google.golang.org/grpc"
   "github.com/chrislusf/seaweedfs/weed/glog"
-  "github.com/chrislusf/seaweedfs/weed/pb"
   "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
   "github.com/chrislusf/seaweedfs/weed/stats"
   "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
@@ -474,12 +474,12 @@ func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error {
   // load, modify, save
   baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name()))
   vifFile := filepath.Join(location.Directory, baseFileName+".vif")
-  volumeInfo, _, _, err := pb.MaybeLoadVolumeInfo(vifFile)
+  volumeInfo, _, _, err := volume_info.MaybeLoadVolumeInfo(vifFile)
   if err != nil {
     return fmt.Errorf("volume %d fail to load vif", i)
   }
   volumeInfo.Replication = replication
-  err = pb.SaveVolumeInfo(vifFile, volumeInfo)
+  err = volume_info.SaveVolumeInfo(vifFile, volumeInfo)
   if err != nil {
     return fmt.Errorf("volume %d fail to save vif", i)
   }

2  weed/pb/volume_info.go → weed/storage/volume_info/volume_info.go

@@ -1,4 +1,4 @@
-package pb
+package volume_info
 import (
   "bytes"

6  weed/storage/volume_tier.go

@@ -2,11 +2,11 @@ package storage
 import (
   "github.com/chrislusf/seaweedfs/weed/glog"
-  "github.com/chrislusf/seaweedfs/weed/pb"
   "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
   "github.com/chrislusf/seaweedfs/weed/storage/backend"
   _ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
   "github.com/chrislusf/seaweedfs/weed/storage/needle"
+  volume_info "github.com/chrislusf/seaweedfs/weed/storage/volume_info"
 )
 func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo {
@@ -16,7 +16,7 @@ func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo {
 func (v *Volume) maybeLoadVolumeInfo() (found bool) {
   var err error
-  v.volumeInfo, v.hasRemoteFile, found, err = pb.MaybeLoadVolumeInfo(v.FileName(".vif"))
+  v.volumeInfo, v.hasRemoteFile, found, err = volume_info.MaybeLoadVolumeInfo(v.FileName(".vif"))
   if v.volumeInfo.Version == 0 {
     v.volumeInfo.Version = uint32(needle.CurrentVersion)
@@ -56,6 +56,6 @@ func (v *Volume) SaveVolumeInfo() error {
   tierFileName := v.FileName(".vif")
-  return pb.SaveVolumeInfo(tierFileName, v.volumeInfo)
+  return volume_info.SaveVolumeInfo(tierFileName, v.volumeInfo)
 }
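
Editor's note: the `.vif` helpers moved out of `weed/pb` into their own `weed/storage/volume_info` package, so call sites only change the package qualifier. The sketch below mirrors the load-modify-save pattern of `Store.ConfigureVolume` above; the file path is made up, and the two ignored return values are assumed to be the has-remote-file and found flags, as the `volume_tier.go` hunk suggests.

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/storage/volume_info"
)

func main() {
	vifFile := "/data/volumes/collection_1.vif" // hypothetical path

	// Load the .vif; the ignored returns are the has-remote-file and found flags.
	volumeInfo, _, _, err := volume_info.MaybeLoadVolumeInfo(vifFile)
	if err != nil {
		fmt.Println("load vif:", err)
		return
	}

	// Modify and save, as ConfigureVolume does for replication changes.
	volumeInfo.Replication = "001"
	if err := volume_info.SaveVolumeInfo(vifFile, volumeInfo); err != nil {
		fmt.Println("save vif:", err)
	}
}
```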