Browse Source

cloud tier: remove tagging since not all s3 vendors support this

pull/2427/head
Chris Lu 3 years ago
parent
commit
c857cc7286
  1. 7
      weed/server/volume_grpc_tier_upload.go
  2. 2
      weed/storage/backend/backend.go
  3. 4
      weed/storage/backend/s3_backend/s3_backend.go
  4. 16
      weed/storage/backend/s3_backend/s3_upload.go

7
weed/server/volume_grpc_tier_upload.go

@@ -62,13 +62,8 @@ func (vs *VolumeServer) VolumeTierMoveDatToRemote(req *volume_server_pb.VolumeTierMoveDatToRemoteRequest,
 		})
 	}
 
-	// remember the file original source
-	attributes := make(map[string]string)
-	attributes["volumeId"] = v.Id.String()
-	attributes["collection"] = v.Collection
-	attributes["ext"] = ".dat"
 	// copy the data file
-	key, size, err := backendStorage.CopyFile(diskFile.File, attributes, fn)
+	key, size, err := backendStorage.CopyFile(diskFile.File, fn)
 	if err != nil {
 		return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err)
 	}

2
weed/storage/backend/backend.go

@@ -25,7 +25,7 @@ type BackendStorageFile interface {
 type BackendStorage interface {
 	ToProperties() map[string]string
 	NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) BackendStorageFile
-	CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error)
+	CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error)
 	DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error)
 	DeleteFile(key string) (err error)
 }

4
weed/storage/backend/s3_backend/s3_backend.go

@@ -79,13 +79,13 @@ func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) backend.BackendStorageFile {
 	return f
 }
 
-func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
+func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
 	randomUuid, _ := uuid.NewRandom()
 	key = randomUuid.String()
 
 	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
 
-	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn)
+	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn)
 
 	return
 }

16
weed/storage/backend/s3_backend/s3_upload.go

@@ -12,9 +12,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 )
 
-func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string,
-	attributes map[string]string,
-	fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
+func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
 
 	//open the file
 	f, err := os.Open(filename)
@@ -48,25 +46,13 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string,
 		fn: fn,
 	}
 
-	// process tagging
-	tags := ""
-	for k, v := range attributes {
-		if len(tags) > 0 {
-			tags = tags + "&"
-		}
-		tags = tags + k + "=" + v
-	}
-
 	// Upload the file to S3.
 	var result *s3manager.UploadOutput
 	result, err = uploader.Upload(&s3manager.UploadInput{
 		Bucket:       aws.String(destBucket),
 		Key:          aws.String(destKey),
 		Body:         fileReader,
-		ACL:          aws.String("private"),
-		ServerSideEncryption: aws.String("AES256"),
 		StorageClass: aws.String("STANDARD_IA"),
-		Tagging:      aws.String(tags),
 	})
 
 	//in case it fails to upload

Loading…
Cancel
Save