
s3 backend: support customizing the storage class

pull/3001/head
chrislu committed 3 years ago
commit 192983b464
4 changed files:
  1. docker/compose/master-cloud.toml (1 line changed)
  2. weed/command/scaffold/master.toml (1 line changed)
  3. weed/storage/backend/s3_backend/s3_backend.go (8 lines changed)
  4. weed/storage/backend/s3_backend/s3_upload.go (4 lines changed)

docker/compose/master-cloud.toml (1 line changed)

@@ -28,3 +28,4 @@ sleep_minutes = 17 # sleep minutes between each script execution
 region = "us-east-2"
 bucket = "volume_bucket" # an existing bucket
 endpoint = "http://server2:8333"
+storage_class = "STANDARD_IA"

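Note: storage_class accepts any storage class name that S3 itself recognizes. As a reference, here is a minimal standalone sketch (assuming the AWS SDK for Go v1, which this backend already depends on; not part of this commit) that lists the common values:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	// Storage class constants exposed by the SDK; any of these strings
    	// can be used for storage_class in the config above.
    	for _, sc := range []string{
    		s3.StorageClassStandard,
    		s3.StorageClassStandardIa,
    		s3.StorageClassOnezoneIa,
    		s3.StorageClassIntelligentTiering,
    		s3.StorageClassGlacier,
    		s3.StorageClassDeepArchive,
    		s3.StorageClassReducedRedundancy,
    	} {
    		fmt.Println(sc) // e.g. STANDARD, STANDARD_IA, ONEZONE_IA, ...
    	}
    }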
weed/command/scaffold/master.toml (1 line changed)

@@ -36,6 +36,7 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials fil
 region = "us-east-2"
 bucket = "your_bucket_name" # an existing bucket
 endpoint = ""
+storage_class = "STANDARD_IA"
 # create this number of logical volumes if no more writable volumes
 # count_x means how many copies of data.

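The new key is read by the s3 backend with a STANDARD_IA fallback (see the s3_backend.go hunk below). A minimal standalone sketch of that lookup, assuming a viper-backed reader such as the one SeaweedFS uses for master.toml; the key path mirrors the [storage.backend.s3.default] section above:

    package main

    import (
    	"fmt"

    	"github.com/spf13/viper"
    )

    func main() {
    	// Load master.toml from the current directory (sketch only; the real
    	// master loads it through its own configuration helpers).
    	viper.SetConfigName("master")
    	viper.SetConfigType("toml")
    	viper.AddConfigPath(".")
    	if err := viper.ReadInConfig(); err != nil {
    		fmt.Println("cannot read master.toml:", err)
    		return
    	}

    	storageClass := viper.GetString("storage.backend.s3.default.storage_class")
    	if storageClass == "" {
    		storageClass = "STANDARD_IA" // same default the backend falls back to
    	}
    	fmt.Println("s3 backend will upload with storage class:", storageClass)
    }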
weed/storage/backend/s3_backend/s3_backend.go (8 lines changed)

@@ -37,6 +37,7 @@ type S3BackendStorage struct {
 	region       string
 	bucket       string
 	endpoint     string
+	storageClass string
 	conn         s3iface.S3API
 }
@@ -48,6 +49,10 @@ func newS3BackendStorage(configuration backend.StringProperties, configPrefix st
 	s.region = configuration.GetString(configPrefix + "region")
 	s.bucket = configuration.GetString(configPrefix + "bucket")
 	s.endpoint = configuration.GetString(configPrefix + "endpoint")
+	s.storageClass = configuration.GetString(configPrefix + "storage_class")
+	if s.storageClass == "" {
+		s.storageClass = "STANDARD_IA"
+	}
 	s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint)
@@ -62,6 +67,7 @@ func (s *S3BackendStorage) ToProperties() map[string]string {
 	m["region"] = s.region
 	m["bucket"] = s.bucket
 	m["endpoint"] = s.endpoint
+	m["storage_class"] = s.storageClass
 	return m
 }
@@ -85,7 +91,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percen
 	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
-	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn)
+	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, s.storageClass, fn)
 	return
 }

weed/storage/backend/s3_backend/s3_upload.go (4 lines changed)

@@ -12,7 +12,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 )
-func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
+func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, storageClass string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
 	//open the file
 	f, err := os.Open(filename)
@@ -52,7 +52,7 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey
 		Bucket:       aws.String(destBucket),
 		Key:          aws.String(destKey),
 		Body:         fileReader,
-		StorageClass: aws.String("STANDARD_IA"),
+		StorageClass: aws.String(storageClass),
 	})
 	//in case it fails to upload

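End to end, the configured class is simply forwarded to the AWS SDK uploader. A standalone sketch of the equivalent upload call, assuming the AWS SDK for Go v1 s3manager that uploadToS3 builds on; bucket, key, and file name below are placeholders, not values from this commit:

    package main

    import (
    	"log"
    	"os"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
    	f, err := os.Open("example.dat") // placeholder file
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	sess := session.Must(session.NewSession(&aws.Config{
    		Region: aws.String("us-east-2"),
    	}))
    	uploader := s3manager.NewUploader(sess)

    	// The storage class string is passed straight through to S3,
    	// exactly as uploadToS3 now does with its storageClass parameter.
    	result, err := uploader.Upload(&s3manager.UploadInput{
    		Bucket:       aws.String("volume_bucket"), // placeholder bucket
    		Key:          aws.String("example.dat"),
    		Body:         f,
    		StorageClass: aws.String("STANDARD_IA"), // value taken from storage_class
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Println("uploaded to", result.Location)
    }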