package s3_backend

import (
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/util"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
	"github.com/google/uuid"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
)

func init() {
	backend.BackendStorageFactories["s3"] = &S3BackendFactory{}
}

type S3BackendFactory struct {
}

func (factory *S3BackendFactory) StorageType() backend.StorageType {
	return backend.StorageType("s3")
}

func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, configPrefix string, id string) (backend.BackendStorage, error) {
	return newS3BackendStorage(configuration, configPrefix, id)
}
type S3BackendStorage struct {
	id                    string
	aws_access_key_id     string
	aws_secret_access_key string
	region                string
	bucket                string
	endpoint              string
	storageClass          string
	forcePathStyle        bool
	conn                  s3iface.S3API
}

func newS3BackendStorage(configuration backend.StringProperties, configPrefix string, id string) (s *S3BackendStorage, err error) {
	s = &S3BackendStorage{}
	s.id = id
	s.aws_access_key_id = configuration.GetString(configPrefix + "aws_access_key_id")
	s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key")
	s.region = configuration.GetString(configPrefix + "region")
	s.bucket = configuration.GetString(configPrefix + "bucket")
	s.endpoint = configuration.GetString(configPrefix + "endpoint")
	s.storageClass = configuration.GetString(configPrefix + "storage_class")
	s.forcePathStyle = util.ParseBool(configuration.GetString(configPrefix+"force_path_style"), true)
	if s.storageClass == "" {
		s.storageClass = "STANDARD_IA"
	}

	s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint, s.forcePathStyle)

	glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
	return
}
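// --- Illustrative sketch, not part of the original file ---------------------
// How the factory registered in init() might be exercised directly. It assumes
// backend.StringProperties only requires GetString(key string) string, which is
// the only method used in this file; exampleProps and exampleBuildStorage are
// hypothetical names added for illustration.
type exampleProps map[string]string

func (p exampleProps) GetString(key string) string { return p[key] }

func exampleBuildStorage() (backend.BackendStorage, error) {
	factory := backend.BackendStorageFactories["s3"]
	// The keys mirror the ones read in newS3BackendStorage above; with an empty
	// configPrefix they are looked up verbatim.
	return factory.BuildStorage(exampleProps{
		"aws_access_key_id":     "",
		"aws_secret_access_key": "",
		"region":                "us-east-2",
		"bucket":                "example-bucket",
		"endpoint":              "",
		"storage_class":         "STANDARD_IA",
		"force_path_style":      "true",
	}, "", "default")
}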
func (s *S3BackendStorage) ToProperties() map[string]string {
	m := make(map[string]string)
	m["aws_access_key_id"] = s.aws_access_key_id
	m["aws_secret_access_key"] = s.aws_secret_access_key
	m["region"] = s.region
	m["bucket"] = s.bucket
	m["endpoint"] = s.endpoint
	m["storage_class"] = s.storageClass
	m["force_path_style"] = util.BoolToString(s.forcePathStyle)
	return m
}

func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) backend.BackendStorageFile {
	if strings.HasPrefix(key, "/") {
		key = key[1:]
	}

	f := &S3BackendStorageFile{
		backendStorage: s,
		key:            key,
		tierInfo:       tierInfo,
	}

	return f
}

func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
	randomUuid, _ := uuid.NewRandom()
	key = randomUuid.String()

	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)

	util.Retry("upload to S3", func() error {
		size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, s.storageClass, fn)
		return err
	})

	return
}
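// --- Illustrative sketch, not part of the original file ---------------------
// How a caller might push a local volume data file through CopyFile and track
// progress. exampleCopy and datPath are hypothetical; how the callback's return
// value is honoured is decided inside uploadToS3, which is not part of this file.
func exampleCopy(storage *S3BackendStorage, datPath string) (key string, size int64, err error) {
	f, err := os.Open(datPath)
	if err != nil {
		return "", 0, err
	}
	defer f.Close()

	return storage.CopyFile(f, func(progressed int64, percentage float32) error {
		glog.V(1).Infof("uploaded %d bytes (%.1f%%)", progressed, percentage)
		return nil // keep returning nil to let the upload continue
	})
}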
func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) {
	glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)

	size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn)
	return
}

func (s *S3BackendStorage) DeleteFile(key string) (err error) {
	glog.V(1).Infof("delete dat file %s from remote", key)

	err = deleteFromS3(s.conn, s.bucket, key)
	return
}

type S3BackendStorageFile struct {
	backendStorage *S3BackendStorage
	key            string
	tierInfo       *volume_server_pb.VolumeInfo
}

func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) {
	bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1)

	getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{
		Bucket: &s3backendStorageFile.backendStorage.bucket,
		Key:    &s3backendStorageFile.key,
		Range:  &bytesRange,
	})

	if getObjectErr != nil {
		return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr)
	}
	defer getObjectOutput.Body.Close()

	// glog.V(3).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
	// glog.V(3).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)

	// Body.Read may return fewer bytes than requested, so keep filling p until the
	// stream ends; io.EOF only marks the end of the ranged response and is not
	// reported to the caller as an error.
	var readCount int
	for {
		p = p[readCount:]
		readCount, err = getObjectOutput.Body.Read(p)
		n += readCount
		if err != nil {
			break
		}
	}

	if err == io.EOF {
		err = nil
	}

	return
}
func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) {
	panic("not implemented")
}

func (s3backendStorageFile S3BackendStorageFile) Truncate(off int64) error {
	panic("not implemented")
}

func (s3backendStorageFile S3BackendStorageFile) Close() error {
	return nil
}

func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) {
	files := s3backendStorageFile.tierInfo.GetFiles()

	if len(files) == 0 {
		err = fmt.Errorf("remote file info not found")
		return
	}

	datSize = int64(files[0].FileSize)
	modTime = time.Unix(int64(files[0].ModifiedTime), 0)

	return
}

func (s3backendStorageFile S3BackendStorageFile) Name() string {
	return s3backendStorageFile.key
}

func (s3backendStorageFile S3BackendStorageFile) Sync() error {
	return nil
}
  153. }