package s3_backend

import (
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
	"github.com/google/uuid"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

// init registers this package as the "s3" backend storage factory,
// so tiered volumes can be configured to use S3-compatible storage.
func init() {
	backend.BackendStorageFactories["s3"] = &S3BackendFactory{}
}

type S3BackendFactory struct {
}

func (factory *S3BackendFactory) StorageType() backend.StorageType {
	return backend.StorageType("s3")
}

func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, configPrefix string, id string) (backend.BackendStorage, error) {
	return newS3BackendStorage(configuration, configPrefix, id)
}

type S3BackendStorage struct {
	id                    string
	aws_access_key_id     string
	aws_secret_access_key string
	region                string
	bucket                string
	endpoint              string
	storageClass          string
	conn                  s3iface.S3API
}

func newS3BackendStorage(configuration backend.StringProperties, configPrefix string, id string) (s *S3BackendStorage, err error) {
	s = &S3BackendStorage{}
	s.id = id
	s.aws_access_key_id = configuration.GetString(configPrefix + "aws_access_key_id")
	s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key")
	s.region = configuration.GetString(configPrefix + "region")
	s.bucket = configuration.GetString(configPrefix + "bucket")
	s.endpoint = configuration.GetString(configPrefix + "endpoint")
	s.storageClass = configuration.GetString(configPrefix + "storage_class")
	if s.storageClass == "" {
		// default to infrequent-access storage, which suits rarely-read tiered volume data
		s.storageClass = "STANDARD_IA"
	}

	s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint)
	if err != nil {
		return nil, fmt.Errorf("failed to create s3 session for s3.%s: %v", id, err)
	}

	glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
	return
}

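// For illustration only (not part of the original file): the keys read above
// would typically come from a configuration section of roughly this shape.
// The exact file, section path, and prefix depend on the deployment; the key
// names below simply mirror the GetString calls in newS3BackendStorage.
//
//	[storage.backend.s3.default]
//		aws_access_key_id = ""
//		aws_secret_access_key = ""
//		region = "us-east-2"
//		bucket = "your_bucket_name"
//		endpoint = ""
//		storage_class = "STANDARD_IA"
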
func (s *S3BackendStorage) ToProperties() map[string]string {
	m := make(map[string]string)
	m["aws_access_key_id"] = s.aws_access_key_id
	m["aws_secret_access_key"] = s.aws_secret_access_key
	m["region"] = s.region
	m["bucket"] = s.bucket
	m["endpoint"] = s.endpoint
	m["storage_class"] = s.storageClass
	return m
}

func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) backend.BackendStorageFile {
	if strings.HasPrefix(key, "/") {
		key = key[1:]
	}

	f := &S3BackendStorageFile{
		backendStorage: s,
		key:            key,
		tierInfo:       tierInfo,
	}

	return f
}

func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
	randomUuid, _ := uuid.NewRandom()
	key = randomUuid.String()

	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)

	util.Retry("upload to S3", func() error {
		size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, s.storageClass, fn)
		return err
	})

	return
}

func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) {
	glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)

	size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn)

	return
}

func (s *S3BackendStorage) DeleteFile(key string) (err error) {
	glog.V(1).Infof("delete dat file %s from remote", key)

	err = deleteFromS3(s.conn, s.bucket, key)

	return
}

type S3BackendStorageFile struct {
	backendStorage *S3BackendStorage
	key            string
	tierInfo       *volume_server_pb.VolumeInfo
}

func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) {

	// issue a ranged GET so only the requested byte window is transferred
	bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1)

	getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{
		Bucket: &s3backendStorageFile.backendStorage.bucket,
		Key:    &s3backendStorageFile.key,
		Range:  &bytesRange,
	})

	if getObjectErr != nil {
		return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr)
	}
	defer getObjectOutput.Body.Close()

	// glog.V(3).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
	// glog.V(3).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)

	// keep reading until the buffer is filled or the response body is exhausted
	var readCount int
	for {
		p = p[readCount:]
		readCount, err = getObjectOutput.Body.Read(p)
		n += readCount
		if err != nil {
			break
		}
	}

	if err == io.EOF {
		err = nil
	}

	return
}

func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) {
	panic("not implemented")
}

func (s3backendStorageFile S3BackendStorageFile) Truncate(off int64) error {
	panic("not implemented")
}

func (s3backendStorageFile S3BackendStorageFile) Close() error {
	return nil
}

func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) {
	files := s3backendStorageFile.tierInfo.GetFiles()

	if len(files) == 0 {
		err = fmt.Errorf("remote file info not found")
		return
	}

	datSize = int64(files[0].FileSize)
	modTime = time.Unix(int64(files[0].ModifiedTime), 0)

	return
}

func (s3backendStorageFile S3BackendStorageFile) Name() string {
	return s3backendStorageFile.key
}

func (s3backendStorageFile S3BackendStorageFile) Sync() error {
	return nil
}