package s3_backend

import (
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
	"github.com/google/uuid"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/backend"
)

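// init registers the S3 factory in the global backend registry under the
// "s3" storage type, so it can be selected from configuration.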
func init() {
	backend.BackendStorageFactories["s3"] = &S3BackendFactory{}
}

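// S3BackendFactory builds S3BackendStorage instances for the "s3" storage type.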
type S3BackendFactory struct {
}

func (factory *S3BackendFactory) StorageType() backend.StorageType {
	return backend.StorageType("s3")
}

func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, configPrefix string, id string) (backend.BackendStorage, error) {
	return newS3BackendStorage(configuration, configPrefix, id)
}

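// S3BackendStorage holds the credentials, region, bucket and S3 client for
// one configured "s3" backend instance.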
type S3BackendStorage struct {
	id                    string
	aws_access_key_id     string
	aws_secret_access_key string
	region                string
	bucket                string
	conn                  s3iface.S3API
}

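// newS3BackendStorage reads the aws_access_key_id, aws_secret_access_key,
// region and bucket properties under configPrefix and opens an S3 session.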
func newS3BackendStorage(configuration backend.StringProperties, configPrefix string, id string) (s *S3BackendStorage, err error) {
	s = &S3BackendStorage{}
	s.id = id
	s.aws_access_key_id = configuration.GetString(configPrefix + "aws_access_key_id")
	s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key")
	s.region = configuration.GetString(configPrefix + "region")
	s.bucket = configuration.GetString(configPrefix + "bucket")
	s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region)

	glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
	return
}

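// ToProperties exports the backend configuration as a plain string map.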
func (s *S3BackendStorage) ToProperties() map[string]string {
	m := make(map[string]string)
	m["aws_access_key_id"] = s.aws_access_key_id
	m["aws_secret_access_key"] = s.aws_secret_access_key
	m["region"] = s.region
	m["bucket"] = s.bucket
	return m
}

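// NewStorageFile wraps an S3 object key (with any leading "/" stripped) and
// the volume's tier info as a backend storage file.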
func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) backend.BackendStorageFile {
	if strings.HasPrefix(key, "/") {
		key = key[1:]
	}

	f := &S3BackendStorageFile{
		backendStorage: s,
		key:            key,
		tierInfo:       tierInfo,
	}

	return f
}

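// CopyFile uploads the local .dat file to the configured bucket under a
// freshly generated UUID key, forwarding upload progress to fn.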
func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
	randomUuid, _ := uuid.NewRandom()
	key = randomUuid.String()

	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)

	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn)

	return
}

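// DownloadFile fetches the object identified by key from the bucket into the
// local file fileName, forwarding download progress to fn.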
func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) {
	glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)

	size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn)

	return
}

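// DeleteFile removes the object identified by key from the bucket.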
func (s *S3BackendStorage) DeleteFile(key string) (err error) {
	glog.V(1).Infof("delete dat file %s from remote", key)

	err = deleteFromS3(s.conn, s.bucket, key)

	return
}

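// S3BackendStorageFile exposes a single S3 object through the
// backend.BackendStorageFile interface.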
type S3BackendStorageFile struct {
	backendStorage *S3BackendStorage
	key            string
	tierInfo       *volume_server_pb.VolumeInfo
}

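// ReadAt fills p with data starting at offset off, using a ranged GetObject
// request against the remote object.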
func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) {

	bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1)

	// glog.V(0).Infof("read %s %s", s3backendStorageFile.key, bytesRange)

	getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{
		Bucket: &s3backendStorageFile.backendStorage.bucket,
		Key:    &s3backendStorageFile.key,
		Range:  &bytesRange,
	})

	if getObjectErr != nil {
		return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr)
	}
	defer getObjectOutput.Body.Close()

	glog.V(4).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
	glog.V(4).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)

	// The response body may arrive in several chunks; keep reading until the
	// buffer is full or the stream ends, accumulating the total count in n.
	for {
		var once int
		once, err = getObjectOutput.Body.Read(p[n:])
		n += once
		if err != nil || n == len(p) {
			break
		}
	}

	if err == io.EOF {
		err = nil
	}

	return
}

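// WriteAt is not implemented for the S3 backend.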
func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) {
	panic("not implemented")
}

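// Truncate is not implemented for the S3 backend.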
func (s3backendStorageFile S3BackendStorageFile) Truncate(off int64) error {
	panic("not implemented")
}

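// Close is a no-op: there is no per-file handle to release, since the S3
// connection belongs to the backend storage.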
func (s3backendStorageFile S3BackendStorageFile) Close() error {
	return nil
}

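// GetStat reports the remote .dat file size and modification time recorded in
// the volume's tier info.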
func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) {

	files := s3backendStorageFile.tierInfo.GetFiles()

	if len(files) == 0 {
		err = fmt.Errorf("remote file info not found")
		return
	}

	datSize = int64(files[0].FileSize)
	modTime = time.Unix(int64(files[0].ModifiedTime), 0)

	return
}

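// Name returns the S3 object key.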
func (s3backendStorageFile S3BackendStorageFile) Name() string {
	return s3backendStorageFile.key
}
  133. }