package azuresink

import (
    "bytes"
    "context"
    "fmt"
    "net/url"

    "github.com/Azure/azure-storage-blob-go/azblob"
    "github.com/chrislusf/seaweedfs/weed/filer2"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/replication/sink"
    "github.com/chrislusf/seaweedfs/weed/replication/source"
    "github.com/chrislusf/seaweedfs/weed/util"
)

// AzureSink replicates filer entries into an Azure Blob Storage container.
type AzureSink struct {
    containerURL azblob.ContainerURL
    container    string
    dir          string
    filerSource  *source.FilerSource
}

// init registers this sink with the replication framework.
func init() {
    sink.Sinks = append(sink.Sinks, &AzureSink{})
}

func (g *AzureSink) GetName() string {
    return "azure"
}

func (g *AzureSink) GetSinkToDirectory() string {
    return g.dir
}

func (g *AzureSink) Initialize(configuration util.Configuration) error {
    return g.initialize(
        configuration.GetString("account_name"),
        configuration.GetString("account_key"),
        configuration.GetString("container"),
        configuration.GetString("directory"),
    )
}

func (g *AzureSink) SetSourceFiler(s *source.FilerSource) {
    g.filerSource = s
}

func (g *AzureSink) initialize(accountName, accountKey, container, dir string) error {

    g.container = container
    g.dir = dir

    // Use your Storage account's name and key to create a credential object.
    credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
    if err != nil {
        glog.Fatalf("failed to create Azure credential with account name:%s key:%s", accountName, accountKey)
    }

    // Create a request pipeline that is used to process HTTP(S) requests and responses.
    p := azblob.NewPipeline(credential, azblob.PipelineOptions{})

    // Create a ServiceURL object that wraps the service URL and a request pipeline.
    u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName))
    serviceURL := azblob.NewServiceURL(*u, p)

    g.containerURL = serviceURL.NewContainerURL(g.container)

    return nil
}

// DeleteEntry removes the blob that corresponds to key; directories are
// represented by a trailing "/" in the blob name.
func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {

    if isDirectory {
        key = key + "/"
    }

    ctx := context.Background()

    if _, err := g.containerURL.NewBlobURL(key).Delete(ctx,
        azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil {
        return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err)
    }

    return nil
}

// CreateEntry writes a file entry as an append blob, streaming each chunk
// from the source filer and appending it in order.
func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {

    if entry.IsDirectory {
        return nil
    }

    totalSize := filer2.TotalSize(entry.Chunks)
    chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))

    ctx := context.Background()

    // Create a URL that references a to-be-created blob in your
    // Azure Storage account's container.
    appendBlobURL := g.containerURL.NewAppendBlobURL(key)

    _, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
    if err != nil {
        return err
    }

    for _, chunk := range chunkViews {

        fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
        if err != nil {
            return err
        }

        var writeErr error
        _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
            _, writeErr = appendBlobURL.AppendBlock(ctx, bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
        })

        if readErr != nil {
            return readErr
        }
        if writeErr != nil {
            return writeErr
        }
    }

    return nil
}

// UpdateEntry is not implemented yet; it reports that no existing entry was updated.
func (g *AzureSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
    // TODO improve efficiency
    return false, nil
}