

package filesys

import (
	"bytes"
	"context"
	"fmt"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// FileHandle holds the state of one open file: its location on the filer,
// its attributes, and the chunks written through this handle so far.
type FileHandle struct {
	// cache file has been written to
	dirty bool

	cachePath string

	handle uint64

	wfs     *WFS
	dirPath string
	name    string

	RequestId fuse.RequestID // unique ID for request
	NodeId    fuse.NodeID    // file or directory the request is about
	Uid       uint32         // user ID of process making request
	Gid       uint32         // group ID of process making request

	attributes *filer_pb.FuseAttributes
	Chunks     []*filer_pb.FileChunk
}

// compile-time checks that FileHandle implements the fuse handle interfaces
var _ = fs.Handle(&FileHandle{})
var _ = fs.HandleReadAller(&FileHandle{})
// var _ = fs.HandleReader(&FileHandle{})
var _ = fs.HandleFlusher(&FileHandle{})
var _ = fs.HandleWriter(&FileHandle{})
var _ = fs.HandleReleaser(&FileHandle{})

// ReadAll fetches the file content from the filer. It currently requests
// only the first compacted chunk (see FIXME below).
func (fh *FileHandle) ReadAll(ctx context.Context) (content []byte, err error) {

	glog.V(3).Infof("%v/%v read all fh ", fh.dirPath, fh.name)

	if len(fh.Chunks) == 0 {
		glog.V(0).Infof("empty fh %v/%v", fh.dirPath, fh.name)
		return
	}

	err = fh.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		// FIXME: need to either use Read() or implement differently
		chunks, _ := filer2.CompactFileChunks(fh.Chunks)
		glog.V(1).Infof("read fh %v/%v %d/%d chunks", fh.dirPath, fh.name, len(chunks), len(fh.Chunks))
		for i, chunk := range chunks {
			glog.V(1).Infof("read fh %v/%v %d/%d chunk %s [%d,%d)", fh.dirPath, fh.name, i, len(chunks), chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
		}

		request := &filer_pb.GetFileContentRequest{
			FileId: chunks[0].FileId,
		}

		glog.V(1).Infof("read fh content %d chunk %s [%d,%d): %v", len(chunks),
			chunks[0].FileId, chunks[0].Offset, chunks[0].Offset+int64(chunks[0].Size), request)

		resp, err := client.GetFileContent(ctx, request)
		if err != nil {
			return err
		}

		content = resp.Content

		return nil
	})

	return content, err
}

// Write asks the filer to assign a volume, uploads the request data to that
// volume server, and records the resulting chunk on the file handle.
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {

	// write the request to volume servers
	glog.V(3).Infof("%+v/%v write fh: %+v", fh.dirPath, fh.name, req)

	var fileId, host string

	if err := fh.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.AssignVolumeRequest{
			Count:       1,
			Replication: "000",
			Collection:  "",
		}

		glog.V(1).Infof("assign volume: %v", request)
		resp, err := client.AssignVolume(ctx, request)
		if err != nil {
			return err
		}

		fileId, host = resp.FileId, resp.Url

		return nil
	}); err != nil {
		return fmt.Errorf("filer assign volume: %v", err)
	}

	fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
	bufReader := bytes.NewReader(req.Data)
	uploadResult, err := operation.Upload(fileUrl, fh.name, bufReader, false, "application/octet-stream", nil, "")
	if err != nil {
		return fmt.Errorf("upload data: %v", err)
	}
	if uploadResult.Error != "" {
		return fmt.Errorf("upload result: %v", uploadResult.Error)
	}

	resp.Size = int(uploadResult.Size)

	fh.Chunks = append(fh.Chunks, &filer_pb.FileChunk{
		FileId: fileId,
		Offset: req.Offset,
		Size:   uint64(uploadResult.Size),
		Mtime:  time.Now().UnixNano(),
	})

	glog.V(1).Infof("uploaded %s/%s to: %v, [%d,%d)", fh.dirPath, fh.name, fileUrl, req.Offset, req.Offset+int64(resp.Size))

	fh.dirty = true

	return nil
}

// Release is a no-op; the accumulated chunks are persisted in Flush.
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
	glog.V(3).Infof("%+v/%v release fh", fh.dirPath, fh.name)

	return nil
}

// Flush - experimenting with uploading at flush, this slows operations down until it has been
// completely flushed
func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
	// fflush works at the fh level
	// send the data to the OS
	glog.V(3).Infof("%s/%s fh flush %v", fh.dirPath, fh.name, req)

	if !fh.dirty {
		return nil
	}

	if len(fh.Chunks) == 0 {
		glog.V(2).Infof("fh %s/%s flush skipping empty: %v", fh.dirPath, fh.name, req)
		return nil
	}

	// persist the accumulated chunk list to the filer entry
	err := fh.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.UpdateEntryRequest{
			Directory: fh.dirPath,
			Entry: &filer_pb.Entry{
				Name:       fh.name,
				Attributes: fh.attributes,
				Chunks:     fh.Chunks,
			},
		}

		glog.V(1).Infof("%s/%s set chunks: %v", fh.dirPath, fh.name, len(fh.Chunks))
		if _, err := client.UpdateEntry(ctx, request); err != nil {
			return fmt.Errorf("update fh: %v", err)
		}

		return nil
	})

	if err == nil {
		fh.dirty = false
	}

	return err
}