You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

103 lines
2.8 KiB

3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
  1. package filesys
  2. import (
  3. "fmt"
  4. "github.com/chrislusf/seaweedfs/weed/filesys/page_writer"
  5. "github.com/chrislusf/seaweedfs/weed/glog"
  6. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  7. "io"
  8. "sync"
  9. "time"
  10. )
// ChunkedDirtyPages buffers dirty page writes for an open file handle and
// streams them to storage through a page_writer.UploadPipeline.
type ChunkedDirtyPages struct {
	fh             *FileHandle
	writeWaitGroup sync.WaitGroup
	chunkAddLock   sync.Mutex // serializes chunk registration in saveChunkedFileIntevalToStorage
	lastErr        error      // last upload error, surfaced by FlushData
	collection     string     // storage collection reported by the most recent chunk upload
	replication    string     // replication setting reported by the most recent chunk upload
	uploadPipeline *page_writer.UploadPipeline
	hasWrites      bool // set once AddPage has been called; gates Flush/Read work
}
  21. var (
  22. _ = page_writer.DirtyPages(&ChunkedDirtyPages{})
  23. )
  24. func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *ChunkedDirtyPages {
  25. dirtyPages := &ChunkedDirtyPages{
  26. fh: fh,
  27. }
  28. swapFileDir := fh.f.wfs.option.getTempFilePageDir()
  29. dirtyPages.uploadPipeline = page_writer.NewUploadPipeline(fh.f.fullpath(),
  30. fh.f.wfs.concurrentWriters, chunkSize, dirtyPages.saveChunkedFileIntevalToStorage, swapFileDir)
  31. return dirtyPages
  32. }
  33. func (pages *ChunkedDirtyPages) AddPage(offset int64, data []byte) {
  34. pages.hasWrites = true
  35. glog.V(4).Infof("%v memory AddPage [%d, %d)", pages.fh.f.fullpath(), offset, offset+int64(len(data)))
  36. pages.uploadPipeline.SaveDataAt(data, offset)
  37. return
  38. }
  39. func (pages *ChunkedDirtyPages) FlushData() error {
  40. if !pages.hasWrites {
  41. return nil
  42. }
  43. pages.uploadPipeline.FlushAll()
  44. if pages.lastErr != nil {
  45. return fmt.Errorf("flush data: %v", pages.lastErr)
  46. }
  47. return nil
  48. }
  49. func (pages *ChunkedDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) {
  50. if !pages.hasWrites {
  51. return
  52. }
  53. return pages.uploadPipeline.MaybeReadDataAt(data, startOffset)
  54. }
  55. func (pages *ChunkedDirtyPages) GetStorageOptions() (collection, replication string) {
  56. return pages.collection, pages.replication
  57. }
// saveChunkedFileIntevalToStorage uploads one interval [offset, offset+size)
// read from reader and registers the resulting chunk on the file entry.
// It is invoked by the upload pipeline (see newMemoryChunkPages); cleanupFn
// releases the pipeline's buffer and must run regardless of upload outcome.
// NOTE(review): "Inteval" looks like a typo for "Interval"; kept as-is since
// the name is referenced elsewhere in this file.
func (pages *ChunkedDirtyPages) saveChunkedFileIntevalToStorage(reader io.Reader, offset int64, size int64, cleanupFn func()) {
	// Capture mtime before the (possibly slow) upload so the chunk timestamp
	// reflects when the data was handed off, not when the upload finished.
	mtime := time.Now().UnixNano()
	defer cleanupFn()
	chunk, collection, replication, err := pages.fh.f.wfs.saveDataAsChunk(pages.fh.f.fullpath())(reader, pages.fh.f.Name, offset)
	if err != nil {
		glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.fh.f.fullpath(), offset, offset+size, err)
		// Remember the failure; it is surfaced later by FlushData.
		pages.lastErr = err
		return
	}
	chunk.Mtime = mtime
	pages.collection, pages.replication = collection, replication
	// Serialize chunk registration and view-cache invalidation with any
	// concurrent pipeline callbacks.
	pages.chunkAddLock.Lock()
	pages.fh.f.addChunks([]*filer_pb.FileChunk{chunk})
	pages.fh.entryViewCache = nil
	glog.V(3).Infof("%s saveToStorage %s [%d,%d)", pages.fh.f.fullpath(), chunk.FileId, offset, offset+size)
	pages.chunkAddLock.Unlock()
}
  75. func (pages ChunkedDirtyPages) Destroy() {
  76. pages.uploadPipeline.Shutdown()
  77. }
  78. func (pages *ChunkedDirtyPages) LockForRead(startOffset, stopOffset int64) {
  79. pages.uploadPipeline.LockForRead(startOffset, stopOffset)
  80. }
  81. func (pages *ChunkedDirtyPages) UnlockForRead(startOffset, stopOffset int64) {
  82. pages.uploadPipeline.UnlockForRead(startOffset, stopOffset)
  83. }