You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

106 lines
2.7 KiB

  1. package filesys
  2. import (
  3. "fmt"
  4. "github.com/chrislusf/seaweedfs/weed/filesys/page_writer"
  5. "github.com/chrislusf/seaweedfs/weed/glog"
  6. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  7. "io"
  8. "sync"
  9. "time"
  10. )
// StreamDirtyPages collects dirty page writes for a single file and streams
// them to storage through a ChunkedStreamWriter.
type StreamDirtyPages struct {
	f *File // the file whose dirty pages are being tracked

	writeWaitGroup sync.WaitGroup // tracks in-flight chunk uploads so FlushData can wait for them
	pageAddLock    sync.Mutex     // serializes AddPage calls
	chunkAddLock   sync.Mutex     // serializes appending uploaded chunks to the file's chunk list
	lastErr        error          // last write/upload error, surfaced by FlushData; NOTE(review): written from upload goroutines without a lock — confirm whether synchronization is needed

	collection  string // storage collection reported by the most recent successful upload
	replication string // replication setting reported by the most recent successful upload

	chunkedStream *page_writer.ChunkedStreamWriter // buffers page data into fixed-size chunk intervals
}
  21. func newStreamDirtyPages(file *File, chunkSize int64) *StreamDirtyPages {
  22. dirtyPages := &StreamDirtyPages{
  23. f: file,
  24. chunkedStream: page_writer.NewChunkedStreamWriter(chunkSize),
  25. }
  26. dirtyPages.chunkedStream.SetSaveToStorageFunction(dirtyPages.saveChunkedFileIntevalToStorage)
  27. return dirtyPages
  28. }
  29. func (pages *StreamDirtyPages) AddPage(offset int64, data []byte) {
  30. pages.pageAddLock.Lock()
  31. defer pages.pageAddLock.Unlock()
  32. glog.V(4).Infof("%v stream AddPage [%d, %d)", pages.f.fullpath(), offset, offset+int64(len(data)))
  33. if _, err := pages.chunkedStream.WriteAt(data, offset); err != nil {
  34. pages.lastErr = err
  35. }
  36. return
  37. }
  38. func (pages *StreamDirtyPages) FlushData() error {
  39. pages.saveChunkedFileToStorage()
  40. pages.writeWaitGroup.Wait()
  41. if pages.lastErr != nil {
  42. return fmt.Errorf("flush data: %v", pages.lastErr)
  43. }
  44. pages.chunkedStream.Reset()
  45. return nil
  46. }
// ReadDirtyDataAt copies any not-yet-flushed bytes starting at startOffset
// into data, returning the highest exclusive offset that was read
// (delegated entirely to the chunked stream writer).
func (pages *StreamDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) {
	return pages.chunkedStream.ReadDataAt(data, startOffset)
}
// GetStorageOptions returns the collection and replication recorded from
// the most recent successful chunk upload (empty until one has completed).
func (pages *StreamDirtyPages) GetStorageOptions() (collection, replication string) {
	return pages.collection, pages.replication
}
// saveChunkedFileToStorage flushes every buffered chunk interval, which in
// turn invokes saveChunkedFileIntevalToStorage for each one via the
// save-to-storage function registered in newStreamDirtyPages.
func (pages *StreamDirtyPages) saveChunkedFileToStorage() {
	pages.chunkedStream.FlushAll()
}
// saveChunkedFileIntevalToStorage uploads one chunk interval [offset, offset+size)
// read from reader, then appends the resulting chunk to the file's chunk list.
// The upload runs asynchronously (on the shared concurrentWriters pool when
// available, otherwise on a fresh goroutine); writeWaitGroup tracks it so
// FlushData can wait for completion. cleanupFn always runs when the attempt
// finishes. (The "Inteval" spelling is kept as-is: the name is registered in
// newStreamDirtyPages and renaming it here alone would break that wiring.)
func (pages *StreamDirtyPages) saveChunkedFileIntevalToStorage(reader io.Reader, offset int64, size int64, cleanupFn func()) {

	// Capture the modification time now, before the asynchronous upload runs.
	mtime := time.Now().UnixNano()
	pages.writeWaitGroup.Add(1)
	writer := func() {
		defer pages.writeWaitGroup.Done()
		defer cleanupFn()

		chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
		if err != nil {
			glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
			// NOTE(review): lastErr is written here from an upload goroutine and
			// read in FlushData without a lock — confirm whether this race matters
			// (FlushData does Wait() first, but concurrent AddPage may also write it).
			pages.lastErr = err
			return
		}
		chunk.Mtime = mtime
		// NOTE(review): collection/replication are also updated without a lock;
		// concurrent uploads may interleave — confirm acceptable.
		pages.collection, pages.replication = collection, replication
		// chunkAddLock guards the append to the file's chunk list.
		pages.chunkAddLock.Lock()
		pages.f.addChunks([]*filer_pb.FileChunk{chunk})
		glog.V(3).Infof("%s saveToStorage %s [%d,%d)", pages.f.fullpath(), chunk.FileId, offset, offset+size)
		pages.chunkAddLock.Unlock()
	}

	if pages.f.wfs.concurrentWriters != nil {
		pages.f.wfs.concurrentWriters.Execute(writer)
	} else {
		go writer()
	}
}
  81. func (pages StreamDirtyPages) Destroy() {
  82. pages.chunkedStream.Reset()
  83. }