package filesys

import (
	"github.com/chrislusf/seaweedfs/weed/filesys/page_writer"
	"github.com/chrislusf/seaweedfs/weed/glog"
)

// PageWriter buffers dirty pages for a file and routes each write either to a
// sequential (stream) writer or to a random-access writer, depending on the
// detected write pattern.
type PageWriter struct {
	f             *File
	collection    string
	replication   string
	chunkSize     int64
	writerPattern *WriterPattern

	randomWriter page_writer.DirtyPages
	streamWriter page_writer.DirtyPages
}

var (
	_ = page_writer.DirtyPages(&PageWriter{})
)

func newPageWriter(file *File, chunkSize int64) *PageWriter {
	pw := &PageWriter{
		f:             file,
		chunkSize:     chunkSize,
		writerPattern: NewWriterPattern(chunkSize),
		randomWriter:  newTempFileDirtyPages(file, chunkSize),
		streamWriter:  newStreamDirtyPages(file, chunkSize),
		//streamWriter: newContinuousDirtyPages(file),
		//streamWriter: nil,
	}
	return pw
}

// AddPage splits the incoming data along chunk boundaries and hands each piece
// to the appropriate writer.
func (pw *PageWriter) AddPage(offset int64, data []byte) {

	glog.V(4).Infof("%v AddPage [%d, %d) streaming:%v", pw.f.fullpath(), offset, offset+int64(len(data)), pw.writerPattern.IsStreamingMode())

	chunkIndex := offset / pw.chunkSize
	for i := chunkIndex; len(data) > 0; i++ {
		writeSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset)
		pw.addToOneChunk(i, offset, data[:writeSize])
		offset += writeSize
		data = data[writeSize:]
	}
}

func (pw *PageWriter) addToOneChunk(chunkIndex, offset int64, data []byte) {
	if chunkIndex > 0 {
		if pw.writerPattern.IsStreamingMode() && pw.streamWriter != nil {
			pw.streamWriter.AddPage(offset, data)
			return
		}
	}
	pw.randomWriter.AddPage(offset, data)
}

func (pw *PageWriter) FlushData() error {
	pw.writerPattern.Reset()
	if pw.streamWriter != nil {
		if err := pw.streamWriter.FlushData(); err != nil {
			return err
		}
	}
	return pw.randomWriter.FlushData()
}

// ReadDirtyDataAt fills data with any not-yet-flushed bytes starting at offset,
// consulting both writers, and returns the highest dirty offset found.
func (pw *PageWriter) ReadDirtyDataAt(data []byte, offset int64) (maxStop int64) {
	glog.V(4).Infof("ReadDirtyDataAt %v [%d, %d)", pw.f.fullpath(), offset, offset+int64(len(data)))

	chunkIndex := offset / pw.chunkSize
	for i := chunkIndex; len(data) > 0; i++ {
		readSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset)

		if pw.streamWriter != nil {
			m1 := pw.streamWriter.ReadDirtyDataAt(data[:readSize], offset)
			maxStop = max(maxStop, m1)
		}
		m2 := pw.randomWriter.ReadDirtyDataAt(data[:readSize], offset)
		maxStop = max(maxStop, m2)

		offset += readSize
		data = data[readSize:]
	}
	return
}

func (pw *PageWriter) GetStorageOptions() (collection, replication string) {
	if pw.writerPattern.IsStreamingMode() && pw.streamWriter != nil {
		return pw.streamWriter.GetStorageOptions()
	}
	return pw.randomWriter.GetStorageOptions()
}

func (pw *PageWriter) Destroy() {
	if pw.streamWriter != nil {
		pw.streamWriter.Destroy()
	}
	pw.randomWriter.Destroy()
}

func max(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}

func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}