You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

196 lines
6.1 KiB

3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
  1. package page_writer
  2. import (
  3. "fmt"
  4. "github.com/seaweedfs/seaweedfs/weed/glog"
  5. "github.com/seaweedfs/seaweedfs/weed/util"
  6. "sync"
  7. "sync/atomic"
  8. "time"
  9. )
// LogicChunkIndex identifies a fixed-size chunk position within a file:
// a write at offset off lands in chunk index off / ChunkSize.
type LogicChunkIndex int
// UploadPipeline buffers written file data in per-chunk pages and uploads
// sealed (no-longer-writable) chunks to storage through a bounded worker pool.
type UploadPipeline struct {
	filepath             util.FullPath
	ChunkSize            int64                            // fixed logical chunk size; offsets map to chunks by off / ChunkSize
	writableChunks       map[LogicChunkIndex]PageChunk    // chunks still accepting writes, guarded by writableChunksLock
	writableChunksLock   sync.Mutex
	sealedChunks         map[LogicChunkIndex]*SealedChunk // chunks queued for / undergoing upload, guarded by sealedChunksLock
	sealedChunksLock     sync.Mutex
	uploaders            *util.LimitedConcurrentExecutor // bounds the number of concurrent uploads
	uploaderCount        int32                           // in-flight upload count, updated atomically
	uploaderCountCond    *sync.Cond                      // broadcast whenever an upload completes
	saveToStorageFn      SaveToStorageFunc
	activeReadChunks     map[LogicChunkIndex]int // per-chunk reader reference counts, guarded by activeReadChunksLock
	activeReadChunksLock sync.Mutex
	writableChunkLimit   int       // max buffered writable chunks per file before evicting the fullest one
	swapFile             *SwapFile // disk-backed chunk storage used when the memory budget is exceeded
}
// SealedChunk wraps a chunk that no longer accepts writes and is
// reference-counted while it is being uploaded and/or read.
type SealedChunk struct {
	chunk            PageChunk
	referenceCounter int // track uploading or reading processes
}
  31. func (sc *SealedChunk) FreeReference(messageOnFree string) {
  32. sc.referenceCounter--
  33. if sc.referenceCounter == 0 {
  34. glog.V(4).Infof("Free sealed chunk: %s", messageOnFree)
  35. sc.chunk.FreeResource()
  36. }
  37. }
  38. func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64, saveToStorageFn SaveToStorageFunc, bufferChunkLimit int, swapFileDir string) *UploadPipeline {
  39. return &UploadPipeline{
  40. ChunkSize: chunkSize,
  41. writableChunks: make(map[LogicChunkIndex]PageChunk),
  42. sealedChunks: make(map[LogicChunkIndex]*SealedChunk),
  43. uploaders: writers,
  44. uploaderCountCond: sync.NewCond(&sync.Mutex{}),
  45. saveToStorageFn: saveToStorageFn,
  46. activeReadChunks: make(map[LogicChunkIndex]int),
  47. writableChunkLimit: bufferChunkLimit,
  48. swapFile: NewSwapFile(swapFileDir, chunkSize),
  49. }
  50. }
  51. func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n int) {
  52. up.writableChunksLock.Lock()
  53. defer up.writableChunksLock.Unlock()
  54. logicChunkIndex := LogicChunkIndex(off / up.ChunkSize)
  55. pageChunk, found := up.writableChunks[logicChunkIndex]
  56. if !found {
  57. if len(up.writableChunks) > up.writableChunkLimit {
  58. // if current file chunks is over the per file buffer count limit
  59. fullestChunkIndex, fullness := LogicChunkIndex(-1), int64(0)
  60. for lci, mc := range up.writableChunks {
  61. chunkFullness := mc.WrittenSize()
  62. if fullness < chunkFullness {
  63. fullestChunkIndex = lci
  64. fullness = chunkFullness
  65. }
  66. }
  67. up.moveToSealed(up.writableChunks[fullestChunkIndex], fullestChunkIndex)
  68. delete(up.writableChunks, fullestChunkIndex)
  69. // fmt.Printf("flush chunk %d with %d bytes written\n", logicChunkIndex, fullness)
  70. }
  71. if isSequential &&
  72. len(up.writableChunks) < up.writableChunkLimit &&
  73. atomic.LoadInt64(&memChunkCounter) < 4*int64(up.writableChunkLimit) {
  74. pageChunk = NewMemChunk(logicChunkIndex, up.ChunkSize)
  75. } else {
  76. pageChunk = up.swapFile.NewTempFileChunk(logicChunkIndex)
  77. }
  78. up.writableChunks[logicChunkIndex] = pageChunk
  79. }
  80. n = pageChunk.WriteDataAt(p, off)
  81. up.maybeMoveToSealed(pageChunk, logicChunkIndex)
  82. return
  83. }
// MaybeReadDataAt fills p from buffered (not yet persisted) data at offset
// off, if the covering chunk is still held by this pipeline. It returns the
// exclusive end offset of the data read, or 0 when nothing was buffered.
// Sealed chunks are consulted first, then writable chunks, so the fresher
// writable data wins via max().
func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
	logicChunkIndex := LogicChunkIndex(off / up.ChunkSize)

	// read from sealed chunks first
	up.sealedChunksLock.Lock()
	sealedChunk, found := up.sealedChunks[logicChunkIndex]
	if found {
		// take a reference so the uploader cannot free the chunk mid-read
		sealedChunk.referenceCounter++
	}
	up.sealedChunksLock.Unlock()
	if found {
		maxStop = sealedChunk.chunk.ReadDataAt(p, off)
		glog.V(4).Infof("%s read sealed memchunk [%d,%d)", up.filepath, off, maxStop)
		// NOTE(review): this decrement runs outside sealedChunksLock while
		// other goroutines may concurrently touch referenceCounter — confirm
		// this is race-free, or move it under the lock / use an atomic.
		sealedChunk.FreeReference(fmt.Sprintf("%s finish reading chunk %d", up.filepath, logicChunkIndex))
	}

	// read from writable chunks last
	up.writableChunksLock.Lock()
	defer up.writableChunksLock.Unlock()
	writableChunk, found := up.writableChunks[logicChunkIndex]
	if !found {
		return
	}
	writableMaxStop := writableChunk.ReadDataAt(p, off)
	glog.V(4).Infof("%s read writable memchunk [%d,%d)", up.filepath, off, writableMaxStop)
	maxStop = max(maxStop, writableMaxStop)

	return
}
  110. func (up *UploadPipeline) FlushAll() {
  111. up.writableChunksLock.Lock()
  112. defer up.writableChunksLock.Unlock()
  113. for logicChunkIndex, memChunk := range up.writableChunks {
  114. up.moveToSealed(memChunk, logicChunkIndex)
  115. }
  116. up.waitForCurrentWritersToComplete()
  117. }
  118. func (up *UploadPipeline) maybeMoveToSealed(memChunk PageChunk, logicChunkIndex LogicChunkIndex) {
  119. if memChunk.IsComplete() {
  120. up.moveToSealed(memChunk, logicChunkIndex)
  121. }
  122. }
  123. func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex LogicChunkIndex) {
  124. atomic.AddInt32(&up.uploaderCount, 1)
  125. glog.V(4).Infof("%s uploaderCount %d ++> %d", up.filepath, up.uploaderCount-1, up.uploaderCount)
  126. up.sealedChunksLock.Lock()
  127. if oldMemChunk, found := up.sealedChunks[logicChunkIndex]; found {
  128. oldMemChunk.FreeReference(fmt.Sprintf("%s replace chunk %d", up.filepath, logicChunkIndex))
  129. }
  130. sealedChunk := &SealedChunk{
  131. chunk: memChunk,
  132. referenceCounter: 1, // default 1 is for uploading process
  133. }
  134. up.sealedChunks[logicChunkIndex] = sealedChunk
  135. delete(up.writableChunks, logicChunkIndex)
  136. up.sealedChunksLock.Unlock()
  137. up.uploaders.Execute(func() {
  138. // first add to the file chunks
  139. sealedChunk.chunk.SaveContent(up.saveToStorageFn)
  140. // notify waiting process
  141. atomic.AddInt32(&up.uploaderCount, -1)
  142. glog.V(4).Infof("%s uploaderCount %d --> %d", up.filepath, up.uploaderCount+1, up.uploaderCount)
  143. // Lock and Unlock are not required,
  144. // but it may signal multiple times during one wakeup,
  145. // and the waiting goroutine may miss some of them!
  146. up.uploaderCountCond.L.Lock()
  147. up.uploaderCountCond.Broadcast()
  148. up.uploaderCountCond.L.Unlock()
  149. // wait for readers
  150. for up.IsLocked(logicChunkIndex) {
  151. time.Sleep(59 * time.Millisecond)
  152. }
  153. // then remove from sealed chunks
  154. up.sealedChunksLock.Lock()
  155. defer up.sealedChunksLock.Unlock()
  156. delete(up.sealedChunks, logicChunkIndex)
  157. sealedChunk.FreeReference(fmt.Sprintf("%s finished uploading chunk %d", up.filepath, logicChunkIndex))
  158. })
  159. }
  160. func (up *UploadPipeline) Shutdown() {
  161. up.swapFile.FreeResource()
  162. up.sealedChunksLock.Lock()
  163. defer up.sealedChunksLock.Unlock()
  164. for logicChunkIndex, sealedChunk := range up.sealedChunks {
  165. sealedChunk.FreeReference(fmt.Sprintf("%s uploadpipeline shutdown chunk %d", up.filepath, logicChunkIndex))
  166. }
  167. }