
mount: adjust locking for upload pipeline

pull/4077/head
chrislu committed 2 years ago
commit 28d479e5c0
  1. weed/mount/page_writer/upload_pipeline.go (64 changed lines)
  2. weed/mount/page_writer/upload_pipeline_lock.go (6 changed lines)

weed/mount/page_writer/upload_pipeline.go (64 changed lines)

@@ -6,26 +6,24 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"sync"
 	"sync/atomic"
-	"time"
 )
 
 type LogicChunkIndex int
 
 type UploadPipeline struct {
-	uploaderCount        int32
-	uploaderCountCond    *sync.Cond
-	filepath             util.FullPath
-	ChunkSize            int64
-	writableChunks       map[LogicChunkIndex]PageChunk
-	writableChunksLock   sync.Mutex
-	sealedChunks         map[LogicChunkIndex]*SealedChunk
-	sealedChunksLock     sync.Mutex
-	uploaders            *util.LimitedConcurrentExecutor
-	saveToStorageFn      SaveToStorageFunc
-	activeReadChunks     map[LogicChunkIndex]int
-	activeReadChunksLock sync.Mutex
-	writableChunkLimit   int
-	swapFile             *SwapFile
+	uploaderCount      int32
+	uploaderCountCond  *sync.Cond
+	filepath           util.FullPath
+	ChunkSize          int64
+	uploaders          *util.LimitedConcurrentExecutor
+	saveToStorageFn    SaveToStorageFunc
+	writableChunkLimit int
+	swapFile           *SwapFile
+	chunksLock         sync.Mutex
+	writableChunks     map[LogicChunkIndex]PageChunk
+	sealedChunks       map[LogicChunkIndex]*SealedChunk
+	activeReadChunks   map[LogicChunkIndex]int
+	readerCountCond    *sync.Cond
 }
 
 type SealedChunk struct {
@@ -42,7 +40,7 @@ func (sc *SealedChunk) FreeReference(messageOnFree string) {
 }
 
 func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64, saveToStorageFn SaveToStorageFunc, bufferChunkLimit int, swapFileDir string) *UploadPipeline {
-	return &UploadPipeline{
+	t := &UploadPipeline{
 		ChunkSize:      chunkSize,
 		writableChunks: make(map[LogicChunkIndex]PageChunk),
 		sealedChunks:   make(map[LogicChunkIndex]*SealedChunk),
@@ -53,11 +51,13 @@ func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64,
 		writableChunkLimit: bufferChunkLimit,
 		swapFile:           NewSwapFile(swapFileDir, chunkSize),
 	}
+	t.readerCountCond = sync.NewCond(&t.chunksLock)
+	return t
 }
 
 func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n int) {
-	up.writableChunksLock.Lock()
-	defer up.writableChunksLock.Unlock()
+	up.chunksLock.Lock()
+	defer up.chunksLock.Unlock()
 
 	logicChunkIndex := LogicChunkIndex(off / up.ChunkSize)
@@ -95,13 +95,17 @@ func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n
 func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
 	logicChunkIndex := LogicChunkIndex(off / up.ChunkSize)
 
+	up.chunksLock.Lock()
+	defer func() {
+		up.readerCountCond.Signal()
+		up.chunksLock.Unlock()
+	}()
+
 	// read from sealed chunks first
-	up.sealedChunksLock.Lock()
 	sealedChunk, found := up.sealedChunks[logicChunkIndex]
 	if found {
 		sealedChunk.referenceCounter++
 	}
-	up.sealedChunksLock.Unlock()
 	if found {
 		maxStop = sealedChunk.chunk.ReadDataAt(p, off)
 		glog.V(4).Infof("%s read sealed memchunk [%d,%d)", up.filepath, off, maxStop)
@@ -109,8 +113,6 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
 	}
 
 	// read from writable chunks last
-	up.writableChunksLock.Lock()
-	defer up.writableChunksLock.Unlock()
 	writableChunk, found := up.writableChunks[logicChunkIndex]
 	if !found {
 		return
@@ -123,8 +125,8 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
 }
 
 func (up *UploadPipeline) FlushAll() {
-	up.writableChunksLock.Lock()
-	defer up.writableChunksLock.Unlock()
+	up.chunksLock.Lock()
+	defer up.chunksLock.Unlock()
 
 	for logicChunkIndex, memChunk := range up.writableChunks {
 		up.moveToSealed(memChunk, logicChunkIndex)
@@ -143,8 +145,6 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
 	atomic.AddInt32(&up.uploaderCount, 1)
 	glog.V(4).Infof("%s uploaderCount %d ++> %d", up.filepath, up.uploaderCount-1, up.uploaderCount)
 
-	up.sealedChunksLock.Lock()
 	if oldMemChunk, found := up.sealedChunks[logicChunkIndex]; found {
 		oldMemChunk.FreeReference(fmt.Sprintf("%s replace chunk %d", up.filepath, logicChunkIndex))
 	}
@@ -155,8 +155,6 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
 	up.sealedChunks[logicChunkIndex] = sealedChunk
 	delete(up.writableChunks, logicChunkIndex)
-	up.sealedChunksLock.Unlock()
 
 	up.uploaders.Execute(func() {
 		// first add to the file chunks
 		sealedChunk.chunk.SaveContent(up.saveToStorageFn)
@@ -172,13 +170,13 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
 		up.uploaderCountCond.L.Unlock()
 
 		// wait for readers
+		up.chunksLock.Lock()
+		defer up.chunksLock.Unlock()
 		for up.IsLocked(logicChunkIndex) {
-			time.Sleep(59 * time.Millisecond)
+			up.readerCountCond.Wait()
 		}
 
 		// then remove from sealed chunks
-		up.sealedChunksLock.Lock()
-		defer up.sealedChunksLock.Unlock()
 		delete(up.sealedChunks, logicChunkIndex)
 		sealedChunk.FreeReference(fmt.Sprintf("%s finished uploading chunk %d", up.filepath, logicChunkIndex))
@@ -188,8 +186,8 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
 
 func (up *UploadPipeline) Shutdown() {
 	up.swapFile.FreeResource()
 
-	up.sealedChunksLock.Lock()
-	defer up.sealedChunksLock.Unlock()
+	up.chunksLock.Lock()
+	defer up.chunksLock.Unlock()
 	for logicChunkIndex, sealedChunk := range up.sealedChunks {
 		sealedChunk.FreeReference(fmt.Sprintf("%s uploadpipeline shutdown chunk %d", up.filepath, logicChunkIndex))
 	}
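The substantive change in this file is how moveToSealed waits for in-flight readers: instead of polling IsLocked with time.Sleep(59 * time.Millisecond), the uploader goroutine now blocks on readerCountCond, which shares chunksLock and is signalled from MaybeReadDataAt's deferred unlock whenever a reader finishes. Below is a minimal, self-contained sketch of that condition-variable pattern; chunkTracker and its methods are illustrative stand-ins, not the SeaweedFS API.

package main

import (
	"fmt"
	"sync"
	"time"
)

// chunkTracker is a toy stand-in for UploadPipeline: one mutex (chunksLock in
// the real code) guards the reader count, and a sync.Cond built on that same
// mutex (readerCountCond) lets a writer sleep until readers are gone.
type chunkTracker struct {
	mu          sync.Mutex
	readerCond  *sync.Cond
	activeReads int
}

func newChunkTracker() *chunkTracker {
	t := &chunkTracker{}
	t.readerCond = sync.NewCond(&t.mu) // same pairing as sync.NewCond(&t.chunksLock) in NewUploadPipeline
	return t
}

func (t *chunkTracker) lockForRead() {
	t.mu.Lock()
	t.activeReads++
	t.mu.Unlock()
}

func (t *chunkTracker) unlockForRead() {
	t.mu.Lock()
	t.activeReads--
	t.readerCond.Signal() // wake a waiting writer, loosely mirroring readerCountCond.Signal()
	t.mu.Unlock()
}

// waitForReaders replaces a polling loop such as
//   for isLocked() { time.Sleep(59 * time.Millisecond) }
// with a cond wait; Wait atomically releases mu while blocked and reacquires it on wakeup.
func (t *chunkTracker) waitForReaders() {
	t.mu.Lock()
	defer t.mu.Unlock()
	for t.activeReads > 0 {
		t.readerCond.Wait()
	}
}

func main() {
	t := newChunkTracker()
	t.lockForRead()
	go func() {
		time.Sleep(100 * time.Millisecond)
		t.unlockForRead()
	}()
	t.waitForReaders()
	fmt.Println("no active readers remain")
}

Note that sync.Cond.Wait must be called with the lock held and the condition re-checked in a loop after every wakeup, which is why the diff keeps the for up.IsLocked(...) loop around readerCountCond.Wait().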

weed/mount/page_writer/upload_pipeline_lock.go (6 changed lines)

@@ -10,8 +10,6 @@ func (up *UploadPipeline) LockForRead(startOffset, stopOffset int64) {
 	if stopOffset%up.ChunkSize > 0 {
 		stopLogicChunkIndex += 1
 	}
-	up.activeReadChunksLock.Lock()
-	defer up.activeReadChunksLock.Unlock()
 	for i := startLogicChunkIndex; i < stopLogicChunkIndex; i++ {
 		if count, found := up.activeReadChunks[i]; found {
 			up.activeReadChunks[i] = count + 1
@@ -27,8 +25,6 @@ func (up *UploadPipeline) UnlockForRead(startOffset, stopOffset int64) {
 	if stopOffset%up.ChunkSize > 0 {
 		stopLogicChunkIndex += 1
 	}
-	up.activeReadChunksLock.Lock()
-	defer up.activeReadChunksLock.Unlock()
 	for i := startLogicChunkIndex; i < stopLogicChunkIndex; i++ {
 		if count, found := up.activeReadChunks[i]; found {
 			if count == 1 {
@@ -41,8 +37,6 @@ func (up *UploadPipeline) UnlockForRead(startOffset, stopOffset int64) {
 }
 
 func (up *UploadPipeline) IsLocked(logicChunkIndex LogicChunkIndex) bool {
-	up.activeReadChunksLock.Lock()
-	defer up.activeReadChunksLock.Unlock()
 	if count, found := up.activeReadChunks[logicChunkIndex]; found {
 		return count > 0
 	}
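With the dedicated activeReadChunksLock gone, LockForRead, UnlockForRead and IsLocked no longer take any lock of their own, so the activeReadChunks map is presumably meant to be accessed only while the shared chunksLock is held by the caller (moveToSealed, for instance, acquires chunksLock before its IsLocked loop). A small hypothetical sketch of that caller-holds-the-lock convention; readTracker and isBusy are illustrative names, not part of the codebase.

package main

import (
	"fmt"
	"sync"
)

// readTracker is a hypothetical stand-in for the activeReadChunks map after this
// change: a single shared mutex protects it, and the accessor does no locking of
// its own, assuming the caller already holds that mutex.
type readTracker struct {
	mu     sync.Mutex
	counts map[int]int
}

// isBusy must be called with t.mu held, like IsLocked after this commit.
func (t *readTracker) isBusy(chunk int) bool {
	return t.counts[chunk] > 0
}

func main() {
	t := &readTracker{counts: map[int]int{2: 1}}

	t.mu.Lock()
	busy := t.isBusy(2) // safe: the caller holds t.mu across the map read
	t.mu.Unlock()

	fmt.Println("chunk 2 busy:", busy)
}

Collapsing writableChunksLock, sealedChunksLock and activeReadChunksLock into the single chunksLock trades some potential concurrency for a simpler locking story: there is no longer any lock ordering to get wrong between the three maps.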
