@@ -6,7 +6,6 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"sync"
 	"sync/atomic"
-	"time"
 )
 
 type LogicChunkIndex int
@@ -16,16 +15,15 @@ type UploadPipeline struct {
 	uploaderCountCond  *sync.Cond
 	filepath           util.FullPath
 	ChunkSize          int64
-	writableChunks     map[LogicChunkIndex]PageChunk
-	writableChunksLock sync.Mutex
-	sealedChunks       map[LogicChunkIndex]*SealedChunk
-	sealedChunksLock   sync.Mutex
 	uploaders          *util.LimitedConcurrentExecutor
 	saveToStorageFn    SaveToStorageFunc
-	activeReadChunks     map[LogicChunkIndex]int
-	activeReadChunksLock sync.Mutex
 	writableChunkLimit int
 	swapFile           *SwapFile
+	chunksLock         sync.Mutex
+	writableChunks     map[LogicChunkIndex]PageChunk
+	sealedChunks       map[LogicChunkIndex]*SealedChunk
+	activeReadChunks   map[LogicChunkIndex]int
+	readerCountCond    *sync.Cond
 }
 
 type SealedChunk struct {
@@ -42,7 +40,7 @@ func (sc *SealedChunk) FreeReference(messageOnFree string) {
 }
 
 func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64, saveToStorageFn SaveToStorageFunc, bufferChunkLimit int, swapFileDir string) *UploadPipeline {
-	return &UploadPipeline{
+	t := &UploadPipeline{
 		ChunkSize:          chunkSize,
 		writableChunks:     make(map[LogicChunkIndex]PageChunk),
 		sealedChunks:       make(map[LogicChunkIndex]*SealedChunk),
@@ -53,11 +51,13 @@ func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64,
 		writableChunkLimit: bufferChunkLimit,
 		swapFile:           NewSwapFile(swapFileDir, chunkSize),
 	}
+	t.readerCountCond = sync.NewCond(&t.chunksLock)
+	return t
 }
 
 func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n int) {
-	up.writableChunksLock.Lock()
-	defer up.writableChunksLock.Unlock()
+	up.chunksLock.Lock()
+	defer up.chunksLock.Unlock()
 
 	logicChunkIndex := LogicChunkIndex(off / up.ChunkSize)
 
@@ -95,13 +95,17 @@ func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n
 func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
 	logicChunkIndex := LogicChunkIndex(off / up.ChunkSize)
 
+	up.chunksLock.Lock()
+	defer func() {
+		up.readerCountCond.Signal()
+		up.chunksLock.Unlock()
+	}()
+
 	// read from sealed chunks first
-	up.sealedChunksLock.Lock()
 	sealedChunk, found := up.sealedChunks[logicChunkIndex]
 	if found {
 		sealedChunk.referenceCounter++
 	}
-	up.sealedChunksLock.Unlock()
 	if found {
 		maxStop = sealedChunk.chunk.ReadDataAt(p, off)
 		glog.V(4).Infof("%s read sealed memchunk [%d,%d)", up.filepath, off, maxStop)
@@ -109,8 +113,6 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
 	}
 
 	// read from writable chunks last
-	up.writableChunksLock.Lock()
-	defer up.writableChunksLock.Unlock()
 	writableChunk, found := up.writableChunks[logicChunkIndex]
 	if !found {
 		return
@@ -123,8 +125,8 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
 }
 
 func (up *UploadPipeline) FlushAll() {
-	up.writableChunksLock.Lock()
-	defer up.writableChunksLock.Unlock()
+	up.chunksLock.Lock()
+	defer up.chunksLock.Unlock()
 
 	for logicChunkIndex, memChunk := range up.writableChunks {
 		up.moveToSealed(memChunk, logicChunkIndex)
@@ -143,8 +145,6 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
 	atomic.AddInt32(&up.uploaderCount, 1)
 	glog.V(4).Infof("%s uploaderCount %d ++> %d", up.filepath, up.uploaderCount-1, up.uploaderCount)
 
-	up.sealedChunksLock.Lock()
-
 	if oldMemChunk, found := up.sealedChunks[logicChunkIndex]; found {
 		oldMemChunk.FreeReference(fmt.Sprintf("%s replace chunk %d", up.filepath, logicChunkIndex))
 	}
@@ -155,8 +155,6 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
 	up.sealedChunks[logicChunkIndex] = sealedChunk
 	delete(up.writableChunks, logicChunkIndex)
 
-	up.sealedChunksLock.Unlock()
-
 	up.uploaders.Execute(func() {
 		// first add to the file chunks
 		sealedChunk.chunk.SaveContent(up.saveToStorageFn)
@@ -172,13 +170,13 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
 		up.uploaderCountCond.L.Unlock()
 
 		// wait for readers
+		up.chunksLock.Lock()
+		defer up.chunksLock.Unlock()
 		for up.IsLocked(logicChunkIndex) {
-			time.Sleep(59 * time.Millisecond)
+			up.readerCountCond.Wait()
 		}
 
 		// then remove from sealed chunks
-		up.sealedChunksLock.Lock()
-		defer up.sealedChunksLock.Unlock()
 		delete(up.sealedChunks, logicChunkIndex)
 		sealedChunk.FreeReference(fmt.Sprintf("%s finished uploading chunk %d", up.filepath, logicChunkIndex))
 
@@ -188,8 +186,8 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
 func (up *UploadPipeline) Shutdown() {
 	up.swapFile.FreeResource()
 
-	up.sealedChunksLock.Lock()
-	defer up.sealedChunksLock.Unlock()
+	up.chunksLock.Lock()
+	defer up.chunksLock.Unlock()
 	for logicChunkIndex, sealedChunk := range up.sealedChunks {
 		sealedChunk.FreeReference(fmt.Sprintf("%s uploadpipeline shutdown chunk %d", up.filepath, logicChunkIndex))
 	}
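
Taken together, the diff collapses the separate writableChunksLock/sealedChunksLock/activeReadChunksLock mutexes into a single chunksLock and replaces the 59 ms sleep-poll in moveToSealed with a sync.Cond (readerCountCond) that readers signal as they finish. The following is a minimal standalone sketch of that wait/signal pattern only; the resource type, activeReaders counter, and startRead/finishRead/waitForReaders names are hypothetical illustrations, not the SeaweedFS API.

package main

import (
	"fmt"
	"sync"
	"time"
)

// resource tracks in-flight readers; waitForReaders blocks on a sync.Cond
// until the count drops to zero instead of polling with time.Sleep.
// (Hypothetical example type, not part of SeaweedFS.)
type resource struct {
	mu            sync.Mutex
	activeReaders int
	cond          *sync.Cond
}

func newResource() *resource {
	r := &resource{}
	r.cond = sync.NewCond(&r.mu) // the Cond shares the mutex that guards activeReaders
	return r
}

func (r *resource) startRead() {
	r.mu.Lock()
	r.activeReaders++
	r.mu.Unlock()
}

func (r *resource) finishRead() {
	r.mu.Lock()
	r.activeReaders--
	r.mu.Unlock()
	r.cond.Signal() // wake a goroutine blocked in waitForReaders, like the deferred Signal in MaybeReadDataAt
}

func (r *resource) waitForReaders() {
	r.mu.Lock()
	defer r.mu.Unlock()
	for r.activeReaders > 0 { // re-check the condition after every wakeup, like the up.IsLocked loop
		r.cond.Wait() // releases mu while waiting, reacquires it before returning
	}
}

func main() {
	r := newResource()
	r.startRead()
	go func() {
		time.Sleep(10 * time.Millisecond)
		r.finishRead()
	}()
	r.waitForReaders()
	fmt.Println("all readers finished")
}

The key design point mirrored from the patch: the condition variable is built over the same mutex that protects the shared maps (sync.NewCond(&t.chunksLock) in NewUploadPipeline), so waiters re-evaluate the predicate under the lock and no wakeups are lost between the check and the Wait.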
|