diff --git a/weed/filer/filechunk_group.go b/weed/filer/filechunk_group.go
index 9b86c5e47..3dadfbe93 100644
--- a/weed/filer/filechunk_group.go
+++ b/weed/filer/filechunk_group.go
@@ -20,23 +20,19 @@ type ChunkGroup struct {
 	concurrentReaders int
 }
 
-// NewChunkGroup creates a ChunkGroup with default concurrency settings.
-// For better read performance, use NewChunkGroupWithConcurrency instead.
-func NewChunkGroup(lookupFn wdclient.LookupFileIdFunctionType, chunkCache chunk_cache.ChunkCache, chunks []*filer_pb.FileChunk) (*ChunkGroup, error) {
-	return NewChunkGroupWithConcurrency(lookupFn, chunkCache, chunks, 16)
-}
-
-// NewChunkGroupWithConcurrency creates a ChunkGroup with configurable concurrency.
+// NewChunkGroup creates a ChunkGroup with configurable concurrency.
+// concurrentReaders is optional; if not provided or <= 0, defaults to 16.
 // concurrentReaders controls:
 // - Maximum parallel chunk fetches during read operations
 // - Read-ahead prefetch parallelism
 // - Number of concurrent section reads for large files
-func NewChunkGroupWithConcurrency(lookupFn wdclient.LookupFileIdFunctionType, chunkCache chunk_cache.ChunkCache, chunks []*filer_pb.FileChunk, concurrentReaders int) (*ChunkGroup, error) {
-	if concurrentReaders <= 0 {
-		concurrentReaders = 16
+func NewChunkGroup(lookupFn wdclient.LookupFileIdFunctionType, chunkCache chunk_cache.ChunkCache, chunks []*filer_pb.FileChunk, concurrentReaders ...int) (*ChunkGroup, error) {
+	readers := 16
+	if len(concurrentReaders) > 0 && concurrentReaders[0] > 0 {
+		readers = concurrentReaders[0]
 	}
 	// ReaderCache limit should be at least concurrentReaders to allow parallel prefetching
-	readerCacheLimit := concurrentReaders * 2
+	readerCacheLimit := readers * 2
 	if readerCacheLimit < 32 {
 		readerCacheLimit = 32
 	}
@@ -44,7 +40,7 @@ func NewChunkGroupWithConcurrency(lookupFn wdclient.LookupFileIdFunctionType, ch
 		lookupFn:          lookupFn,
 		sections:          make(map[SectionIndex]*FileChunkSection),
 		readerCache:       NewReaderCache(readerCacheLimit, chunkCache, lookupFn),
-		concurrentReaders: concurrentReaders,
+		concurrentReaders: readers,
 	}
 
 	err := group.SetChunks(chunks)
diff --git a/weed/mount/filehandle.go b/weed/mount/filehandle.go
index a19b901f7..e912fe310 100644
--- a/weed/mount/filehandle.go
+++ b/weed/mount/filehandle.go
@@ -81,7 +81,7 @@ func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) {
 	fileSize := filer.FileSize(entry)
 	entry.Attributes.FileSize = fileSize
 	var resolveManifestErr error
-	fh.entryChunkGroup, resolveManifestErr = filer.NewChunkGroupWithConcurrency(fh.wfs.LookupFn(), fh.wfs.chunkCache, entry.Chunks, fh.wfs.option.ConcurrentReaders)
+	fh.entryChunkGroup, resolveManifestErr = filer.NewChunkGroup(fh.wfs.LookupFn(), fh.wfs.chunkCache, entry.Chunks, fh.wfs.option.ConcurrentReaders)
 	if resolveManifestErr != nil {
 		glog.Warningf("failed to resolve manifest chunks in %+v", entry)
 	}
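
This change folds NewChunkGroupWithConcurrency into NewChunkGroup by making the concurrency argument variadic. A minimal usage sketch of the new signature follows; the helper name `buildChunkGroups`, its argument names, and the `github.com/seaweedfs/seaweedfs/...` import paths are illustrative assumptions, not part of this diff:

```go
package filer

import (
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
	"github.com/seaweedfs/seaweedfs/weed/wdclient"
)

// buildChunkGroups is a hypothetical helper showing both call styles
// against the new variadic constructor.
func buildChunkGroups(lookupFn wdclient.LookupFileIdFunctionType, cache chunk_cache.ChunkCache, chunks []*filer_pb.FileChunk) error {
	// No trailing argument: same behavior as the old NewChunkGroup, 16 concurrent readers.
	defaultGroup, err := NewChunkGroup(lookupFn, cache, chunks)
	if err != nil {
		return err
	}

	// An explicit positive value overrides the default; zero or negative still falls back to 16.
	tunedGroup, err := NewChunkGroup(lookupFn, cache, chunks, 32)
	if err != nil {
		return err
	}

	_, _ = defaultGroup, tunedGroup
	return nil
}
```

Existing callers that passed a concurrency value, such as the one in weed/mount/filehandle.go above, only drop the WithConcurrency suffix, since the extra argument is still accepted.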