From 7d304659ae496c747fda078eaa3454246a87008b Mon Sep 17 00:00:00 2001
From: chrislu
Date: Thu, 4 Dec 2025 21:45:07 -0800
Subject: [PATCH] filer: consistent timestamp handling in sequential read path

Use max(ts, task.chunk.ModifiedTsNs) in sequential path to match parallel
path behavior. Also update ts before error check so that on failure, the
returned timestamp reflects the max of all chunks processed so far.
---
 weed/filer/reader_at.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go
index 4e2719e00..b13690e0c 100644
--- a/weed/filer/reader_at.go
+++ b/weed/filer/reader_at.go
@@ -244,12 +244,12 @@ func (c *ChunkReadAt) doReadAt(ctx context.Context, p []byte, offset int64) (n i
 	if len(tasks) <= 1 || c.readerPattern.IsRandomMode() {
 		for _, task := range tasks {
 			copied, readErr := c.readChunkSliceAt(ctx, p[task.bufferStart:task.bufferEnd], task.chunk, nil, task.chunkOffset)
+			ts = max(ts, task.chunk.ModifiedTsNs)
 			if readErr != nil {
 				glog.Errorf("fetching chunk %+v: %v\n", task.chunk, readErr)
-				return n + copied, task.chunk.ModifiedTsNs, readErr
+				return n + copied, ts, readErr
 			}
 			n += copied
-			ts = task.chunk.ModifiedTsNs
 		}
 	} else {
 		// Parallel chunk fetching for multiple chunks
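
Note: the following is a minimal standalone sketch (not the SeaweedFS code) of
the timestamp-aggregation rule the patch applies to the sequential path: ts is
advanced to the max ModifiedTsNs of every chunk before the error check, so a
failed read still returns the max timestamp of all chunks touched so far. The
names chunkTask, readChunk, and readSequential are hypothetical, and the max
builtin assumes Go 1.21+.

package main

import (
	"errors"
	"fmt"
)

// chunkTask is a hypothetical stand-in for a chunk read task.
type chunkTask struct {
	data         []byte
	modifiedTsNs int64
	failRead     bool // simulate a fetch failure for the example
}

// readChunk stands in for a real chunk fetch; hypothetical helper.
func readChunk(t chunkTask, dst []byte) (int, error) {
	if t.failRead {
		return 0, errors.New("simulated fetch failure")
	}
	return copy(dst, t.data), nil
}

// readSequential mirrors the patched behavior: update ts before checking
// readErr, and use max so out-of-order chunk timestamps cannot lower it.
func readSequential(tasks []chunkTask, p []byte) (n int, ts int64, err error) {
	for _, task := range tasks {
		copied, readErr := readChunk(task, p[n:])
		ts = max(ts, task.modifiedTsNs) // before the error check, as in the patch
		if readErr != nil {
			return n + copied, ts, readErr
		}
		n += copied
	}
	return n, ts, nil
}

func main() {
	buf := make([]byte, 16)
	tasks := []chunkTask{
		{data: []byte("abcd"), modifiedTsNs: 300},
		{data: []byte("efgh"), modifiedTsNs: 100, failRead: true},
	}
	n, ts, err := readSequential(tasks, buf)
	// Prints "4 300 simulated fetch failure": the returned ts is the max
	// seen so far (300), not the failing chunk's smaller timestamp (100).
	fmt.Println(n, ts, err)
}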