
fmt

pull/7329/head

chrislu committed 4 weeks ago, commit 7e46abf052
1. weed/util/log_buffer/log_buffer.go (6 lines changed)
2. weed/util/log_buffer/log_read_stateless.go (56 lines changed)

weed/util/log_buffer/log_buffer.go (6 lines changed)

@@ -35,9 +35,9 @@ type LogReadFromDiskFuncType func(startPosition MessagePosition, stopTsNs int64,
 // DiskChunkCache caches chunks of historical data read from disk
 type DiskChunkCache struct {
-	mu sync.RWMutex
-	chunks map[int64]*CachedDiskChunk // Key: chunk start offset (aligned to chunkSize)
-	maxChunks int // Maximum number of chunks to cache
+	mu        sync.RWMutex
+	chunks    map[int64]*CachedDiskChunk // Key: chunk start offset (aligned to chunkSize)
+	maxChunks int                        // Maximum number of chunks to cache
 }

 // CachedDiskChunk represents a cached chunk of disk data
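The CachedDiskChunk type itself sits outside this hunk. Judging from the fields the functions in log_read_stateless.go touch (startOffset, lastAccess, messages) and the endOffset argument passed to cacheDiskChunk, a plausible sketch of the struct is the following; this is reconstructed from usage, and the real field set in the SeaweedFS source may differ:

// Sketch only: inferred from how the cache functions below use the type,
// not copied from the source.
type CachedDiskChunk struct {
	startOffset int64                // first offset in the chunk (aligned to chunkSize)
	endOffset   int64                // last offset actually read into the chunk
	lastAccess  time.Time            // touched on every cache hit; drives LRU eviction
	messages    []*filer_pb.LogEntry // decoded log entries for this chunk
}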

weed/util/log_buffer/log_read_stateless.go (56 lines changed)

@@ -182,62 +182,62 @@ func readHistoricalDataFromDisk(
	highWaterMark int64,
) (messages []*filer_pb.LogEntry, nextOffset int64, err error) {
	const chunkSize = 1000 // Size of each cached chunk

	// Calculate chunk start offset (aligned to chunkSize boundary)
	chunkStartOffset := (startOffset / chunkSize) * chunkSize

	// Try to get from cache first
	cachedMessages, cacheHit := getCachedDiskChunk(logBuffer, chunkStartOffset)
	if cacheHit {
		// Found in cache - extract requested messages
		glog.V(3).Infof("[DiskCache] Cache HIT for chunk starting at offset %d (requested: %d)",
			chunkStartOffset, startOffset)
		return extractMessagesFromCache(cachedMessages, startOffset, maxMessages, maxBytes)
	}

	glog.V(3).Infof("[DiskCache] Cache MISS for chunk starting at offset %d, reading from disk",
		chunkStartOffset)

	// Not in cache - read entire chunk from disk for caching
	chunkMessages := make([]*filer_pb.LogEntry, 0, chunkSize)
	chunkNextOffset := chunkStartOffset

	// Create a position for the chunk start
	chunkPosition := MessagePosition{
		IsOffsetBased: true,
		Offset:        chunkStartOffset,
	}

	// Define callback to collect the entire chunk
	eachMessageFn := func(logEntry *filer_pb.LogEntry) (isDone bool, err error) {
		// Read up to chunkSize messages for caching
		if len(chunkMessages) >= chunkSize {
			return true, nil
		}

		chunkMessages = append(chunkMessages, logEntry)
		chunkNextOffset++

		// Continue reading the chunk
		return false, nil
	}

	// Read chunk from disk
	_, _, readErr := logBuffer.ReadFromDiskFn(chunkPosition, 0, eachMessageFn)
	if readErr != nil {
		return nil, startOffset, fmt.Errorf("failed to read from disk: %w", readErr)
	}

	// Cache the chunk for future reads
	if len(chunkMessages) > 0 {
		cacheDiskChunk(logBuffer, chunkStartOffset, chunkNextOffset-1, chunkMessages)
		glog.V(3).Infof("[DiskCache] Cached chunk: offsets %d-%d (%d messages)",
			chunkStartOffset, chunkNextOffset-1, len(chunkMessages))
	}

	// Extract requested messages from the chunk
	return extractMessagesFromCache(chunkMessages, startOffset, maxMessages, maxBytes)
}
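The chunk alignment above is plain integer division, so every offset in the same thousand-wide window maps to the same cached chunk. A standalone illustration (plain Go, not SeaweedFS code):

package main

import "fmt"

func main() {
	const chunkSize = 1000
	for _, startOffset := range []int64{0, 999, 1000, 2345} {
		// Same arithmetic as readHistoricalDataFromDisk and extractMessagesFromCache.
		chunkStartOffset := (startOffset / chunkSize) * chunkSize
		positionInChunk := startOffset - chunkStartOffset
		fmt.Printf("offset %4d -> chunk %4d, position %3d\n", startOffset, chunkStartOffset, positionInChunk)
	}
}

For example, offset 2345 lands in the chunk starting at 2000, at position 345 within it; offsets 999 and 1000 fall in different chunks.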
@@ -246,13 +246,13 @@ func getCachedDiskChunk(logBuffer *LogBuffer, chunkStartOffset int64) ([]*filer_
func getCachedDiskChunk(logBuffer *LogBuffer, chunkStartOffset int64) ([]*filer_pb.LogEntry, bool) {
	logBuffer.diskChunkCache.mu.RLock()
	defer logBuffer.diskChunkCache.mu.RUnlock()

	if chunk, exists := logBuffer.diskChunkCache.chunks[chunkStartOffset]; exists {
		// Update last access time
		chunk.lastAccess = time.Now()
		return chunk.messages, true
	}

	return nil, false
}
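Worth noting, though untouched by this formatting-only commit: chunk.lastAccess is written while only the read lock is held, so two concurrent hits on the same chunk race on that field. A hypothetical race-free variant (illustrative names, using sync/atomic and time; not from the source) could store the access time atomically so cache hits can stay under the RLock:

// Hypothetical sketch, not part of this commit.
type cachedChunk struct {
	messages   []*filer_pb.LogEntry
	lastAccess atomic.Int64 // time.Time.UnixNano of the most recent hit
}

func (c *cachedChunk) touch() {
	c.lastAccess.Store(time.Now().UnixNano())
}

func (c *cachedChunk) lastAccessTime() time.Time {
	return time.Unix(0, c.lastAccess.Load())
}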
@@ -260,14 +260,14 @@ func getCachedDiskChunk(logBuffer *LogBuffer, chunkStartOffset int64) ([]*filer_
func cacheDiskChunk(logBuffer *LogBuffer, startOffset, endOffset int64, messages []*filer_pb.LogEntry) {
	logBuffer.diskChunkCache.mu.Lock()
	defer logBuffer.diskChunkCache.mu.Unlock()

	// Check if we need to evict old chunks (LRU policy)
	if len(logBuffer.diskChunkCache.chunks) >= logBuffer.diskChunkCache.maxChunks {
		// Find least recently used chunk
		var oldestOffset int64
		var oldestTime time.Time
		first := true
		for offset, chunk := range logBuffer.diskChunkCache.chunks {
			if first || chunk.lastAccess.Before(oldestTime) {
				oldestOffset = offset
@@ -275,12 +275,12 @@ func cacheDiskChunk(logBuffer *LogBuffer, startOffset, endOffset int64, messages
				first = false
			}
		}

		// Evict oldest chunk
		delete(logBuffer.diskChunkCache.chunks, oldestOffset)
		glog.V(4).Infof("[DiskCache] Evicted chunk at offset %d (LRU)", oldestOffset)
	}

	// Store new chunk
	logBuffer.diskChunkCache.chunks[startOffset] = &CachedDiskChunk{
		startOffset: startOffset,
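The eviction above scans the whole map for the oldest entry, O(maxChunks) per insert, a reasonable trade-off while the cache stays small. A self-contained demonstration of the same linear-scan LRU pattern (hypothetical names, not SeaweedFS code):

package main

import (
	"fmt"
	"time"
)

type chunk struct {
	lastAccess time.Time
}

// evictOldest removes the least recently used entry, mirroring the
// scan in cacheDiskChunk above.
func evictOldest(chunks map[int64]*chunk) {
	var oldestOffset int64
	var oldestTime time.Time
	first := true
	for offset, c := range chunks {
		if first || c.lastAccess.Before(oldestTime) {
			oldestOffset = offset
			oldestTime = c.lastAccess
			first = false
		}
	}
	delete(chunks, oldestOffset)
}

func main() {
	now := time.Now()
	chunks := map[int64]*chunk{
		0:    {lastAccess: now.Add(-2 * time.Minute)},
		1000: {lastAccess: now.Add(-5 * time.Minute)}, // least recently used
		2000: {lastAccess: now},
	}
	evictOldest(chunks)
	fmt.Println(len(chunks)) // 2: chunk 1000 was evicted
}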
@@ -296,39 +296,39 @@ func cacheDiskChunk(logBuffer *LogBuffer, startOffset, endOffset int64, messages
func extractMessagesFromCache(chunkMessages []*filer_pb.LogEntry, startOffset int64, maxMessages, maxBytes int) ([]*filer_pb.LogEntry, int64, error) {
	const chunkSize = 1000
	chunkStartOffset := (startOffset / chunkSize) * chunkSize

	// Calculate position within chunk
	positionInChunk := int(startOffset - chunkStartOffset)

	// Check if requested offset is within the chunk
	if positionInChunk < 0 || positionInChunk >= len(chunkMessages) {
		glog.V(4).Infof("[DiskCache] Requested offset %d out of chunk range (chunk start: %d, size: %d)",
			startOffset, chunkStartOffset, len(chunkMessages))
		return nil, startOffset, nil // Return empty but no error
	}

	// Extract messages starting from the requested position
	messages := make([]*filer_pb.LogEntry, 0, maxMessages)
	nextOffset := startOffset
	totalBytes := 0

	for i := positionInChunk; i < len(chunkMessages) && len(messages) < maxMessages; i++ {
		entry := chunkMessages[i]
		entrySize := proto.Size(entry)

		// Check byte limit
		if totalBytes > 0 && totalBytes+entrySize > maxBytes {
			break
		}

		messages = append(messages, entry)
		totalBytes += entrySize
		nextOffset++
	}

	glog.V(4).Infof("[DiskCache] Extracted %d messages from cache (offset %d-%d, bytes=%d)",
		len(messages), startOffset, nextOffset-1, totalBytes)

	return messages, nextOffset, nil
}
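One subtlety in the loop above: the byte limit is only checked once totalBytes > 0, so the first message is always returned even when it alone exceeds maxBytes. That guarantees a reader makes progress past an oversized entry instead of spinning on an empty result. A minimal illustration of the same admission rule (hypothetical sizes, not SeaweedFS code):

package main

import "fmt"

// admit mirrors the loop's byte-limit rule: always take the first entry,
// then stop before the running total would exceed maxBytes.
func admit(sizes []int, maxBytes int) (taken, totalBytes int) {
	for _, entrySize := range sizes {
		if totalBytes > 0 && totalBytes+entrySize > maxBytes {
			break
		}
		taken++
		totalBytes += entrySize
	}
	return
}

func main() {
	fmt.Println(admit([]int{4096, 10, 10}, 1024))  // 1 4096: oversized first entry still returned
	fmt.Println(admit([]int{500, 500, 500}, 1024)) // 2 1000: third entry would exceed the limit
}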
