diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 911895ff2..6dd760859 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -734,7 +734,7 @@ func (s3a *S3ApiServer) streamFromVolumeServers(w http.ResponseWriter, r *http.R
 
 	// Get chunks and validate BEFORE setting headers
 	chunks := entry.GetChunks()
-	glog.Infof("streamFromVolumeServers: entry has %d chunks, totalSize=%d, isRange=%v, offset=%d, size=%d",
+	glog.V(4).Infof("streamFromVolumeServers: entry has %d chunks, totalSize=%d, isRange=%v, offset=%d, size=%d",
 		len(chunks), totalSize, isRangeRequest, offset, size)
 
 	if len(chunks) == 0 {
@@ -752,9 +752,11 @@ func (s3a *S3ApiServer) streamFromVolumeServers(w http.ResponseWriter, r *http.R
 		return nil
 	}
 
-	// Log chunk details
-	for i, chunk := range chunks {
-		glog.Infof(" GET Chunk[%d]: fid=%s, offset=%d, size=%d", i, chunk.GetFileIdString(), chunk.Offset, chunk.Size)
+	// Log chunk details (verbose only - high frequency)
+	if glog.V(4) {
+		for i, chunk := range chunks {
+			glog.Infof(" GET Chunk[%d]: fid=%s, offset=%d, size=%d", i, chunk.GetFileIdString(), chunk.Offset, chunk.Size)
+		}
 	}
 
 	// CRITICAL: Resolve chunks and prepare stream BEFORE WriteHeader
@@ -816,13 +818,13 @@ func (s3a *S3ApiServer) streamFromVolumeServers(w http.ResponseWriter, r *http.R
 
 	// Stream directly to response
 	tStreamExec := time.Now()
-	glog.Infof("streamFromVolumeServers: starting streamFn, offset=%d, size=%d", offset, size)
+	glog.V(4).Infof("streamFromVolumeServers: starting streamFn, offset=%d, size=%d", offset, size)
 	err = streamFn(w)
 	streamExecTime = time.Since(tStreamExec)
 	if err != nil {
 		glog.Errorf("streamFromVolumeServers: streamFn failed: %v", err)
 	} else {
-		glog.Infof("streamFromVolumeServers: streamFn completed successfully")
+		glog.V(4).Infof("streamFromVolumeServers: streamFn completed successfully")
 	}
 	return err
 }
diff --git a/weed/s3api/s3api_object_handlers_put.go b/weed/s3api/s3api_object_handlers_put.go
index 106b3019b..6181c34d6 100644
--- a/weed/s3api/s3api_object_handlers_put.go
+++ b/weed/s3api/s3api_object_handlers_put.go
@@ -372,12 +372,14 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
 
 	// Step 3: Calculate MD5 hash and add SSE metadata to chunks
 	md5Sum := chunkResult.Md5Hash.Sum(nil)
-	glog.Infof("putToFiler: Chunked upload SUCCESS - path=%s, chunks=%d, size=%d",
+	glog.V(4).Infof("putToFiler: Chunked upload SUCCESS - path=%s, chunks=%d, size=%d",
 		filePath, len(chunkResult.FileChunks), chunkResult.TotalSize)
 
-	// Log chunk details for debugging
-	for i, chunk := range chunkResult.FileChunks {
-		glog.Infof(" PUT Chunk[%d]: fid=%s, offset=%d, size=%d", i, chunk.GetFileIdString(), chunk.Offset, chunk.Size)
+	// Log chunk details for debugging (verbose only - high frequency)
+	if glog.V(4) {
+		for i, chunk := range chunkResult.FileChunks {
+			glog.Infof(" PUT Chunk[%d]: fid=%s, offset=%d, size=%d", i, chunk.GetFileIdString(), chunk.Offset, chunk.Size)
+		}
 	}
 
 	// Add SSE metadata to all chunks if present
@@ -460,7 +462,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
 	// For single chunk: uses entry.Attributes.Md5
 	// For multiple chunks: uses filer.ETagChunks() which returns "-"
 	etag = filer.ETag(entry)
-	glog.Infof("putToFiler: Calculated ETag=%s for %d chunks", etag, len(chunkResult.FileChunks))
+	glog.V(4).Infof("putToFiler: Calculated ETag=%s for %d chunks", etag, len(chunkResult.FileChunks))
 
 	// Set object owner
 	amzAccountId := r.Header.Get(s3_constants.AmzAccountId)
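
For context, the change above follows the usual glog verbosity-gating idiom: wrapping the whole per-chunk loop in `if glog.V(4) { ... }` skips both the iteration and the message formatting unless the process runs at `-v=4` or higher, while single-line call sites gate inline with `glog.V(4).Infof(...)`. Below is a minimal, standalone sketch of that idiom using `github.com/golang/glog` (which the `weed/glog` package mirrors); the `chunkInfo` type, the `logChunks` helper, and the sample values are illustrative only and not part of the SeaweedFS code.

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

// chunkInfo is a stand-in for the real chunk type; illustrative only.
type chunkInfo struct {
	Fid    string
	Offset int64
	Size   int64
}

// logChunks mirrors the gated per-chunk logging pattern from the diff:
// glog.V(4) is a cheap check, so at default verbosity the loop and all
// Infof formatting are skipped entirely on this hot path.
func logChunks(chunks []chunkInfo) {
	if glog.V(4) {
		for i, c := range chunks {
			glog.Infof(" Chunk[%d]: fid=%s, offset=%d, size=%d", i, c.Fid, c.Offset, c.Size)
		}
	}
	// Single-line messages can gate inline instead of using an if block.
	glog.V(4).Infof("logged %d chunks", len(chunks))
}

func main() {
	flag.Parse() // glog registers -v, -logtostderr, etc.; run with -v=4 -logtostderr to see output
	logChunks([]chunkInfo{{Fid: "example-fid", Offset: 0, Size: 1024}})
	glog.Flush()
}
```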