Browse Source

less logs

pull/7481/head
chrislu 3 weeks ago
parent
commit
4afab0eca2
  1. 10
      weed/s3api/s3api_object_handlers.go
  2. 8
      weed/s3api/s3api_object_handlers_put.go

10
weed/s3api/s3api_object_handlers.go

@@ -734,7 +734,7 @@ func (s3a *S3ApiServer) streamFromVolumeServers(w http.ResponseWriter, r *http.R
// Get chunks and validate BEFORE setting headers
chunks := entry.GetChunks()
glog.Infof("streamFromVolumeServers: entry has %d chunks, totalSize=%d, isRange=%v, offset=%d, size=%d",
glog.V(4).Infof("streamFromVolumeServers: entry has %d chunks, totalSize=%d, isRange=%v, offset=%d, size=%d",
len(chunks), totalSize, isRangeRequest, offset, size)
if len(chunks) == 0 {
@@ -752,10 +752,12 @@ func (s3a *S3ApiServer) streamFromVolumeServers(w http.ResponseWriter, r *http.R
return nil
}
// Log chunk details
// Log chunk details (verbose only - high frequency)
if glog.V(4) {
for i, chunk := range chunks {
glog.Infof(" GET Chunk[%d]: fid=%s, offset=%d, size=%d", i, chunk.GetFileIdString(), chunk.Offset, chunk.Size)
}
}
// CRITICAL: Resolve chunks and prepare stream BEFORE WriteHeader
// This ensures we can write proper error responses if these operations fail
@@ -816,13 +818,13 @@ func (s3a *S3ApiServer) streamFromVolumeServers(w http.ResponseWriter, r *http.R
// Stream directly to response
tStreamExec := time.Now()
glog.Infof("streamFromVolumeServers: starting streamFn, offset=%d, size=%d", offset, size)
glog.V(4).Infof("streamFromVolumeServers: starting streamFn, offset=%d, size=%d", offset, size)
err = streamFn(w)
streamExecTime = time.Since(tStreamExec)
if err != nil {
glog.Errorf("streamFromVolumeServers: streamFn failed: %v", err)
} else {
glog.Infof("streamFromVolumeServers: streamFn completed successfully")
glog.V(4).Infof("streamFromVolumeServers: streamFn completed successfully")
}
return err
}

8
weed/s3api/s3api_object_handlers_put.go

@@ -372,13 +372,15 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
// Step 3: Calculate MD5 hash and add SSE metadata to chunks
md5Sum := chunkResult.Md5Hash.Sum(nil)
glog.Infof("putToFiler: Chunked upload SUCCESS - path=%s, chunks=%d, size=%d",
glog.V(4).Infof("putToFiler: Chunked upload SUCCESS - path=%s, chunks=%d, size=%d",
filePath, len(chunkResult.FileChunks), chunkResult.TotalSize)
// Log chunk details for debugging
// Log chunk details for debugging (verbose only - high frequency)
if glog.V(4) {
for i, chunk := range chunkResult.FileChunks {
glog.Infof(" PUT Chunk[%d]: fid=%s, offset=%d, size=%d", i, chunk.GetFileIdString(), chunk.Offset, chunk.Size)
}
}
// Add SSE metadata to all chunks if present
if customerKey != nil {
@@ -460,7 +462,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
// For single chunk: uses entry.Attributes.Md5
// For multiple chunks: uses filer.ETagChunks() which returns "<hash>-<count>"
etag = filer.ETag(entry)
glog.Infof("putToFiler: Calculated ETag=%s for %d chunks", etag, len(chunkResult.FileChunks))
glog.V(4).Infof("putToFiler: Calculated ETag=%s for %d chunks", etag, len(chunkResult.FileChunks))
// Set object owner
amzAccountId := r.Header.Get(s3_constants.AmzAccountId)

Loading…
Cancel
Save