Browse Source

adjust logs

pull/7481/head
chrislu 1 month ago
parent
commit
1b546819aa
  1. 2
      .github/workflows/test-s3-over-https-using-awscli.yml
  2. 18
      weed/s3api/s3api_object_handlers.go
  3. 5
      weed/s3api/s3api_object_handlers_put.go

2
.github/workflows/test-s3-over-https-using-awscli.yml

@@ -34,7 +34,7 @@ jobs:
       run: |
         set -e
         mkdir -p /tmp/data
-        ./weed server -s3 -dir=/tmp/data -s3.config=../docker/compose/s3.json &
+        ./weed -v=3 server -s3 -dir=/tmp/data -s3.config=../docker/compose/s3.json &
         until curl -s http://localhost:8333/ > /dev/null; do sleep 1; done
     - name: Setup Caddy

18
weed/s3api/s3api_object_handlers.go

@@ -677,6 +677,9 @@ func (s3a *S3ApiServer) streamFromVolumeServers(w http.ResponseWriter, r *http.R
 	// Get chunks
 	chunks := entry.GetChunks()
+	glog.V(3).Infof("streamFromVolumeServers: entry has %d chunks, totalSize=%d, isRange=%v, offset=%d, size=%d",
+		len(chunks), totalSize, isRangeRequest, offset, size)
 	if len(chunks) == 0 {
 		// BUG FIX: If totalSize > 0 but no chunks and no content, this is a data integrity issue
 		if totalSize > 0 && len(entry.Content) == 0 {
@@ -691,6 +694,11 @@ func (s3a *S3ApiServer) streamFromVolumeServers(w http.ResponseWriter, r *http.R
 		return nil
 	}
+	// Log chunk details
+	for i, chunk := range chunks {
+		glog.V(3).Infof("  Chunk[%d]: fid=%s, offset=%d, size=%d", i, chunk.GetFileIdString(), chunk.Offset, chunk.Size)
+	}
 	// Create lookup function via filer client (reuse shared helper)
 	ctx := r.Context()
 	lookupFileIdFn := s3a.createLookupFileIdFunction()
@@ -700,9 +708,8 @@ func (s3a *S3ApiServer) streamFromVolumeServers(w http.ResponseWriter, r *http.R
 	resolvedChunks, _, err := filer.ResolveChunkManifest(ctx, lookupFileIdFn, chunks, offset, offset+size)
 	chunkResolveTime = time.Since(tChunkResolve)
 	if err != nil {
-		if !isRangeRequest {
-			w.WriteHeader(http.StatusInternalServerError)
-		}
+		glog.Errorf("streamFromVolumeServers: failed to resolve chunks: %v", err)
+		// Don't try to write headers if we already wrote them for range request
 		return fmt.Errorf("failed to resolve chunks: %v", err)
 	}
@@ -723,9 +730,8 @@ func (s3a *S3ApiServer) streamFromVolumeServers(w http.ResponseWriter, r *http.R
 	)
 	streamPrepTime = time.Since(tStreamPrep)
 	if err != nil {
-		if !isRangeRequest {
-			w.WriteHeader(http.StatusInternalServerError)
-		}
+		glog.Errorf("streamFromVolumeServers: failed to prepare stream: %v", err)
+		// Don't try to write headers if we already wrote them for range request
 		return fmt.Errorf("failed to prepare stream: %v", err)
 	}

5
weed/s3api/s3api_object_handlers_put.go

@@ -378,6 +378,11 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
 	glog.V(3).Infof("putToFiler: Chunked upload SUCCESS - path=%s, chunks=%d, size=%d, etag=%s",
 		filePath, len(chunkResult.FileChunks), chunkResult.TotalSize, etag)
+	// Log chunk details for debugging
+	for i, chunk := range chunkResult.FileChunks {
+		glog.V(3).Infof("  Chunk[%d]: fid=%s, offset=%d, size=%d", i, chunk.GetFileIdString(), chunk.Offset, chunk.Size)
+	}
 	// Add SSE metadata to all chunks if present
 	if customerKey != nil {

Loading…
Cancel
Save