diff --git a/docker/README.md b/docker/README.md
index 007e86d45..07ccbdefa 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -44,7 +44,7 @@ docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,l
 docker buildx stop $BUILDER
 ```
 
-## Minio debuging
+## Minio debugging
 ```
 mc config host add local http://127.0.0.1:9000 some_access_key1 some_secret_key1
 mc admin trace --all --verbose local
diff --git a/docker/compose/notification.toml b/docker/compose/notification.toml
index dcd5f2c6f..d93d2ba87 100644
--- a/docker/compose/notification.toml
+++ b/docker/compose/notification.toml
@@ -1,5 +1,5 @@
 [notification.log]
-# this is only for debugging perpose and does not work with "weed filer.replicate"
+# this is only for debugging purposes and does not work with "weed filer.replicate"
 enabled = false
 
 
diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index 2c3a89ada..b37254bb5 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -22,7 +22,7 @@ var cmdScaffold = &Command{
 	export WEED_MYSQL_PASSWORD=some_password
 	Environment variable rules:
 	* Prefix the variable name with "WEED_"
-	* Upppercase the reset of variable name.
+	* Uppercase the rest of the variable name.
 	* Replace '.' with '_'
 
 	`,
diff --git a/weed/command/scaffold/filer.toml b/weed/command/scaffold/filer.toml
index 860d8b291..d8833a917 100644
--- a/weed/command/scaffold/filer.toml
+++ b/weed/command/scaffold/filer.toml
@@ -314,7 +314,7 @@ dialTimeOut = 10
 # To add path-specific filer store:
 #
 # 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
-# 2. Add a location configuraiton. E.g., location = "/tmp/"
+# 2. Add a location configuration. E.g., location = "/tmp/"
 # 3. Copy and customize all other configurations.
 #    Make sure they are not the same if using the same store type!
 # 4. Set enabled to true
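For readers following the four numbered steps above, a path-specific store entry might end up looking like the sketch below. Only the `cassandra.tmp` name and `location = "/tmp/"` come from the examples in the comments; the rest is a placeholder for whatever keys the base `[cassandra]` section actually defines.

```
[cassandra.tmp]     # step 1: store type plus a custom name, joined by a dot
location = "/tmp/"  # step 2: the path this store is dedicated to
# step 3: copy the remaining keys from the base [cassandra] section and
#         customize them so they do not clash with the base store
enabled = true      # step 4: turn the path-specific store on
```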
diff --git a/weed/command/scaffold/notification.toml b/weed/command/scaffold/notification.toml
index f35101edd..4ddb3d4f6 100644
--- a/weed/command/scaffold/notification.toml
+++ b/weed/command/scaffold/notification.toml
@@ -10,7 +10,7 @@
 # send and receive filer updates for each file to an external message queue
 ####################################################
 [notification.log]
-# this is only for debugging perpose and does not work with "weed filer.replicate"
+# this is only for debugging purposes and does not work with "weed filer.replicate"
 enabled = false
 
 
diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go
index f6279740c..c948ae713 100644
--- a/weed/filer/filer_deletion.go
+++ b/weed/filer/filer_deletion.go
@@ -94,10 +94,10 @@ func (f *Filer) doDeleteFileIds(fileIds []string) {
 }
 
 func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) {
-	var fildIdsToDelete []string
+	var fileIdsToDelete []string
 	for _, chunk := range chunks {
 		if !chunk.IsChunkManifest {
-			fildIdsToDelete = append(fildIdsToDelete, chunk.GetFileIdString())
+			fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
 			continue
 		}
 		dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
@@ -105,12 +105,12 @@ func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) {
 			glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
 		}
 		for _, dChunk := range dataChunks {
-			fildIdsToDelete = append(fildIdsToDelete, dChunk.GetFileIdString())
+			fileIdsToDelete = append(fileIdsToDelete, dChunk.GetFileIdString())
 		}
-		fildIdsToDelete = append(fildIdsToDelete, chunk.GetFileIdString())
+		fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
 	}
 
-	f.doDeleteFileIds(fildIdsToDelete)
+	f.doDeleteFileIds(fileIdsToDelete)
 }
 
 func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
diff --git a/weed/mount/meta_cache/cache_config.go b/weed/mount/meta_cache/cache_config.go
index b806df76c..5063bd400 100644
--- a/weed/mount/meta_cache/cache_config.go
+++ b/weed/mount/meta_cache/cache_config.go
@@ -6,7 +6,7 @@ var (
 	_ = util.Configuration(&cacheConfig{})
 )
 
-// implementing util.Configuraion
+// implementing util.Configuration
 type cacheConfig struct {
 	dir string
 }
diff --git a/weed/mount/meta_cache/meta_cache_subscribe.go b/weed/mount/meta_cache/meta_cache_subscribe.go
index 22f02c8c7..241777371 100644
--- a/weed/mount/meta_cache/meta_cache_subscribe.go
+++ b/weed/mount/meta_cache/meta_cache_subscribe.go
@@ -46,7 +46,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
 				mc.invalidateFunc(newKey, message.NewEntry)
 			}
 		} else if filer_pb.IsCreate(resp) {
-			// no need to invaalidate
+			// no need to invalidate
 		} else if filer_pb.IsDelete(resp) {
 			oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name)
 			mc.invalidateFunc(oldKey, message.OldEntry)
diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index 1193cc6f9..80b7ba0f3 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -117,7 +117,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 			}
 		}
 	}
-	
+
 	entryName := filepath.Base(*input.Key)
 	dirName := filepath.Dir(*input.Key)
 	if dirName == "." {
@@ -147,6 +147,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 		} else if mime != "" {
 			entry.Attributes.Mime = mime
 		}
+		entry.Attributes.FileSize = uint64(offset)
 	})
 
 	if err != nil {
@@ -244,6 +245,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
 		KeyMarker:  input.KeyMarker,
 		MaxUploads: input.MaxUploads,
 		Prefix:     input.Prefix,
+		IsTruncated: aws.Bool(false),
 	}
 
 	entries, _, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, math.MaxInt32)
diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go
index 3849eac19..49058c5b6 100644
--- a/weed/server/volume_grpc_client_to_master.go
+++ b/weed/server/volume_grpc_client_to_master.go
@@ -94,13 +94,13 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	grpcConection, err := pb.GrpcDial(ctx, masterAddress.ToGrpcAddress(), false, grpcDialOption)
+	grpcConnection, err := pb.GrpcDial(ctx, masterAddress.ToGrpcAddress(), false, grpcDialOption)
 	if err != nil {
 		return "", fmt.Errorf("fail to dial %s : %v", masterAddress, err)
 	}
-	defer grpcConection.Close()
+	defer grpcConnection.Close()
 
-	client := master_pb.NewSeaweedClient(grpcConection)
+	client := master_pb.NewSeaweedClient(grpcConnection)
 	stream, err := client.SendHeartbeat(ctx)
 	if err != nil {
 		glog.V(0).Infof("SendHeartbeat to %s: %v", masterAddress, err)
diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go
index bbca859ac..1f6fc0568 100644
--- a/weed/topology/store_replicate.go
+++ b/weed/topology/store_replicate.go
@@ -205,7 +205,7 @@ func GetWritableRemoteReplications(s *storage.Store, grpcDialOption grpc.DialOpt
 		// has one local and has remote replications
 		copyCount := v.ReplicaPlacement.GetCopyCount()
 		if len(lookupResult.Locations) < copyCount {
-			err = fmt.Errorf("replicating opetations [%d] is less than volume %d replication copy count [%d]",
+			err = fmt.Errorf("replicating operations [%d] is less than volume %d replication copy count [%d]",
 				len(lookupResult.Locations), volumeId, copyCount)
 		}
 	}
diff --git a/weed/topology/volume_layout_test.go b/weed/topology/volume_layout_test.go
index ce2fa0d19..b5646fb13 100644
--- a/weed/topology/volume_layout_test.go
+++ b/weed/topology/volume_layout_test.go
@@ -54,7 +54,7 @@ func TestVolumesBinaryState(t *testing.T) {
 		expectResultAfterUpdate []bool
 	}{
 		{
-			name:  "mark true when exist copies",
+			name:  "mark true when copies exist",
 			state: state_exist,
 			expectResult: []bool{true, true, true, false, true},
 			update: func() {
@@ -67,7 +67,7 @@ func TestVolumesBinaryState(t *testing.T) {
 			expectResultAfterUpdate: []bool{true, false, true, false, false},
 		},
 		{
-			name:  "mark true when inexist copies",
+			name:  "mark true when no copies exist",
 			state: state_no,
 			expectResult: []bool{false, true, true, false, true},
 			update: func() {
@@ -92,7 +92,7 @@ func TestVolumesBinaryState(t *testing.T) {
 		}
 		for index, val := range result {
 			if val != test.expectResult[index] {
-				t.Fatalf("result not matched, index %d, got %v, expect %v\n",
+				t.Fatalf("result not matched, index %d, got %v, expected %v\n",
 					index, val, test.expectResult[index])
 			}
 		}
@@ -107,7 +107,7 @@ func TestVolumesBinaryState(t *testing.T) {
 		}
 		for index, val := range updateResult {
 			if val != test.expectResultAfterUpdate[index] {
-				t.Fatalf("update result not matched, index %d, got %v, expect %v\n",
+				t.Fatalf("update result not matched, index %d, got %v, expected %v\n",
 					index, val, test.expectResultAfterUpdate[index])
 			}
 		}
diff --git a/weed/util/chunk_cache/chunk_cache_on_disk.go b/weed/util/chunk_cache/chunk_cache_on_disk.go
index 9252f99eb..3fc9cf0b4 100644
--- a/weed/util/chunk_cache/chunk_cache_on_disk.go
+++ b/weed/util/chunk_cache/chunk_cache_on_disk.go
@@ -128,7 +128,7 @@ func (v *ChunkCacheVolume) getNeedleSlice(key types.NeedleId, offset, length uin
 	}
 	wanted := min(int(length), int(nv.Size)-int(offset))
 	if wanted < 0 {
-		// should never happen, but better than panicing
+		// should never happen, but better than panicking
 		return nil, ErrorOutOfBounds
 	}
 	data := make([]byte, wanted)
@@ -151,7 +151,7 @@ func (v *ChunkCacheVolume) readNeedleSliceAt(data []byte, key types.NeedleId, of
 	}
 	wanted := min(len(data), int(nv.Size)-int(offset))
 	if wanted < 0 {
-		// should never happen, but better than panicing
+		// should never happen, but better than panicking
		return 0, ErrorOutOfBounds
 	}
 	if n, err = v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()+int64(offset)); err != nil {
diff --git a/weed/wdclient/resource_pool/semaphore.go b/weed/wdclient/resource_pool/semaphore.go
index ff35d5bc5..9bd6afc33 100644
--- a/weed/wdclient/resource_pool/semaphore.go
+++ b/weed/wdclient/resource_pool/semaphore.go
@@ -105,7 +105,7 @@ func (s *unboundedSemaphore) Release() {
 	s.lock.Lock()
 	s.counter += 1
 	if s.counter > 0 {
-		// Not broadcasting here since it's unlike we can satify all waiting
+		// Not broadcasting here since it's unlikely we can satisfy all waiting
 		// goroutines. Instead, we will Signal again if there are left over
 		// quota after Acquire, in case of lost wakeups.
 		s.cond.Signal()
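The comment fixed in the last hunk documents a deliberate design choice: Release wakes a single waiter with Signal rather than Broadcast, and Acquire signals again when quota is left over so a lost wakeup cannot strand a waiter. The sketch below only illustrates that pattern under assumed names (sketchSemaphore and its methods are not the project's actual types); it is not the resource_pool implementation itself.

```
package sketch

import "sync"

// sketchSemaphore is a minimal stand-in used only to illustrate the
// signal-instead-of-broadcast strategy described in the comment above.
type sketchSemaphore struct {
	lock    sync.Mutex
	cond    *sync.Cond
	counter int
}

func newSketchSemaphore(initial int) *sketchSemaphore {
	s := &sketchSemaphore{counter: initial}
	s.cond = sync.NewCond(&s.lock)
	return s
}

func (s *sketchSemaphore) Acquire() {
	s.lock.Lock()
	defer s.lock.Unlock()
	for s.counter < 1 {
		s.cond.Wait()
	}
	s.counter--
	if s.counter > 0 {
		// Quota is still available, so signal once more; this covers the
		// case where an earlier wakeup was lost.
		s.cond.Signal()
	}
}

func (s *sketchSemaphore) Release() {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.counter++
	// Wake one waiter instead of broadcasting: a single returned unit of
	// quota can satisfy at most one waiting goroutine.
	s.cond.Signal()
}
```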