From a23bcbb7ecf93fcda35976f4f2fb42a67830e718 Mon Sep 17 00:00:00 2001 From: Eng Zer Jun Date: Thu, 14 Oct 2021 12:27:58 +0800 Subject: [PATCH] refactor: move from io/ioutil to io and os package The io/ioutil package has been deprecated as of Go 1.16, see https://golang.org/doc/go1.16#ioutil. This commit replaces the existing io/ioutil functions with their new definitions in io and os packages. Signed-off-by: Eng Zer Jun --- test/s3/basic/basic_test.go | 15 ++++----- .../bench_filer_upload/bench_filer_upload.go | 5 ++- .../stress_filer_upload.go | 5 ++- weed/command/download.go | 12 +++---- weed/command/filer_copy.go | 10 +++--- weed/command/scaffold.go | 7 +++-- weed/filer/leveldb/leveldb_store_test.go | 7 ++--- weed/filer/leveldb2/leveldb2_store_test.go | 5 ++- weed/filer/leveldb3/leveldb3_store_test.go | 5 ++- weed/filer/rocksdb/rocksdb_store_test.go | 8 ++--- weed/images/orientation_test.go | 5 ++- weed/images/resizing_test.go | 5 ++- weed/operation/chunked_file.go | 5 ++- weed/operation/upload_content.go | 5 ++- .../azure/azure_storage_client.go | 12 +++---- weed/remote_storage/gcs/gcs_storage_client.go | 12 +++---- weed/replication/sub/notification_kafka.go | 6 ++-- weed/s3api/auth_credentials.go | 9 +++--- weed/s3api/auth_signature_v4.go | 13 ++++---- weed/s3api/auto_signature_v4_test.go | 8 ++--- weed/s3api/s3api_object_handlers.go | 10 +++--- .../s3api/s3api_object_handlers_postpolicy.go | 16 +++++----- weed/s3api/s3api_object_tagging_handlers.go | 8 ++--- weed/security/tls.go | 16 +++++----- .../filer_server_handlers_write_upload.go | 5 ++- weed/server/volume_grpc_copy.go | 7 ++--- weed/server/volume_grpc_erasure_coding.go | 9 +++--- weed/shell/command_volume_fsck.go | 9 +++--- weed/storage/disk_location.go | 31 +++++++++---------- weed/storage/disk_location_ec.go | 13 ++++---- weed/storage/needle/needle_parse_upload.go | 5 ++- weed/storage/needle/needle_read_write_test.go | 3 +- weed/storage/needle_map_metric_test.go | 4 +-- weed/storage/volume_info/volume_info.go | 11 +++---- weed/storage/volume_vacuum_test.go | 5 ++- weed/storage/volume_write_test.go | 10 +++--- .../chunk_cache/chunk_cache_on_disk_test.go | 3 +- weed/util/http_util.go | 15 +++++---- 38 files changed, 160 insertions(+), 179 deletions(-) diff --git a/test/s3/basic/basic_test.go b/test/s3/basic/basic_test.go index 653fa1237..b2a6ae14b 100644 --- a/test/s3/basic/basic_test.go +++ b/test/s3/basic/basic_test.go @@ -2,14 +2,15 @@ package basic import ( "fmt" + "io" + "os" + "strings" + "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "io/ioutil" - "os" - "strings" - "testing" ) var ( @@ -108,8 +109,8 @@ func TestListBucket(t *testing.T) { func TestListObjectV2(t *testing.T) { listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{ - Bucket: aws.String(Bucket), - Prefix: aws.String("foo"), + Bucket: aws.String(Bucket), + Prefix: aws.String("foo"), Delimiter: aws.String("/"), }) if err != nil { @@ -169,7 +170,7 @@ func TestObjectOp(t *testing.T) { exitErrorf("Unable to get copy object, %v", err) } - data, err := ioutil.ReadAll(getObj.Body) + data, err := io.ReadAll(getObj.Body) if err != nil { exitErrorf("Unable to read object data, %v", err) } diff --git a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go index 2ee8028f2..a41bf1da1 100644 --- 
a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go +++ b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go @@ -5,7 +5,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" "math/rand" "mime/multipart" @@ -45,7 +44,7 @@ func main() { defer wg.Done() client := &http.Client{Transport: &http.Transport{ - MaxIdleConns: 1024, + MaxIdleConns: 1024, MaxIdleConnsPerHost: 1024, }} r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x))) @@ -128,7 +127,7 @@ func uploadFileToFiler(client *http.Client, data []byte, filename, destination s if err != nil { return 0, fmt.Errorf("read http POST %s response: %v", uri, err) } - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } diff --git a/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go index 8b986b546..83df54dc3 100644 --- a/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go +++ b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go @@ -5,7 +5,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" "math/rand" "mime/multipart" @@ -36,7 +35,7 @@ func main() { var fileNames []string - files, err := ioutil.ReadDir(*dir) + files, err := os.ReadDir(*dir) if err != nil { log.Fatalf("fail to read dir %v: %v", *dir, err) } @@ -142,7 +141,7 @@ func uploadFileToFiler(client *http.Client, filename, destination string) (size if err != nil { return 0, fmt.Errorf("read http POST %s response: %v", uri, err) } - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } diff --git a/weed/command/download.go b/weed/command/download.go index 1d8a72d31..a3c05b53d 100644 --- a/weed/command/download.go +++ b/weed/command/download.go @@ -2,17 +2,17 @@ package command import ( "fmt" - "github.com/chrislusf/seaweedfs/weed/pb" - "github.com/chrislusf/seaweedfs/weed/security" - "google.golang.org/grpc" "io" - "io/ioutil" "net/http" "os" "path" "strings" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -82,7 +82,7 @@ func downloadToFile(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpti } defer f.Close() if isFileList { - content, err := ioutil.ReadAll(rc.Body) + content, err := io.ReadAll(rc.Body) if err != nil { return err } @@ -119,7 +119,7 @@ func fetchContent(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption return "", nil, e } defer util.CloseResponse(rc) - content, e = ioutil.ReadAll(rc.Body) + content, e = io.ReadAll(rc.Body) return } diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 2f3b69da6..8a8701828 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -3,9 +3,7 @@ package command import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer" "io" - "io/ioutil" "net/http" "os" "path/filepath" @@ -16,14 +14,14 @@ import ( "google.golang.org/grpc" - "github.com/chrislusf/seaweedfs/weed/util/grace" - + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" + 
"github.com/chrislusf/seaweedfs/weed/util/grace" "github.com/chrislusf/seaweedfs/weed/wdclient" ) @@ -212,7 +210,7 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi } if mode.IsDir() { - files, _ := ioutil.ReadDir(fileOrDir) + files, _ := os.ReadDir(fileOrDir) for _, subFileOrDir := range files { cleanedDestDirectory := filepath.Clean(destPath + fi.Name()) if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), cleanedDestDirectory+"/", fileCopyTaskChan); err != nil { @@ -339,7 +337,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err if task.fileMode&os.ModeDir == 0 && task.fileSize > 0 { mimeType = detectMimeType(f) - data, err := ioutil.ReadAll(f) + data, err := io.ReadAll(f) if err != nil { return err } diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 886c0ac5e..6fcbd7efb 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -2,9 +2,10 @@ package command import ( "fmt" - "github.com/chrislusf/seaweedfs/weed/command/scaffold" - "io/ioutil" + "os" "path/filepath" + + "github.com/chrislusf/seaweedfs/weed/command/scaffold" ) func init() { @@ -55,7 +56,7 @@ func runScaffold(cmd *Command, args []string) bool { } if *outputPath != "" { - ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644) + os.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644) } else { fmt.Println(content) } diff --git a/weed/filer/leveldb/leveldb_store_test.go b/weed/filer/leveldb/leveldb_store_test.go index 7149d84d2..2476e063c 100644 --- a/weed/filer/leveldb/leveldb_store_test.go +++ b/weed/filer/leveldb/leveldb_store_test.go @@ -3,7 +3,6 @@ package leveldb import ( "context" "fmt" - "io/ioutil" "os" "testing" "time" @@ -14,7 +13,7 @@ import ( func TestCreateAndFind(t *testing.T) { testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) - dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") + dir, _ := os.MkdirTemp("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDBStore{} store.initialize(dir) @@ -68,7 +67,7 @@ func TestCreateAndFind(t *testing.T) { func TestEmptyRoot(t *testing.T) { testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) - dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") + dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDBStore{} store.initialize(dir) @@ -91,7 +90,7 @@ func TestEmptyRoot(t *testing.T) { func BenchmarkInsertEntry(b *testing.B) { testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) - dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench") + dir, _ := os.MkdirTemp("", "seaweedfs_filer_bench") defer os.RemoveAll(dir) store := &LevelDBStore{} store.initialize(dir) diff --git a/weed/filer/leveldb2/leveldb2_store_test.go b/weed/filer/leveldb2/leveldb2_store_test.go index 9564feaab..93c622fd9 100644 --- a/weed/filer/leveldb2/leveldb2_store_test.go +++ b/weed/filer/leveldb2/leveldb2_store_test.go @@ -2,7 +2,6 @@ package leveldb import ( "context" - "io/ioutil" "os" "testing" @@ -12,7 +11,7 @@ import ( func TestCreateAndFind(t *testing.T) { testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) - dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") + dir, _ := os.MkdirTemp("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDB2Store{} store.initialize(dir, 2) @@ -66,7 +65,7 @@ func TestCreateAndFind(t *testing.T) { func TestEmptyRoot(t *testing.T) { testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) - dir, _ := 
ioutil.TempDir("", "seaweedfs_filer_test2") + dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDB2Store{} store.initialize(dir, 2) diff --git a/weed/filer/leveldb3/leveldb3_store_test.go b/weed/filer/leveldb3/leveldb3_store_test.go index 6e7acf51c..a5e97cf10 100644 --- a/weed/filer/leveldb3/leveldb3_store_test.go +++ b/weed/filer/leveldb3/leveldb3_store_test.go @@ -2,7 +2,6 @@ package leveldb import ( "context" - "io/ioutil" "os" "testing" @@ -12,7 +11,7 @@ import ( func TestCreateAndFind(t *testing.T) { testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) - dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") + dir, _ := os.MkdirTemp("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDB3Store{} store.initialize(dir) @@ -66,7 +65,7 @@ func TestCreateAndFind(t *testing.T) { func TestEmptyRoot(t *testing.T) { testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) - dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") + dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDB3Store{} store.initialize(dir) diff --git a/weed/filer/rocksdb/rocksdb_store_test.go b/weed/filer/rocksdb/rocksdb_store_test.go index f6e755b4b..fbf8b3112 100644 --- a/weed/filer/rocksdb/rocksdb_store_test.go +++ b/weed/filer/rocksdb/rocksdb_store_test.go @@ -1,3 +1,4 @@ +//go:build rocksdb // +build rocksdb package rocksdb @@ -5,7 +6,6 @@ package rocksdb import ( "context" "fmt" - "io/ioutil" "os" "testing" "time" @@ -16,7 +16,7 @@ import ( func TestCreateAndFind(t *testing.T) { testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) - dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") + dir, _ := os.MkdirTemp("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &RocksDBStore{} store.initialize(dir) @@ -70,7 +70,7 @@ func TestCreateAndFind(t *testing.T) { func TestEmptyRoot(t *testing.T) { testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) - dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") + dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &RocksDBStore{} store.initialize(dir) @@ -93,7 +93,7 @@ func TestEmptyRoot(t *testing.T) { func BenchmarkInsertEntry(b *testing.B) { testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) - dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench") + dir, _ := os.MkdirTemp("", "seaweedfs_filer_bench") defer os.RemoveAll(dir) store := &RocksDBStore{} store.initialize(dir) diff --git a/weed/images/orientation_test.go b/weed/images/orientation_test.go index 32fa38f76..e9743bc0c 100644 --- a/weed/images/orientation_test.go +++ b/weed/images/orientation_test.go @@ -1,7 +1,6 @@ package images import ( - "io/ioutil" "os" "testing" ) @@ -9,11 +8,11 @@ import ( func TestXYZ(t *testing.T) { fname := "sample1.jpg" - dat, _ := ioutil.ReadFile(fname) + dat, _ := os.ReadFile(fname) fixed_data := FixJpgOrientation(dat) - ioutil.WriteFile("fixed1.jpg", fixed_data, 0644) + os.WriteFile("fixed1.jpg", fixed_data, 0644) os.Remove("fixed1.jpg") diff --git a/weed/images/resizing_test.go b/weed/images/resizing_test.go index d12f799d8..33b904445 100644 --- a/weed/images/resizing_test.go +++ b/weed/images/resizing_test.go @@ -2,7 +2,6 @@ package images import ( "bytes" - "io/ioutil" "os" "testing" ) @@ -10,13 +9,13 @@ import ( func TestResizing(t *testing.T) { fname := "sample2.webp" - dat, _ := ioutil.ReadFile(fname) + dat, _ := os.ReadFile(fname) resized, _, _ := Resized(".webp", bytes.NewReader(dat), 100, 30, "") buf := 
new(bytes.Buffer) buf.ReadFrom(resized) - ioutil.WriteFile("resized1.png", buf.Bytes(), 0644) + os.WriteFile("resized1.png", buf.Bytes(), 0644) os.Remove("resized1.png") diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index 0227db1bf..45068bbcc 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -4,9 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "github.com/chrislusf/seaweedfs/weed/pb" "io" - "io/ioutil" "net/http" "sort" "sync" @@ -14,6 +12,7 @@ import ( "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -108,7 +107,7 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64, jwt string) (wri return written, err } defer func() { - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() }() diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 33f6fba10..ade324005 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" @@ -91,7 +90,7 @@ func doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResul if ok { data = bytesReader.Bytes } else { - data, err = ioutil.ReadAll(reader) + data, err = io.ReadAll(reader) if err != nil { err = fmt.Errorf("read input: %v", err) return @@ -278,7 +277,7 @@ func upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize return &ret, nil } - resp_body, ra_err := ioutil.ReadAll(resp.Body) + resp_body, ra_err := io.ReadAll(resp.Body) if ra_err != nil { return nil, fmt.Errorf("read response body %v: %v", option.UploadUrl, ra_err) } diff --git a/weed/remote_storage/azure/azure_storage_client.go b/weed/remote_storage/azure/azure_storage_client.go index 2fab3adb7..1a259a3e2 100644 --- a/weed/remote_storage/azure/azure_storage_client.go +++ b/weed/remote_storage/azure/azure_storage_client.go @@ -3,17 +3,17 @@ package azure import ( "context" "fmt" + "io" + "net/url" + "os" + "reflect" + "github.com/Azure/azure-storage-blob-go/azblob" "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" "github.com/chrislusf/seaweedfs/weed/remote_storage" "github.com/chrislusf/seaweedfs/weed/util" - "io" - "io/ioutil" - "net/url" - "os" - "reflect" ) func init() { @@ -115,7 +115,7 @@ func (az *azureRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocatio bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20}) defer bodyStream.Close() - data, err = ioutil.ReadAll(bodyStream) + data, err = io.ReadAll(bodyStream) if err != nil { return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err) diff --git a/weed/remote_storage/gcs/gcs_storage_client.go b/weed/remote_storage/gcs/gcs_storage_client.go index fce7ba945..788d4b1e0 100644 --- a/weed/remote_storage/gcs/gcs_storage_client.go +++ b/weed/remote_storage/gcs/gcs_storage_client.go @@ -1,9 +1,13 @@ package gcs import ( - "cloud.google.com/go/storage" "context" "fmt" + "io" + "os" + "reflect" + + "cloud.google.com/go/storage" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" @@ -11,10 +15,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/api/iterator" 
"google.golang.org/api/option" - "io" - "io/ioutil" - "os" - "reflect" ) func init() { @@ -110,7 +110,7 @@ func (gcs *gcsRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation if readErr != nil { return nil, readErr } - data, err = ioutil.ReadAll(rangeReader) + data, err = io.ReadAll(rangeReader) if err != nil { return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err) diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go index 622a759ea..41a4caaf3 100644 --- a/weed/replication/sub/notification_kafka.go +++ b/weed/replication/sub/notification_kafka.go @@ -3,7 +3,7 @@ package sub import ( "encoding/json" "fmt" - "io/ioutil" + "os" "sync" "time" @@ -119,7 +119,7 @@ type KafkaProgress struct { func loadProgress(offsetFile string) *KafkaProgress { progress := &KafkaProgress{} - data, err := ioutil.ReadFile(offsetFile) + data, err := os.ReadFile(offsetFile) if err != nil { glog.Warningf("failed to read kafka progress file: %s", offsetFile) return nil @@ -137,7 +137,7 @@ func (progress *KafkaProgress) saveProgress() error { if err != nil { return fmt.Errorf("failed to marshal progress: %v", err) } - err = ioutil.WriteFile(progress.offsetFile, data, 0640) + err = os.WriteFile(progress.offsetFile, data, 0640) if err != nil { return fmt.Errorf("failed to save progress to %s: %v", progress.offsetFile, err) } diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go index cd1b3adfb..998a74625 100644 --- a/weed/s3api/auth_credentials.go +++ b/weed/s3api/auth_credentials.go @@ -2,6 +2,10 @@ package s3api import ( "fmt" + "net/http" + "os" + "strings" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb" @@ -10,9 +14,6 @@ import ( xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "github.com/chrislusf/seaweedfs/weed/s3api/s3err" - "io/ioutil" - "net/http" - "strings" ) type Action string @@ -91,7 +92,7 @@ func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3A } func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error { - content, readErr := ioutil.ReadFile(fileName) + content, readErr := os.ReadFile(fileName) if readErr != nil { glog.Warningf("fail to read %s : %v", fileName, readErr) return fmt.Errorf("fail to read %s : %v", fileName, readErr) diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go index 0df26e6fc..a49caad06 100644 --- a/weed/s3api/auth_signature_v4.go +++ b/weed/s3api/auth_signature_v4.go @@ -23,8 +23,7 @@ import ( "crypto/sha256" "crypto/subtle" "encoding/hex" - "github.com/chrislusf/seaweedfs/weed/s3api/s3err" - "io/ioutil" + "io" "net/http" "net/url" "regexp" @@ -33,6 +32,8 @@ import ( "strings" "time" "unicode/utf8" + + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" ) func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, s3err.ErrorCode) { @@ -135,9 +136,9 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r // Get hashed Payload if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil { - buf, _ := ioutil.ReadAll(r.Body) - r.Body = ioutil.NopCloser(bytes.NewBuffer(buf)) - b, _ := ioutil.ReadAll(bytes.NewBuffer(buf)) + buf, _ := io.ReadAll(r.Body) + r.Body = io.NopCloser(bytes.NewBuffer(buf)) + b, _ := io.ReadAll(bytes.NewBuffer(buf)) if len(b) != 0 { 
bodyHash := sha256.Sum256(b) hashedPayload = hex.EncodeToString(bodyHash[:]) @@ -433,7 +434,7 @@ func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload s } } - /// Verify finally if signature is same. + // / Verify finally if signature is same. // Get canonical request. presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go index b47cd5f2d..a58551187 100644 --- a/weed/s3api/auto_signature_v4_test.go +++ b/weed/s3api/auto_signature_v4_test.go @@ -8,9 +8,7 @@ import ( "encoding/hex" "errors" "fmt" - "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "io" - "io/ioutil" "net/http" "net/url" "sort" @@ -19,6 +17,8 @@ import ( "testing" "time" "unicode/utf8" + + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" ) // TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature verision v4 detection. @@ -86,7 +86,7 @@ func TestIsReqAuthenticated(t *testing.T) { // Validates all testcases. for i, testCase := range testCases { if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error { - ioutil.ReadAll(testCase.req.Body) + io.ReadAll(testCase.req.Body) t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error) } } @@ -167,7 +167,7 @@ func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeek case body == nil: hashedPayload = getSHA256Hash([]byte{}) default: - payloadBytes, err := ioutil.ReadAll(body) + payloadBytes, err := io.ReadAll(body) if err != nil { return nil, err } diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 54b6da61c..0b7e8043f 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -5,16 +5,16 @@ import ( "encoding/json" "encoding/xml" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer" - "github.com/pquerna/cachecontrol/cacheobject" "io" - "io/ioutil" "net/http" "net/url" "sort" "strings" "time" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/pquerna/cachecontrol/cacheobject" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/gorilla/mux" @@ -198,7 +198,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h bucket, _ := getBucketAndObject(r) glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket) - deleteXMLBytes, err := ioutil.ReadAll(r.Body) + deleteXMLBytes, err := io.ReadAll(r.Body) if err != nil { s3err.WriteErrorResponse(w, s3err.ErrInternalError, r) return @@ -394,7 +394,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader etag = fmt.Sprintf("%x", hash.Sum(nil)) - resp_body, ra_err := ioutil.ReadAll(resp.Body) + resp_body, ra_err := io.ReadAll(resp.Body) if ra_err != nil { glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err) return etag, s3err.ErrInternalError diff --git a/weed/s3api/s3api_object_handlers_postpolicy.go b/weed/s3api/s3api_object_handlers_postpolicy.go index c0e2589ae..cccbd2442 100644 --- a/weed/s3api/s3api_object_handlers_postpolicy.go +++ b/weed/s3api/s3api_object_handlers_postpolicy.go @@ -5,17 +5,17 @@ import ( "encoding/base64" "errors" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/s3api/policy" - "github.com/chrislusf/seaweedfs/weed/s3api/s3err" - "github.com/dustin/go-humanize" - "github.com/gorilla/mux" "io" - "io/ioutil" "mime/multipart" 
"net/http" "net/url" "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/policy" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/dustin/go-humanize" + "github.com/gorilla/mux" ) func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) { @@ -152,7 +152,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R // Extract form fields and file data from a HTTP POST Policy func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) { - /// HTML Form values + // / HTML Form values fileName = "" // Canonicalize the form values into http.Header. @@ -175,7 +175,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, b.WriteString(v) } fileSize = int64(b.Len()) - filePart = ioutil.NopCloser(b) + filePart = io.NopCloser(b) return filePart, fileName, fileSize, formValues, nil } diff --git a/weed/s3api/s3api_object_tagging_handlers.go b/weed/s3api/s3api_object_tagging_handlers.go index 2ee339e29..4daee5485 100644 --- a/weed/s3api/s3api_object_tagging_handlers.go +++ b/weed/s3api/s3api_object_tagging_handlers.go @@ -3,13 +3,13 @@ package s3api import ( "encoding/xml" "fmt" + "io" + "net/http" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/chrislusf/seaweedfs/weed/util" - "io" - "io/ioutil" - "net/http" ) // GetObjectTaggingHandler - GET object tagging @@ -49,7 +49,7 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R dir, name := target.DirAndName() tagging := &Tagging{} - input, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength)) + input, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength)) if err != nil { glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err) s3err.WriteErrorResponse(w, s3err.ErrInternalError, r) diff --git a/weed/security/tls.go b/weed/security/tls.go index 7d3ffcdca..2f01af1e7 100644 --- a/weed/security/tls.go +++ b/weed/security/tls.go @@ -4,18 +4,18 @@ import ( "context" "crypto/tls" "crypto/x509" - "github.com/chrislusf/seaweedfs/weed/util" - grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/status" - "io/ioutil" + "os" "strings" + grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" ) type Authenticator struct { @@ -37,7 +37,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption err) return nil, nil } - caCert, err := ioutil.ReadFile(config.GetString("grpc.ca")) + caCert, err := os.ReadFile(config.GetString("grpc.ca")) if err != nil { glog.V(1).Infof("read ca cert file %s error: %v", config.GetString("grpc.ca"), err) return nil, nil @@ -82,7 +82,7 @@ func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption { glog.V(1).Infof("load cert/key error: %v", err) return grpc.WithInsecure() } - caCert, err := ioutil.ReadFile(caFileName) + caCert, err := os.ReadFile(caFileName) if err != nil { glog.V(1).Infof("read ca cert file error: %v", err) return grpc.WithInsecure() 
diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go index 196d7638e..a7716ef02 100644 --- a/weed/server/filer_server_handlers_write_upload.go +++ b/weed/server/filer_server_handlers_write_upload.go @@ -5,7 +5,6 @@ import ( "crypto/md5" "hash" "io" - "io/ioutil" "net/http" "sort" "strings" @@ -31,7 +30,7 @@ var bufPool = sync.Pool{ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) { md5Hash = md5.New() - var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash)) + var partReader = io.NopCloser(io.TeeReader(reader, md5Hash)) var wg sync.WaitGroup var bytesBufferCounter int64 @@ -57,7 +56,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque dataSize, err := bytesBuffer.ReadFrom(limitedReader) - // data, err := ioutil.ReadAll(limitedReader) + // data, err := io.ReadAll(limitedReader) if err != nil || dataSize == 0 { bufPool.Put(bytesBuffer) atomic.AddInt64(&bytesBufferCounter, -1) diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go index e046481fb..10a4ec473 100644 --- a/weed/server/volume_grpc_copy.go +++ b/weed/server/volume_grpc_copy.go @@ -3,20 +3,19 @@ package weed_server import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/pb" - "github.com/chrislusf/seaweedfs/weed/storage/types" "io" - "io/ioutil" "math" "os" "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -68,7 +67,7 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo dataBaseFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId)) indexBaseFileName = storage.VolumeFileName(location.IdxDirectory, volFileInfoResp.Collection, int(req.VolumeId)) - ioutil.WriteFile(dataBaseFileName+".note", []byte(fmt.Sprintf("copying from %s", req.SourceDataNode)), 0755) + os.WriteFile(dataBaseFileName+".note", []byte(fmt.Sprintf("copying from %s", req.SourceDataNode)), 0755) defer func() { if err != nil { diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go index 653883c8e..72c65c04c 100644 --- a/weed/server/volume_grpc_erasure_coding.go +++ b/weed/server/volume_grpc_erasure_coding.go @@ -3,10 +3,7 @@ package weed_server import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/pb" - "github.com/chrislusf/seaweedfs/weed/storage/volume_info" "io" - "io/ioutil" "math" "os" "path" @@ -14,11 +11,13 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/types" + 
"github.com/chrislusf/seaweedfs/weed/storage/volume_info" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -200,12 +199,12 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se existingShardCount := 0 for _, location := range vs.store.Locations { - fileInfos, err := ioutil.ReadDir(location.Directory) + fileInfos, err := os.ReadDir(location.Directory) if err != nil { continue } if location.IdxDirectory != location.Directory { - idxFileInfos, err := ioutil.ReadDir(location.IdxDirectory) + idxFileInfos, err := os.ReadDir(location.IdxDirectory) if err != nil { continue } diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go index ce5ea66bf..119973a5b 100644 --- a/weed/shell/command_volume_fsck.go +++ b/weed/shell/command_volume_fsck.go @@ -5,10 +5,7 @@ import ( "context" "flag" "fmt" - "github.com/chrislusf/seaweedfs/weed/pb" - "github.com/chrislusf/seaweedfs/weed/storage/needle" "io" - "io/ioutil" "math" "os" "path/filepath" @@ -16,9 +13,11 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" @@ -74,7 +73,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io. c.env = commandEnv // create a temp folder - tempFolder, err := ioutil.TempDir("", "sw_fsck") + tempFolder, err := os.MkdirTemp("", "sw_fsck") if err != nil { return fmt.Errorf("failed to create temp folder: %v", err) } @@ -402,7 +401,7 @@ func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder stri return } - filerFileIdsData, err := ioutil.ReadFile(getFilerFileIdFile(tempFolder, volumeId)) + filerFileIdsData, err := os.ReadFile(getFilerFileIdFile(tempFolder, volumeId)) if err != nil { return } diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index c6fceb2c2..a32a0093d 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -2,8 +2,6 @@ package storage import ( "fmt" - "github.com/chrislusf/seaweedfs/weed/storage/types" - "io/ioutil" "os" "path/filepath" "strings" @@ -14,6 +12,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -85,9 +84,9 @@ func getValidVolumeName(basename string) string { return "" } -func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapKind) bool { - basename := fileInfo.Name() - if fileInfo.IsDir() { +func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind) bool { + basename := dirEntry.Name() + if dirEntry.IsDir() { return false } volumeName := getValidVolumeName(basename) @@ -103,7 +102,7 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne // check for incomplete volume noteFile := l.Directory + "/" + volumeName + ".note" if util.FileExists(noteFile) { - note, _ := ioutil.ReadFile(noteFile) + note, _ := os.ReadFile(noteFile) glog.Warningf("volume %s was not completed: %s", volumeName, 
string(note)) removeVolumeFiles(l.Directory + "/" + volumeName) removeVolumeFiles(l.IdxDirectory + "/" + volumeName) @@ -143,18 +142,18 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int) { - task_queue := make(chan os.FileInfo, 10*concurrency) + task_queue := make(chan os.DirEntry, 10*concurrency) go func() { foundVolumeNames := make(map[string]bool) - if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { - for _, fi := range fileInfos { - volumeName := getValidVolumeName(fi.Name()) + if dirEntries, err := os.ReadDir(l.Directory); err == nil { + for _, entry := range dirEntries { + volumeName := getValidVolumeName(entry.Name()) if volumeName == "" { continue } if _, found := foundVolumeNames[volumeName]; !found { foundVolumeNames[volumeName] = true - task_queue <- fi + task_queue <- entry } } } @@ -332,12 +331,12 @@ func (l *DiskLocation) Close() { return } -func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.FileInfo, bool) { - if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { - for _, fileInfo := range fileInfos { - volId, _, err := volumeIdFromFileName(fileInfo.Name()) +func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.DirEntry, bool) { + if dirEntries, err := os.ReadDir(l.Directory); err == nil { + for _, entry := range dirEntries { + volId, _, err := volumeIdFromFileName(entry.Name()) if vid == volId && err == nil { - return fileInfo, true + return entry, true } } } diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go index 91c7d86a6..3f56d797b 100644 --- a/weed/storage/disk_location_ec.go +++ b/weed/storage/disk_location_ec.go @@ -2,7 +2,6 @@ package storage import ( "fmt" - "io/ioutil" "os" "path" "regexp" @@ -118,25 +117,25 @@ func (l *DiskLocation) loadEcShards(shards []string, collection string, vid need func (l *DiskLocation) loadAllEcShards() (err error) { - fileInfos, err := ioutil.ReadDir(l.Directory) + dirEntries, err := os.ReadDir(l.Directory) if err != nil { return fmt.Errorf("load all ec shards in dir %s: %v", l.Directory, err) } if l.IdxDirectory != l.Directory { - indexFileInfos, err := ioutil.ReadDir(l.IdxDirectory) + indexDirEntries, err := os.ReadDir(l.IdxDirectory) if err != nil { return fmt.Errorf("load all ec shards in dir %s: %v", l.IdxDirectory, err) } - fileInfos = append(fileInfos, indexFileInfos...) + dirEntries = append(dirEntries, indexDirEntries...) 
} - sort.Slice(fileInfos, func(i, j int) bool { - return fileInfos[i].Name() < fileInfos[j].Name() + sort.Slice(dirEntries, func(i, j int) bool { + return dirEntries[i].Name() < dirEntries[j].Name() }) var sameVolumeShards []string var prevVolumeId needle.VolumeId - for _, fileInfo := range fileInfos { + for _, fileInfo := range dirEntries { if fileInfo.IsDir() { continue } diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go index bda58fbc3..a1f517b45 100644 --- a/weed/storage/needle/needle_parse_upload.go +++ b/weed/storage/needle/needle_parse_upload.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "fmt" "io" - "io/ioutil" "mime" "net/http" "path" @@ -108,7 +107,7 @@ func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) error { pu.FileName = "" dataSize, err := pu.bytesBuffer.ReadFrom(io.LimitReader(r.Body, sizeLimit+1)) if err == io.EOF || dataSize == sizeLimit+1 { - io.Copy(ioutil.Discard, r.Body) + io.Copy(io.Discard, r.Body) } pu.Data = pu.bytesBuffer.Bytes() r.Body.Close() @@ -118,7 +117,7 @@ func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) error { func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { defer func() { if e != nil && r.Body != nil { - io.Copy(ioutil.Discard, r.Body) + io.Copy(io.Discard, r.Body) r.Body.Close() } }() diff --git a/weed/storage/needle/needle_read_write_test.go b/weed/storage/needle/needle_read_write_test.go index afcea5a05..20dbc2554 100644 --- a/weed/storage/needle/needle_read_write_test.go +++ b/weed/storage/needle/needle_read_write_test.go @@ -1,7 +1,6 @@ package needle import ( - "io/ioutil" "os" "testing" @@ -31,7 +30,7 @@ func TestAppend(t *testing.T) { Padding: nil, // Padding []byte `comment:"Aligned to 8 bytes"` } - tempFile, err := ioutil.TempFile("", ".dat") + tempFile, err := os.CreateTemp("", ".dat") if err != nil { t.Errorf("Fail TempFile. 
%v", err) return diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go index 362659a11..c04fd6c8b 100644 --- a/weed/storage/needle_map_metric_test.go +++ b/weed/storage/needle_map_metric_test.go @@ -1,8 +1,8 @@ package storage import ( - "io/ioutil" "math/rand" + "os" "testing" "github.com/chrislusf/seaweedfs/weed/glog" @@ -11,7 +11,7 @@ import ( func TestFastLoadingNeedleMapMetrics(t *testing.T) { - idxFile, _ := ioutil.TempFile("", "tmp.idx") + idxFile, _ := os.CreateTemp("", "tmp.idx") nm := NewCompactNeedleMap(idxFile) for i := 0; i < 10000; i++ { diff --git a/weed/storage/volume_info/volume_info.go b/weed/storage/volume_info/volume_info.go index b7ef75171..fa906e1d4 100644 --- a/weed/storage/volume_info/volume_info.go +++ b/weed/storage/volume_info/volume_info.go @@ -3,15 +3,14 @@ package volume_info import ( "bytes" "fmt" - "io/ioutil" - - _ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend" - "github.com/chrislusf/seaweedfs/weed/util" + "os" "github.com/golang/protobuf/jsonpb" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + _ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend" + "github.com/chrislusf/seaweedfs/weed/util" ) // MaybeLoadVolumeInfo load the file data as *volume_server_pb.VolumeInfo, the returned volumeInfo will not be nil @@ -36,7 +35,7 @@ func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeIn hasVolumeInfoFile = true glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName) - tierData, readErr := ioutil.ReadFile(fileName) + tierData, readErr := os.ReadFile(fileName) if readErr != nil { glog.Warningf("fail to read %s : %v", fileName, readErr) err = fmt.Errorf("fail to read %s : %v", fileName, readErr) @@ -76,7 +75,7 @@ func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) er return fmt.Errorf("marshal to %s: %v", fileName, marshalErr) } - writeErr := ioutil.WriteFile(fileName, []byte(text), 0755) + writeErr := os.WriteFile(fileName, []byte(text), 0755) if writeErr != nil { return fmt.Errorf("fail to write %s : %v", fileName, writeErr) } diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go index c9596d11d..64f4b3b60 100644 --- a/weed/storage/volume_vacuum_test.go +++ b/weed/storage/volume_vacuum_test.go @@ -1,7 +1,6 @@ package storage import ( - "io/ioutil" "math/rand" "os" "testing" @@ -45,7 +44,7 @@ preparing test prerequisite easier ) func TestMakeDiff(t *testing.T) { v := new(Volume) - //lastCompactIndexOffset value is the index file size before step 4 + // lastCompactIndexOffset value is the index file size before step 4 v.lastCompactIndexOffset = 96 v.SuperBlock.Version = 0x2 /* @@ -63,7 +62,7 @@ func TestMakeDiff(t *testing.T) { } func TestCompaction(t *testing.T) { - dir, err := ioutil.TempDir("", "example") + dir, err := os.MkdirTemp("", "example") if err != nil { t.Fatalf("temp dir creation: %v", err) } diff --git a/weed/storage/volume_write_test.go b/weed/storage/volume_write_test.go index 309f29657..9f661a27f 100644 --- a/weed/storage/volume_write_test.go +++ b/weed/storage/volume_write_test.go @@ -2,17 +2,17 @@ package storage import ( "fmt" - "github.com/chrislusf/seaweedfs/weed/storage/needle" - "github.com/chrislusf/seaweedfs/weed/storage/super_block" - "github.com/chrislusf/seaweedfs/weed/storage/types" - "io/ioutil" "os" "testing" "time" + + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" 
+ "github.com/chrislusf/seaweedfs/weed/storage/types" ) func TestSearchVolumesWithDeletedNeedles(t *testing.T) { - dir, err := ioutil.TempDir("", "example") + dir, err := os.MkdirTemp("", "example") if err != nil { t.Fatalf("temp dir creation: %v", err) } diff --git a/weed/util/chunk_cache/chunk_cache_on_disk_test.go b/weed/util/chunk_cache/chunk_cache_on_disk_test.go index f8325276e..7dccfd43f 100644 --- a/weed/util/chunk_cache/chunk_cache_on_disk_test.go +++ b/weed/util/chunk_cache/chunk_cache_on_disk_test.go @@ -3,7 +3,6 @@ package chunk_cache import ( "bytes" "fmt" - "io/ioutil" "math/rand" "os" "testing" @@ -11,7 +10,7 @@ import ( func TestOnDisk(t *testing.T) { - tmpDir, _ := ioutil.TempDir("", "c") + tmpDir, _ := os.MkdirTemp("", "c") defer os.RemoveAll(tmpDir) totalDiskSizeInKB := int64(32) diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 2efd6b5aa..f005e8d42 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -35,7 +34,7 @@ func Post(url string, values url.Values) ([]byte, error) { return nil, err } defer r.Body.Close() - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if r.StatusCode >= 400 { if err != nil { return nil, fmt.Errorf("%s: %d - %s", url, r.StatusCode, string(b)) @@ -71,7 +70,7 @@ func Get(url string) ([]byte, bool, error) { reader = response.Body } - b, err := ioutil.ReadAll(reader) + b, err := io.ReadAll(reader) if response.StatusCode >= 400 { retryable := response.StatusCode >= 500 return nil, retryable, fmt.Errorf("%s: %s", url, response.Status) @@ -107,7 +106,7 @@ func Delete(url string, jwt string) error { return e } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return err } @@ -137,7 +136,7 @@ func DeleteProxied(url string, jwt string) (body []byte, httpStatus int, err err return } defer resp.Body.Close() - body, err = ioutil.ReadAll(resp.Body) + body, err = io.ReadAll(resp.Body) if err != nil { return } @@ -271,7 +270,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC } } // drains the response body to avoid memory leak - data, _ := ioutil.ReadAll(reader) + data, _ := io.ReadAll(reader) if len(data) != 0 { glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data)) } @@ -393,11 +392,11 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e } func CloseResponse(resp *http.Response) { - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } func CloseRequest(req *http.Request) { - io.Copy(ioutil.Discard, req.Body) + io.Copy(io.Discard, req.Body) req.Body.Close() }