From 1d9e30d8c0606d4cf29a5989a12c0dad6150457b Mon Sep 17 00:00:00 2001
From: Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com>
Date: Thu, 31 Mar 2022 19:10:06 +0500
Subject: [PATCH 1/7] fsck replicas

---
 weed/shell/command_volume_fsck.go | 182 +++++++++++++++++-------------
 1 file changed, 102 insertions(+), 80 deletions(-)

diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
index 7d3aa28a5..7318d475c 100644
--- a/weed/shell/command_volume_fsck.go
+++ b/weed/shell/command_volume_fsck.go
@@ -11,6 +11,7 @@ import (
 	"net/http"
 	"net/url"
 	"os"
+	"path"
 	"path/filepath"
 	"strings"
 	"sync"
@@ -65,8 +66,11 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
 	verbose := fsckCommand.Bool("v", false, "verbose mode")
 	findMissingChunksInFiler := fsckCommand.Bool("findMissingChunksInFiler", false, "see \"help volume.fsck\"")
 	findMissingChunksInFilerPath := fsckCommand.String("findMissingChunksInFilerPath", "/", "used together with findMissingChunksInFiler")
+	findMissingChunksInVolumeId := fsckCommand.Int("findMissingChunksInVolumeId", 0, "used together with findMissingChunksInFiler")
 	applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "<expert only!> after detection, delete missing data from volumes / delete missing file entries from filer")
 	purgeAbsent := fsckCommand.Bool("reallyDeleteFilerEntries", false, "<expert only!> delete missing file entries from filer if the corresponding volume is missing for any reason, please ensure all still existing/expected volumes are connected! used together with findMissingChunksInFiler")
+	tempPath := fsckCommand.String("tempPath", path.Join(os.TempDir()), "path for temporary idx files")
+
 	if err = fsckCommand.Parse(args); err != nil {
 		return nil
 	}
@@ -78,7 +82,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
 	c.env = commandEnv
 
 	// create a temp folder
-	tempFolder, err := os.MkdirTemp("", "sw_fsck")
+	tempFolder, err := os.MkdirTemp(*tempPath, "sw_fsck")
 	if err != nil {
 		return fmt.Errorf("failed to create temp folder: %v", err)
 	}
@@ -88,14 +92,14 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
 	defer os.RemoveAll(tempFolder)
 
 	// collect all volume id locations
-	volumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
+	dataNodeVolumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
 	if err != nil {
 		return fmt.Errorf("failed to collect all volume locations: %v", err)
 	}
 
 	isBucketsPath := false
 	var fillerBucketsPath string
-	if *findMissingChunksInFiler && *findMissingChunksInFilerPath != "" {
+	if *findMissingChunksInFiler && *findMissingChunksInFilerPath != "/" {
 		fillerBucketsPath, err = readFilerBucketsPath(commandEnv)
 		if err != nil {
 			return fmt.Errorf("read filer buckets path: %v", err)
@@ -109,33 +113,41 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
 	}
 
 	// collect each volume file ids
-	for volumeId, vinfo := range volumeIdToVInfo {
-		if isBucketsPath && !strings.HasPrefix(*findMissingChunksInFilerPath, fillerBucketsPath+"/"+vinfo.collection) {
-			delete(volumeIdToVInfo, volumeId)
-			continue
-		}
-		err = c.collectOneVolumeFileIds(tempFolder, volumeId, vinfo, *verbose, writer)
-		if err != nil {
-			return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
+	for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
+		for volumeId, vinfo := range volumeIdToVInfo {
+			if *findMissingChunksInVolumeId > 0 && uint32(*findMissingChunksInVolumeId) != volumeId {
+				delete(volumeIdToVInfo, volumeId)
+				continue
+			}
+			if isBucketsPath && !strings.HasPrefix(*findMissingChunksInFilerPath, fillerBucketsPath+"/"+vinfo.collection) {
+				delete(volumeIdToVInfo, volumeId)
+				continue
+			}
+			err = c.collectOneVolumeFileIds(tempFolder, dataNodeId, volumeId, vinfo, *verbose, writer)
+			if err != nil {
+				return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
+			}
 		}
 	}
 
 	if *findMissingChunksInFiler {
 		// collect all filer file ids and paths
-		if err = c.collectFilerFileIdAndPaths(volumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, *purgeAbsent); err != nil {
+		if err = c.collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, *purgeAbsent); err != nil {
 			return fmt.Errorf("collectFilerFileIdAndPaths: %v", err)
 		}
-		// for each volume, check filer file ids
-		if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
-			return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
+		for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
+			// for each volume, check filer file ids
+			if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, tempFolder, dataNodeId, writer, *verbose, *applyPurging); err != nil {
+				return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
+			}
 		}
 	} else {
 		// collect all filer file ids
-		if err = c.collectFilerFileIds(volumeIdToVInfo, tempFolder, writer, *verbose); err != nil {
+		if err = c.collectFilerFileIds(dataNodeVolumeIdToVInfo, tempFolder, writer, *verbose); err != nil {
 			return fmt.Errorf("failed to collect file ids from filer: %v", err)
 		}
 		// volume file ids subtract filer file ids
-		if err = c.findExtraChunksInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
+		if err = c.findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
 			return fmt.Errorf("findExtraChunksInVolumeServers: %v", err)
 		}
 	}
@@ -143,19 +155,23 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
 	return nil
 }
 
-func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, purgeAbsent bool) error {
+func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, purgeAbsent bool) error {
 
 	if verbose {
 		fmt.Fprintf(writer, "checking each file from filer ...\n")
 	}
 
 	files := make(map[uint32]*os.File)
-	for vid := range volumeIdToServer {
-		dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
-		if openErr != nil {
-			return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+	for _, volumeIdToServer := range dataNodeVolumeIdToVInfo {
+		for vid := range volumeIdToServer {
+			dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+			if openErr != nil {
+				return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+			}
+			if _, ok := volumeIdToServer[vid]; !ok {
+				files[vid] = dst
+			}
 		}
-		files[vid] = dst
 	}
 	defer func() {
 		for _, f := range files {
@@ -210,10 +226,10 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint
 
 }
 
-func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
+func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, dataNodeId string, writer io.Writer, verbose bool, applyPurging bool) error {
 
 	for volumeId, vinfo := range volumeIdToVInfo {
-		checkErr := c.oneVolumeFileIdsCheckOneVolume(tempFolder, volumeId, writer, verbose, applyPurging)
+		checkErr := c.oneVolumeFileIdsCheckOneVolume(tempFolder, dataNodeId, volumeId, writer, verbose, applyPurging)
 		if checkErr != nil {
 			return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
 		}
@@ -221,55 +237,57 @@ func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInf
 	return nil
 }
 
-func (c *commandVolumeFsck) findExtraChunksInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
+func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
 
 	var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
 
-	for volumeId, vinfo := range volumeIdToVInfo {
-		inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, volumeId, writer, verbose)
-		if checkErr != nil {
-			return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
-		}
-		totalInUseCount += inUseCount
-		totalOrphanChunkCount += uint64(len(orphanFileIds))
-		totalOrphanDataSize += orphanDataSize
-
-		if verbose {
-			for _, fid := range orphanFileIds {
-				fmt.Fprintf(writer, "%s\n", fid)
+	for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
+		for volumeId, vinfo := range volumeIdToVInfo {
+			inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, dataNodeId, volumeId, writer, verbose)
+			if checkErr != nil {
+				return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
 			}
-		}
+			totalInUseCount += inUseCount
+			totalOrphanChunkCount += uint64(len(orphanFileIds))
+			totalOrphanDataSize += orphanDataSize
 
-		if applyPurging && len(orphanFileIds) > 0 {
 			if verbose {
-				fmt.Fprintf(writer, "purging process for volume %d", volumeId)
-			}
-
-			if vinfo.isEcVolume {
-				fmt.Fprintf(writer, "skip purging for Erasure Coded volume %d.\n", volumeId)
-				continue
+				for _, fid := range orphanFileIds {
+					fmt.Fprintf(writer, "%s\n", fid)
+				}
 			}
 
-			needleVID := needle.VolumeId(volumeId)
+			if applyPurging && len(orphanFileIds) > 0 {
+				if verbose {
+					fmt.Fprintf(writer, "purging process for volume %d", volumeId)
+				}
 
-			if vinfo.isReadOnly {
-				err := markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, true)
-				if err != nil {
-					return fmt.Errorf("mark volume %d read/write: %v", volumeId, err)
+				if vinfo.isEcVolume {
+					fmt.Fprintf(writer, "skip purging for Erasure Coded volume %d.\n", volumeId)
+					continue
 				}
 
-				fmt.Fprintf(writer, "temporarily marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
-				defer markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, false)
-			}
+				needleVID := needle.VolumeId(volumeId)
 
-			fmt.Fprintf(writer, "marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
+				if vinfo.isReadOnly {
+					err := markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, true)
+					if err != nil {
+						return fmt.Errorf("mark volume %d read/write: %v", volumeId, err)
+					}
 
-			if verbose {
-				fmt.Fprintf(writer, "purging files from volume %d\n", volumeId)
-			}
+					fmt.Fprintf(writer, "temporarily marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
+					defer markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, false)
+				}
+
+				fmt.Fprintf(writer, "marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
 
-			if err := c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
-				return fmt.Errorf("purging volume %d: %v", volumeId, err)
+				if verbose {
+					fmt.Fprintf(writer, "purging files from volume %d\n", volumeId)
+				}
+
+				if err := c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
+					return fmt.Errorf("purging volume %d: %v", volumeId, err)
+				}
 			}
 		}
 	}
@@ -290,7 +308,7 @@ func (c *commandVolumeFsck) findExtraChunksInVolumeServers(volumeIdToVInfo map[u
 	return nil
 }
 
-func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
+func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, dataNodeId string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
 
 	if verbose {
 		fmt.Fprintf(writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
@@ -316,7 +334,7 @@ func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId
 			return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
 		}
 
-		err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, volumeId))
+		err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, dataNodeId, volumeId))
 		if err != nil {
 			return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, vinfo.server, err)
 		}
@@ -327,19 +345,21 @@ func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId
 
 }
 
-func (c *commandVolumeFsck) collectFilerFileIds(volumeIdToServer map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool) error {
+func (c *commandVolumeFsck) collectFilerFileIds(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool) error {
 
 	if verbose {
 		fmt.Fprintf(writer, "collecting file ids from filer ...\n")
 	}
 
 	files := make(map[uint32]*os.File)
-	for vid := range volumeIdToServer {
-		dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
-		if openErr != nil {
-			return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+	for _, volumeIdToServer := range dataNodeVolumeIdToVInfo {
+		for vid := range volumeIdToServer {
+			dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+			if openErr != nil {
+				return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+			}
+			files[vid] = dst
 		}
-		files[vid] = dst
 	}
 	defer func() {
 		for _, f := range files {
@@ -377,7 +397,7 @@ func (c *commandVolumeFsck) collectFilerFileIds(volumeIdToServer map[uint32]VInf
 	})
 }
 
-func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, volumeId uint32, writer io.Writer, verbose bool, applyPurging bool) (err error) {
+func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, dataNodeId string, volumeId uint32, writer io.Writer, verbose bool, applyPurging bool) (err error) {
 
 	if verbose {
 		fmt.Fprintf(writer, "find missing file chunks in volume %d ...\n", volumeId)
@@ -386,7 +406,7 @@ func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, vo
 	db := needle_map.NewMemDb()
 	defer db.Close()
 
-	if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
+	if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, dataNodeId, volumeId)); err != nil {
 		return
 	}
 
@@ -473,12 +493,12 @@ func (c *commandVolumeFsck) httpDelete(path util.FullPath, verbose bool) {
 	}
 }
 
-func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
+func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, dataNodeId string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
 
 	db := needle_map.NewMemDb()
 	defer db.Close()
 
-	if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
+	if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, dataNodeId, volumeId)); err != nil {
 		return
 	}
 
@@ -509,8 +529,8 @@ func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder stri
 
 	if orphanFileCount > 0 {
 		pct := float64(orphanFileCount*100) / (float64(orphanFileCount + inUseCount))
-		fmt.Fprintf(writer, "volume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
-			volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
+		fmt.Fprintf(writer, "dataNode:%s\tvolume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
+			dataNodeId, volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
 	}
 
 	return
@@ -524,13 +544,13 @@ type VInfo struct {
 	isReadOnly bool
 }
 
-func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) {
+func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[string]map[uint32]VInfo, err error) {
 
 	if verbose {
 		fmt.Fprintf(writer, "collecting volume id and locations from master ...\n")
 	}
 
-	volumeIdToServer = make(map[uint32]VInfo)
+	volumeIdToServer = make(map[string]map[uint32]VInfo)
 	// collect topology information
 	topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
 	if err != nil {
@@ -539,8 +559,10 @@ func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose boo
 
 	eachDataNode(topologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
 		for _, diskInfo := range t.DiskInfos {
+			dataNodeId := t.GetId()
+			volumeIdToServer[dataNodeId] = make(map[uint32]VInfo)
 			for _, vi := range diskInfo.VolumeInfos {
-				volumeIdToServer[vi.Id] = VInfo{
+				volumeIdToServer[dataNodeId][vi.Id] = VInfo{
 					server:     pb.NewServerAddressFromDataNode(t),
 					collection: vi.Collection,
 					isEcVolume: false,
@@ -548,7 +570,7 @@ func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose boo
 				}
 			}
 			for _, ecShardInfo := range diskInfo.EcShardInfos {
-				volumeIdToServer[ecShardInfo.Id] = VInfo{
+				volumeIdToServer[dataNodeId][ecShardInfo.Id] = VInfo{
 					server:     pb.NewServerAddressFromDataNode(t),
 					collection: ecShardInfo.Collection,
 					isEcVolume: true,
@@ -600,8 +622,8 @@ func (c *commandVolumeFsck) purgeFileIdsForOneVolume(volumeId uint32, fileIds []
 	return
 }
 
-func getVolumeFileIdFile(tempFolder string, vid uint32) string {
-	return filepath.Join(tempFolder, fmt.Sprintf("%d.idx", vid))
+func getVolumeFileIdFile(tempFolder string, dataNodeid string, vid uint32) string {
+	return filepath.Join(tempFolder, fmt.Sprintf("%s_%d.idx", dataNodeid, vid))
 }
 
 func getFilerFileIdFile(tempFolder string, vid uint32) string {

From 3cedb21bb728add11978d6cf45e7c2d8a2e94d12 Mon Sep 17 00:00:00 2001
From: Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com>
Date: Thu, 31 Mar 2022 21:36:10 +0500
Subject: [PATCH 2/7] skip entries created after the fsck collection started

---
 weed/shell/command_volume_fsck.go | 32 +++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
index 7318d475c..d02033aa3 100644
--- a/weed/shell/command_volume_fsck.go
+++ b/weed/shell/command_volume_fsck.go
@@ -5,17 +5,6 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"io"
-	"io/ioutil"
-	"math"
-	"net/http"
-	"net/url"
-	"os"
-	"path"
-	"path/filepath"
-	"strings"
-	"sync"
-
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb"
@@ -26,6 +15,17 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/chrislusf/seaweedfs/weed/util"
+	"io"
+	"io/ioutil"
+	"math"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
 )
 
 func init() {
@@ -112,6 +112,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
 		return fmt.Errorf("read filer buckets path: %v", err)
 	}
 
+	collectMtime := time.Now().Unix()
 	// collect each volume file ids
 	for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
 		for volumeId, vinfo := range volumeIdToVInfo {
@@ -132,7 +133,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
 
 	if *findMissingChunksInFiler {
 		// collect all filer file ids and paths
-		if err = c.collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, *purgeAbsent); err != nil {
+		if err = c.collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, *purgeAbsent, collectMtime); err != nil {
 			return fmt.Errorf("collectFilerFileIdAndPaths: %v", err)
 		}
 		for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
@@ -155,7 +156,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
 	return nil
 }
 
-func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, purgeAbsent bool) error {
+func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, purgeAbsent bool, collectMtime int64) error {
 
 	if verbose {
 		fmt.Fprintf(writer, "checking each file from filer ...\n")
@@ -195,6 +196,9 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo m
 		}
 		dataChunks = append(dataChunks, manifestChunks...)
 		for _, chunk := range dataChunks {
+			if chunk.Mtime > collectMtime {
+				continue
+			}
 			outputChan <- &Item{
 				vid:     chunk.Fid.VolumeId,
 				fileKey: chunk.Fid.FileKey,
@@ -400,7 +404,7 @@ func (c *commandVolumeFsck) collectFilerFileIds(dataNodeVolumeIdToVInfo map[stri
 func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, dataNodeId string, volumeId uint32, writer io.Writer, verbose bool, applyPurging bool) (err error) {
 
 	if verbose {
-		fmt.Fprintf(writer, "find missing file chunks in volume %d ...\n", volumeId)
+		fmt.Fprintf(writer, "find missing file chunks in dataNodeId %s volume %d ...\n", dataNodeId, volumeId)
 	}
 
 	db := needle_map.NewMemDb()

From 3817e05dd043f024854b2c9acae6daecf2ff14b8 Mon Sep 17 00:00:00 2001
From: Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com>
Date: Fri, 1 Apr 2022 10:17:09 +0500
Subject: [PATCH 3/7] fix collect filer files

---
 weed/shell/command_volume_fsck.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
index d02033aa3..557c1e18e 100644
--- a/weed/shell/command_volume_fsck.go
+++ b/weed/shell/command_volume_fsck.go
@@ -165,13 +165,14 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo m
 	files := make(map[uint32]*os.File)
 	for _, volumeIdToServer := range dataNodeVolumeIdToVInfo {
 		for vid := range volumeIdToServer {
+			if _, ok := files[vid]; ok {
+				continue
+			}
 			dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
 			if openErr != nil {
 				return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
 			}
-			if _, ok := volumeIdToServer[vid]; !ok {
-				files[vid] = dst
-			}
+			files[vid] = dst
 		}
 	}
 	defer func() {

From 7f1383a41ef8011224f15fd9252aa0677af62324 Mon Sep 17 00:00:00 2001
From: Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com>
Date: Fri, 1 Apr 2022 14:45:41 +0500
Subject: [PATCH 4/7] findExtraChunksInVolumeServers takes volume replication
 into account

---
 weed/shell/command_volume_fsck.go | 68 +++++++++++++++++++++++--------
 1 file changed, 52 insertions(+), 16 deletions(-)

diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
index 557c1e18e..1aa33e054 100644
--- a/weed/shell/command_volume_fsck.go
+++ b/weed/shell/command_volume_fsck.go
@@ -245,13 +245,32 @@ func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInf
 func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
 
 	var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
-
+	volumeIdOrphanFileIds := make(map[uint32]map[string]bool)
+	isSeveralReplicas := make(map[uint32]bool)
+	isEcVolumeReplicas := make(map[uint32]bool)
+	isReadOnlyReplicas := make(map[uint32]bool)
+	serverReplicas := make(map[uint32][]pb.ServerAddress)
 	for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
 		for volumeId, vinfo := range volumeIdToVInfo {
 			inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, dataNodeId, volumeId, writer, verbose)
 			if checkErr != nil {
 				return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
 			}
+			isSeveralReplicas[volumeId] = false
+			if _, found := volumeIdOrphanFileIds[volumeId]; !found {
+				volumeIdOrphanFileIds[volumeId] = make(map[string]bool)
+			} else {
+				isSeveralReplicas[volumeId] = true
+			}
+			for _, fid := range orphanFileIds {
+				if isSeveralReplicas[volumeId] {
+					if _, found := volumeIdOrphanFileIds[volumeId][fid]; !found {
+						continue
+					}
+				}
+				volumeIdOrphanFileIds[volumeId][fid] = isSeveralReplicas[volumeId]
+			}
+
 			totalInUseCount += inUseCount
 			totalOrphanChunkCount += uint64(len(orphanFileIds))
 			totalOrphanDataSize += orphanDataSize
@@ -261,31 +280,48 @@ func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVIn
 					fmt.Fprintf(writer, "%s\n", fid)
 				}
 			}
+			isEcVolumeReplicas[volumeId] = vinfo.isEcVolume
+			if isReadOnly, found := isReadOnlyReplicas[volumeId]; !(found && isReadOnly) {
+				isReadOnlyReplicas[volumeId] = vinfo.isReadOnly
+			}
+			serverReplicas[volumeId] = append(serverReplicas[volumeId], vinfo.server)
+		}
 
-			if applyPurging && len(orphanFileIds) > 0 {
-				if verbose {
-					fmt.Fprintf(writer, "purging process for volume %d", volumeId)
-				}
-
-				if vinfo.isEcVolume {
-					fmt.Fprintf(writer, "skip purging for Erasure Coded volume %d.\n", volumeId)
-					continue
+		for volumeId, orphanReplicaFileIds := range volumeIdOrphanFileIds {
+			if !(applyPurging && len(orphanReplicaFileIds) > 0) {
+				continue
+			}
+			orphanFileIds := []string{}
+			for fid, foundInAllReplicas := range orphanReplicaFileIds {
+				if !isSeveralReplicas[volumeId] || (isSeveralReplicas[volumeId] && foundInAllReplicas) {
+					orphanFileIds = append(orphanFileIds, fid)
 				}
+			}
+			if !(len(orphanFileIds) > 0) {
+				continue
+			}
+			if verbose {
+				fmt.Fprintf(writer, "purging process for volume %d", volumeId)
+			}
 
+			if isEcVolumeReplicas[volumeId] {
+				fmt.Fprintf(writer, "skip purging for Erasure Coded volume %d.\n", volumeId)
+				continue
+			}
+			for _, server := range serverReplicas[volumeId] {
 				needleVID := needle.VolumeId(volumeId)
 
-				if vinfo.isReadOnly {
-					err := markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, true)
+				if isReadOnlyReplicas[volumeId] {
+					err := markVolumeWritable(c.env.option.GrpcDialOption, needleVID, server, true)
 					if err != nil {
 						return fmt.Errorf("mark volume %d read/write: %v", volumeId, err)
 					}
 
-					fmt.Fprintf(writer, "temporarily marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
-					defer markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, false)
-				}
-
-				fmt.Fprintf(writer, "marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
+					fmt.Fprintf(writer, "temporarily marked %d on server %v writable for forced purge\n", volumeId, server)
+					defer markVolumeWritable(c.env.option.GrpcDialOption, needleVID, server, false)
 
+					fmt.Fprintf(writer, "marked %d on server %v writable for forced purge\n", volumeId, server)
+				}
 				if verbose {
 					fmt.Fprintf(writer, "purging files from volume %d\n", volumeId)
 				}

From 2364fab927f76a79059e8ef12cec2ff3e1685a8e Mon Sep 17 00:00:00 2001
From: Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com>
Date: Tue, 19 Apr 2022 11:44:41 +0500
Subject: [PATCH 5/7] volume.list: add filters for readonly, collectionPattern
 and volumeId

---
 weed/shell/command_volume_list.go | 52 +++++++++++++++++++++++++------
 1 file changed, 42 insertions(+), 10 deletions(-)

diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go
index 9150752d5..3a5633168 100644
--- a/weed/shell/command_volume_list.go
+++ b/weed/shell/command_volume_list.go
@@ -7,6 +7,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
 	"golang.org/x/exp/slices"
+	"path/filepath"
 
 	"io"
 )
@@ -16,6 +17,9 @@ func init() {
 }
 
 type commandVolumeList struct {
+	collectionPattern *string
+	readonly          *bool
+	volumeId          *uint64
 }
 
 func (c *commandVolumeList) Name() string {
@@ -34,6 +38,10 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
 
 	volumeListCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 	verbosityLevel := volumeListCommand.Int("v", 5, "verbose mode: 0, 1, 2, 3, 4, 5")
+	c.collectionPattern = volumeListCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
+	c.readonly = volumeListCommand.Bool("readonly", false, "show only readonly")
+	c.volumeId = volumeListCommand.Uint64("volumeId", 0, "show only volume id")
+
 	if err = volumeListCommand.Parse(args); err != nil {
 		return nil
 	}
@@ -44,7 +52,7 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
 		return err
 	}
 
-	writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
+	c.writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
 	return nil
 }
 
@@ -65,53 +73,71 @@ func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
 	return buf.String()
 }
 
-func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
+func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
 	output(verbosityLevel >= 0, writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
 	slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) bool {
 		return a.Id < b.Id
 	})
 	var s statistics
 	for _, dc := range t.DataCenterInfos {
-		s = s.plus(writeDataCenterInfo(writer, dc, verbosityLevel))
+		s = s.plus(c.writeDataCenterInfo(writer, dc, verbosityLevel))
 	}
 	output(verbosityLevel >= 0, writer, "%+v \n", s)
 	return s
 }
-func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
+
+func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
 	output(verbosityLevel >= 1, writer, "  DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
 	var s statistics
 	slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) bool {
 		return a.Id < b.Id
 	})
 	for _, r := range t.RackInfos {
-		s = s.plus(writeRackInfo(writer, r, verbosityLevel))
+		s = s.plus(c.writeRackInfo(writer, r, verbosityLevel))
 	}
 	output(verbosityLevel >= 1, writer, "  DataCenter %s %+v \n", t.Id, s)
 	return s
 }
-func writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
+
+func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
 	output(verbosityLevel >= 2, writer, "    Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
 	var s statistics
 	slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) bool {
 		return a.Id < b.Id
 	})
 	for _, dn := range t.DataNodeInfos {
-		s = s.plus(writeDataNodeInfo(writer, dn, verbosityLevel))
+		s = s.plus(c.writeDataNodeInfo(writer, dn, verbosityLevel))
 	}
 	output(verbosityLevel >= 2, writer, "    Rack %s %+v \n", t.Id, s)
 	return s
 }
-func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
+
+func (c *commandVolumeList) writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
 	output(verbosityLevel >= 3, writer, "      DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
 	var s statistics
 	for _, diskInfo := range t.DiskInfos {
-		s = s.plus(writeDiskInfo(writer, diskInfo, verbosityLevel))
+		s = s.plus(c.writeDiskInfo(writer, diskInfo, verbosityLevel))
 	}
 	output(verbosityLevel >= 3, writer, "      DataNode %s %+v \n", t.Id, s)
 	return s
 }
 
-func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
+func (c *commandVolumeList) isNotMatchDiskInfo(readOnly bool, collection string, volumeId uint32) bool {
+	if *c.readonly && !readOnly {
+		return true
+	}
+	if *c.collectionPattern != "" {
+		if matched, _ := filepath.Match(*c.collectionPattern, collection); !matched {
+			return true
+		}
+	}
+	if *c.volumeId > 0 && *c.volumeId != uint64(volumeId) {
+		return true
+	}
+	return false
+}
+
+func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
 	var s statistics
 	diskType := t.Type
 	if diskType == "" {
@@ -122,9 +148,15 @@ func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int)
 		return a.Id < b.Id
 	})
 	for _, vi := range t.VolumeInfos {
+		if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id) {
+			continue
+		}
 		s = s.plus(writeVolumeInformationMessage(writer, vi, verbosityLevel))
 	}
 	for _, ecShardInfo := range t.EcShardInfos {
+		if c.isNotMatchDiskInfo(false, ecShardInfo.Collection, ecShardInfo.Id) {
+			continue
+		}
 		output(verbosityLevel >= 5, writer, "          ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
 	}
 	output(verbosityLevel >= 4, writer, "        Disk %s %+v \n", diskType, s)

From 3d5fc72d5534d169deed87b8a2a7e24870ab0aab Mon Sep 17 00:00:00 2001
From: naveensrinivasan <172697+naveensrinivasan@users.noreply.github.com>
Date: Tue, 19 Apr 2022 20:04:54 -0500
Subject: [PATCH 6/7] chore(deps): Included dependency review

> The Dependency Review GitHub Action in your repository enforces dependency reviews on your pull requests.
> The action scans for vulnerable versions of dependencies introduced by package version changes in pull requests,
> and warns you about the associated security vulnerabilities.
> This gives you better visibility of what's changing in a pull request,
> and helps prevent vulnerabilities being added to your repository.

https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement

Signed-off-by: naveensrinivasan <172697+naveensrinivasan@users.noreply.github.com>
---
 .github/workflows/depsreview.yml | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 .github/workflows/depsreview.yml

diff --git a/.github/workflows/depsreview.yml b/.github/workflows/depsreview.yml
new file mode 100644
index 000000000..626c5d154
--- /dev/null
+++ b/.github/workflows/depsreview.yml
@@ -0,0 +1,14 @@
+name: 'Dependency Review'
+on: [pull_request]
+
+permissions:
+  contents: read
+
+jobs:
+  dependency-review:
+    runs-on: ubuntu-latest
+    steps:
+      - name: 'Checkout Repository'
+        uses: actions/checkout@dcd71f646680f2efd8db4afa5ad64fdcba30e748
+      - name: 'Dependency Review'
+        uses: actions/dependency-review-action@3f943b86c9a289f4e632c632695e2e0898d9d67d

From a23629b2fe9112cfa9676206292f5a7de4c3a33a Mon Sep 17 00:00:00 2001
From: guosj <515878133@qq.com>
Date: Wed, 20 Apr 2022 10:12:33 +0800
Subject: [PATCH 7/7] handle implicit username

---
 weed/iamapi/iamapi_management_handlers.go | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go
index 94003c46e..5fea49f5c 100644
--- a/weed/iamapi/iamapi_management_handlers.go
+++ b/weed/iamapi/iamapi_management_handlers.go
@@ -377,6 +377,18 @@ func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, valu
 	return resp
 }
 
+// handleImplicitUsername adds the username who signs the request to values if 'UserName' is not specified
+// According to https://awscli.amazonaws.com/v2/documentation/api/latest/reference/iam/create-access-key.html
+// "If you do not specify a user name, IAM determines the user name implicitly based on the Amazon Web
+// Services access key ID signing the request."
+func handleImplicitUsername(r *http.Request, values url.Values) {
+	if values.Get("UserName") == "" {
+		// get username who signs the request
+		userName := strings.Split(r.Header["Authorization"][0], "/")[2]
+		values.Set("UserName", userName)
+	}
+}
+
 func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
 	if err := r.ParseForm(); err != nil {
 		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
@@ -401,6 +413,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
 		response = iama.ListUsers(s3cfg, values)
 		changed = false
 	case "ListAccessKeys":
+		handleImplicitUsername(r, values)
 		response = iama.ListAccessKeys(s3cfg, values)
 		changed = false
 	case "CreateUser":
@@ -428,8 +441,10 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	case "CreateAccessKey":
+		handleImplicitUsername(r, values)
 		response = iama.CreateAccessKey(s3cfg, values)
 	case "DeleteAccessKey":
+		handleImplicitUsername(r, values)
 		response = iama.DeleteAccessKey(s3cfg, values)
 	case "CreatePolicy":
 		response, err = iama.CreatePolicy(s3cfg, values)