package shell

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"io"
	"math"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
	"golang.org/x/exp/slices"
	"google.golang.org/grpc"
)

func init() {
	Commands = append(Commands, &commandVolumeCheckDisk{})
}

type commandVolumeCheckDisk struct {
	env *CommandEnv
}

func (c *commandVolumeCheckDisk) Name() string {
	return "volume.check.disk"
}

func (c *commandVolumeCheckDisk) Help() string {
	return `check all replicated volumes to find and fix inconsistencies. It is optional and resource intensive.

	How it works:

	find all volumes that are replicated
	for each volume id, if there are 2 or more replicas, pick the pair with the largest 2 file counts
	for that pair of volumes, A and B:
	  append entries in A but not in B to B
	  append entries in B but not in A to A

`
}
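
// Do parses the command flags, collects the cluster topology, and for each
// replicated volume reconciles replica pairs in descending file count order.
// Without -force it only reports differences; with -force it copies the
// missing entries.
//
// Example usage from `weed shell` (flags as defined below; the volume id is
// only an illustration):
//
//	volume.check.disk -v -volumeId=1 -force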
func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	slowMode := fsckCommand.Bool("slow", false, "slow mode checks all replicas even if file counts are the same")
	verbose := fsckCommand.Bool("v", false, "verbose mode")
	volumeId := fsckCommand.Uint("volumeId", 0, "the volume id")
	applyChanges := fsckCommand.Bool("force", false, "apply the fix")
	nonRepairThreshold := fsckCommand.Float64("nonRepairThreshold", 0.3, "repair only when the fraction of missing keys is not more than this limit")
	if err = fsckCommand.Parse(args); err != nil {
		return nil
	}
	infoAboutSimulationMode(writer, *applyChanges, "-force")

	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}

	c.env = commandEnv

	// collect topology information
	topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return err
	}
	volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)

	// pick pairs of volume replicas, largest file count first
	fileCount := func(replica *VolumeReplica) uint64 {
		return replica.info.FileCount - replica.info.DeleteCount
	}

	for _, replicas := range volumeReplicas {
		if *volumeId > 0 && replicas[0].info.Id != uint32(*volumeId) {
			continue
		}
		slices.SortFunc(replicas, func(a, b *VolumeReplica) bool {
			return fileCount(a) > fileCount(b)
		})
		for len(replicas) >= 2 {
			a, b := replicas[0], replicas[1]
			if !*slowMode {
				if fileCount(a) == fileCount(b) {
					// equal file counts are assumed to be in sync; skip unless -slow is set
					replicas = replicas[1:]
					continue
				}
			}
			if a.info.ReadOnly || b.info.ReadOnly {
				fmt.Fprintf(writer, "skipping readonly volume %d on %s and %s\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id)
				replicas = replicas[1:]
				continue
			}
			if err := c.syncTwoReplicas(a, b, *applyChanges, *nonRepairThreshold, *verbose, writer); err != nil {
				fmt.Fprintf(writer, "sync volume %d on %s and %s: %v\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, err)
			}
			replicas = replicas[1:]
		}
	}

	return nil
}
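
// syncTwoReplicas copies missing entries in both directions, repeating until
// a full pass over replicas a and b finds no further differences.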
func (c *commandVolumeCheckDisk) syncTwoReplicas(a *VolumeReplica, b *VolumeReplica, applyChanges bool, nonRepairThreshold float64, verbose bool, writer io.Writer) (err error) {
	aHasChanges, bHasChanges := true, true
	for aHasChanges || bHasChanges {
		if aHasChanges, bHasChanges, err = c.checkBoth(a, b, applyChanges, nonRepairThreshold, verbose, writer); err != nil {
			return err
		}
	}
	return nil
}
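
// checkBoth loads each replica's .idx file into an in-memory needle map and
// repairs each side with the entries found only on the other side. The
// timestamp taken before reading the indexes serves as the cutoff: needles
// appended after it are too new to be judged missing.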
func (c *commandVolumeCheckDisk) checkBoth(a *VolumeReplica, b *VolumeReplica, applyChanges bool, nonRepairThreshold float64, verbose bool, writer io.Writer) (aHasChanges bool, bHasChanges bool, err error) {
	aDB, bDB := needle_map.NewMemDb(), needle_map.NewMemDb()
	defer func() {
		aDB.Close()
		bDB.Close()
	}()

	// read index databases
	readIndexDbCutoffFrom := uint64(time.Now().UnixNano())
	if err = c.readIndexDatabase(aDB, a.info.Collection, a.info.Id, pb.NewServerAddressFromDataNode(a.location.dataNode), verbose, writer); err != nil {
		return true, true, fmt.Errorf("readIndexDatabase %s volume %d: %v", a.location.dataNode, a.info.Id, err)
	}
	if err := c.readIndexDatabase(bDB, b.info.Collection, b.info.Id, pb.NewServerAddressFromDataNode(b.location.dataNode), verbose, writer); err != nil {
		return true, true, fmt.Errorf("readIndexDatabase %s volume %d: %v", b.location.dataNode, b.info.Id, err)
	}

	// find and make up the differences
	if aHasChanges, err = c.doVolumeCheckDisk(bDB, aDB, b, a, verbose, writer, applyChanges, nonRepairThreshold, readIndexDbCutoffFrom); err != nil {
		return true, true, fmt.Errorf("doVolumeCheckDisk source:%s target:%s volume %d: %v", b.location.dataNode.Id, a.location.dataNode.Id, b.info.Id, err)
	}
	if bHasChanges, err = c.doVolumeCheckDisk(aDB, bDB, a, b, verbose, writer, applyChanges, nonRepairThreshold, readIndexDbCutoffFrom); err != nil {
		return true, true, fmt.Errorf("doVolumeCheckDisk source:%s target:%s volume %d: %v", a.location.dataNode.Id, b.location.dataNode.Id, a.info.Id, err)
	}
	return
}
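
// doVolumeCheckDisk collects the needles present in minuend but absent from
// subtrahend and, if the fraction of missing keys does not exceed
// nonRepairThreshold, copies them from the source replica to the target.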
func (c *commandVolumeCheckDisk) doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *VolumeReplica, verbose bool, writer io.Writer, applyChanges bool, nonRepairThreshold float64, cutoffFromAtNs uint64) (hasChanges bool, err error) {

	// find missing keys
	// hash join, can be more efficient
	var missingNeedles []needle_map.NeedleValue
	var counter int
	doCutoffOfLastNeedle := true
	minuend.DescendingVisit(func(value needle_map.NeedleValue) error {
		counter++
		if _, found := subtrahend.Get(value.Key); !found {
			if doCutoffOfLastNeedle {
				if needleMeta, err := readNeedleMeta(c.env.option.GrpcDialOption, pb.NewServerAddressFromDataNode(source.location.dataNode), source.info.Id, value); err == nil {
					// needles appended after the cutoff time are too new to be considered missing yet
					if needleMeta.AppendAtNs > cutoffFromAtNs {
						return nil
					}
					doCutoffOfLastNeedle = false
				}
			}
			missingNeedles = append(missingNeedles, value)
		} else if doCutoffOfLastNeedle {
			// visiting in descending order, so the first shared key ends the cutoff window
			doCutoffOfLastNeedle = false
		}
		return nil
	})

	fmt.Fprintf(writer, "volume %d %s has %d entries, %s missed %d entries\n", source.info.Id, source.location.dataNode.Id, counter, target.location.dataNode.Id, len(missingNeedles))

	if counter == 0 || len(missingNeedles) == 0 {
		return false, nil
	}

	missingNeedlesFraction := float64(len(missingNeedles)) / float64(counter)
	if missingNeedlesFraction > nonRepairThreshold {
		return false, fmt.Errorf(
			"failed to start repairing volume %d: the fraction of missing keys is greater than the threshold: %.2f > %.2f",
			source.info.Id, missingNeedlesFraction, nonRepairThreshold)
	}

	for _, needleValue := range missingNeedles {
		needleBlob, err := readSourceNeedleBlob(c.env.option.GrpcDialOption, pb.NewServerAddressFromDataNode(source.location.dataNode), source.info.Id, needleValue)
		if err != nil {
			return hasChanges, err
		}

		if !applyChanges {
			continue
		}

		if verbose {
			fmt.Fprintf(writer, "read %d,%x %s => %s \n", source.info.Id, needleValue.Key, source.location.dataNode.Id, target.location.dataNode.Id)
		}

		hasChanges = true

		if err = c.writeNeedleBlobToTarget(pb.NewServerAddressFromDataNode(target.location.dataNode), source.info.Id, needleValue, needleBlob); err != nil {
			return hasChanges, err
		}
	}

	return
}
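
// readSourceNeedleBlob fetches one needle's raw bytes from the source volume
// server via the ReadNeedleBlob gRPC call.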
func readSourceNeedleBlob(grpcDialOption grpc.DialOption, sourceVolumeServer pb.ServerAddress, volumeId uint32, needleValue needle_map.NeedleValue) (needleBlob []byte, err error) {
	err = operation.WithVolumeServerClient(false, sourceVolumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		resp, err := client.ReadNeedleBlob(context.Background(), &volume_server_pb.ReadNeedleBlobRequest{
			VolumeId: volumeId,
			Offset:   needleValue.Offset.ToActualOffset(),
			Size:     int32(needleValue.Size),
		})
		if err != nil {
			return err
		}
		needleBlob = resp.NeedleBlob
		return nil
	})
	return
}
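
// writeNeedleBlobToTarget appends a raw needle blob to the target volume
// server via the WriteNeedleBlob gRPC call.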
func (c *commandVolumeCheckDisk) writeNeedleBlobToTarget(targetVolumeServer pb.ServerAddress, volumeId uint32, needleValue needle_map.NeedleValue, needleBlob []byte) error {
	return operation.WithVolumeServerClient(false, targetVolumeServer, c.env.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		_, err := client.WriteNeedleBlob(context.Background(), &volume_server_pb.WriteNeedleBlobRequest{
			VolumeId:   volumeId,
			NeedleId:   uint64(needleValue.Key),
			Size:       int32(needleValue.Size),
			NeedleBlob: needleBlob,
		})
		return err
	})
}
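
// readIndexDatabase copies a volume's .idx file from the volume server into
// a memory buffer and loads it into the given needle map.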
func (c *commandVolumeCheckDisk) readIndexDatabase(db *needle_map.MemDb, collection string, volumeId uint32, volumeServer pb.ServerAddress, verbose bool, writer io.Writer) error {

	var buf bytes.Buffer
	if err := c.copyVolumeIndexFile(collection, volumeId, volumeServer, &buf, verbose, writer); err != nil {
		return err
	}

	if verbose {
		fmt.Fprintf(writer, "load collection %s volume %d index size %d from %s ...\n", collection, volumeId, buf.Len(), volumeServer)
	}

	return db.LoadFromReaderAt(bytes.NewReader(buf.Bytes()))
}
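
// copyVolumeIndexFile streams the volume's .idx file into buf using the
// volume server's CopyFile API.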
func (c *commandVolumeCheckDisk) copyVolumeIndexFile(collection string, volumeId uint32, volumeServer pb.ServerAddress, buf *bytes.Buffer, verbose bool, writer io.Writer) error {
	return operation.WithVolumeServerClient(true, volumeServer, c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {

		ext := ".idx"

		copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
			VolumeId:                 volumeId,
			Ext:                      ext,
			CompactionRevision:       math.MaxUint32,
			StopOffset:               math.MaxInt64,
			Collection:               collection,
			IsEcVolume:               false,
			IgnoreSourceFileNotFound: false,
		})
		if err != nil {
			return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
		}

		err = writeToBuffer(copyFileClient, buf)
		if err != nil {
			return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, volumeServer, err)
		}

		return nil
	})
}
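
// writeToBuffer drains a CopyFile response stream into buf until EOF.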
func writeToBuffer(client volume_server_pb.VolumeServer_CopyFileClient, buf *bytes.Buffer) error {
	for {
		resp, receiveErr := client.Recv()
		if receiveErr == io.EOF {
			break
		}
		if receiveErr != nil {
			return fmt.Errorf("receiving: %v", receiveErr)
		}
		buf.Write(resp.FileContent)
	}
	return nil
}