You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

269 lines
8.9 KiB

6 years ago
6 years ago
4 years ago
6 years ago
3 years ago
3 years ago
6 years ago
  1. package topology
  2. import (
  3. "context"
  4. "io"
  5. "sync/atomic"
  6. "time"
  7. "github.com/seaweedfs/seaweedfs/weed/pb"
  8. "google.golang.org/grpc"
  9. "github.com/seaweedfs/seaweedfs/weed/storage/needle"
  10. "github.com/seaweedfs/seaweedfs/weed/glog"
  11. "github.com/seaweedfs/seaweedfs/weed/operation"
  12. "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
  13. )
// batchVacuumVolumeCheck asks every replica of vid, in parallel, for its
// garbage ratio and collects the locations whose ratio is at least
// garbageThreshold. It returns the list of qualifying locations, plus true
// only when no RPC failed and at least one location qualified.
func (t *Topology) batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vid needle.VolumeId,
	locationlist *VolumeLocationList, garbageThreshold float64) (*VolumeLocationList, bool) {
	// Buffered to the number of replicas so every goroutine can deliver its
	// result without blocking, even if the collector below already timed out.
	ch := make(chan int, locationlist.Length())
	errCount := int32(0)
	for index, dn := range locationlist.list {
		// index/url/vid are passed as arguments so each goroutine gets its
		// own copies of the loop variables.
		go func(index int, url pb.ServerAddress, vid needle.VolumeId) {
			err := operation.WithVolumeServerClient(false, url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
				resp, err := volumeServerClient.VacuumVolumeCheck(context.Background(), &volume_server_pb.VacuumVolumeCheckRequest{
					VolumeId: uint32(vid),
				})
				if err != nil {
					atomic.AddInt32(&errCount, 1)
					ch <- -1 // -1 means "this replica does not qualify"
					return err
				}
				if resp.GarbageRatio >= garbageThreshold {
					ch <- index // replica at this index qualifies for vacuum
				} else {
					ch <- -1
				}
				return nil
			})
			if err != nil {
				glog.V(0).Infof("Checking vacuuming %d on %s: %v", vid, url, err)
			}
		}(index, dn.ServerAddress(), vid)
	}
	vacuumLocationList := NewVolumeLocationList()
	// Wait budget scales with the configured volume size limit — roughly one
	// minute per GB, with a one-minute floor.
	waitTimeout := time.NewTimer(time.Minute * time.Duration(t.volumeSizeLimit/1024/1024/1000+1))
	defer waitTimeout.Stop()
	// Collect exactly one answer per replica, or bail out on timeout.
	for range locationlist.list {
		select {
		case index := <-ch:
			if index != -1 {
				vacuumLocationList.list = append(vacuumLocationList.list, locationlist.list[index])
			}
		case <-waitTimeout.C:
			// Some replicas never answered in time; report failure with
			// whatever has been collected so far.
			return vacuumLocationList, false
		}
	}
	// Reading errCount without atomics is safe here: every increment
	// happens before the channel send that the loop above has received.
	return vacuumLocationList, errCount == 0 && len(vacuumLocationList.list) > 0
}
// batchVacuumVolumeCompact removes vid from the writable set, then triggers
// compaction on all replicas in parallel, logging streamed progress reports.
// It returns true only if every replica finished compaction successfully
// before the timeout.
func (t *Topology) batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId,
	locationlist *VolumeLocationList, preallocate int64) bool {
	// Stop directing writes to this volume while it is being compacted.
	vl.accessLock.Lock()
	vl.removeFromWritable(vid)
	vl.accessLock.Unlock()
	// Buffered so each worker can report its outcome without blocking, even
	// if the collector below has already given up on a timeout.
	ch := make(chan bool, locationlist.Length())
	for index, dn := range locationlist.list {
		go func(index int, url pb.ServerAddress, vid needle.VolumeId) {
			glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
			err := operation.WithVolumeServerClient(true, url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
				stream, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{
					VolumeId:    uint32(vid),
					Preallocate: preallocate,
				})
				if err != nil {
					return err
				}
				// Drain the server-streamed progress updates until the
				// server closes the stream (io.EOF marks normal completion).
				for {
					resp, recvErr := stream.Recv()
					if recvErr != nil {
						if recvErr == io.EOF {
							break
						} else {
							return recvErr
						}
					}
					glog.V(0).Infof("%d vacuum %d on %s processed %d bytes, loadAvg %.02f%%",
						index, vid, url, resp.ProcessedBytes, resp.LoadAvg_1M*100)
				}
				return nil
			})
			if err != nil {
				glog.Errorf("Error when vacuuming %d on %s: %v", vid, url, err)
				ch <- false
			} else {
				glog.V(0).Infof("Complete vacuuming %d on %s", vid, url)
				ch <- true
			}
		}(index, dn.ServerAddress(), vid)
	}
	isVacuumSuccess := true
	// Compaction gets triple the check budget: roughly three minutes per GB
	// of the configured volume size limit.
	waitTimeout := time.NewTimer(3 * time.Minute * time.Duration(t.volumeSizeLimit/1024/1024/1000+1))
	defer waitTimeout.Stop()
	// One outcome per replica; all must succeed for the compaction to be
	// considered committable.
	for range locationlist.list {
		select {
		case canCommit := <-ch:
			isVacuumSuccess = isVacuumSuccess && canCommit
		case <-waitTimeout.C:
			return false
		}
	}
	return isVacuumSuccess
}
  109. func (t *Topology) batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, vacuumLocationList, locationList *VolumeLocationList) bool {
  110. isCommitSuccess := true
  111. isReadOnly := false
  112. for _, dn := range vacuumLocationList.list {
  113. glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url())
  114. err := operation.WithVolumeServerClient(false, dn.ServerAddress(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
  115. resp, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{
  116. VolumeId: uint32(vid),
  117. })
  118. if resp != nil && resp.IsReadOnly {
  119. isReadOnly = true
  120. }
  121. return err
  122. })
  123. if err != nil {
  124. glog.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err)
  125. isCommitSuccess = false
  126. } else {
  127. glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, dn.Url())
  128. }
  129. }
  130. //we should check the status of all replicas
  131. if len(locationList.list) > len(vacuumLocationList.list) {
  132. for _, dn := range locationList.list {
  133. isFound := false
  134. for _, dnVaccum := range vacuumLocationList.list {
  135. if dn.id == dnVaccum.id {
  136. isFound = true
  137. break
  138. }
  139. }
  140. if !isFound {
  141. err := operation.WithVolumeServerClient(false, dn.ServerAddress(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
  142. resp, err := volumeServerClient.VolumeStatus(context.Background(), &volume_server_pb.VolumeStatusRequest{
  143. VolumeId: uint32(vid),
  144. })
  145. if resp != nil && resp.IsReadOnly {
  146. isReadOnly = true
  147. }
  148. return err
  149. })
  150. if err != nil {
  151. glog.Errorf("Error when checking volume %d status on %s: %v", vid, dn.Url(), err)
  152. //we mark volume read-only, since the volume state is unknown
  153. isReadOnly = true
  154. }
  155. }
  156. }
  157. }
  158. if isCommitSuccess {
  159. for _, dn := range vacuumLocationList.list {
  160. vl.SetVolumeAvailable(dn, vid, isReadOnly)
  161. }
  162. }
  163. return isCommitSuccess
  164. }
  165. func (t *Topology) batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) {
  166. for _, dn := range locationlist.list {
  167. glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url())
  168. err := operation.WithVolumeServerClient(false, dn.ServerAddress(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
  169. _, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{
  170. VolumeId: uint32(vid),
  171. })
  172. return err
  173. })
  174. if err != nil {
  175. glog.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, dn.Url(), err)
  176. } else {
  177. glog.V(0).Infof("Complete cleaning up vacuum %d on %s", vid, dn.Url())
  178. }
  179. }
  180. }
  181. func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float64, volumeId uint32, collection string, preallocate int64) {
  182. // if there is vacuum going on, return immediately
  183. swapped := atomic.CompareAndSwapInt64(&t.vacuumLockCounter, 0, 1)
  184. if !swapped {
  185. return
  186. }
  187. defer atomic.StoreInt64(&t.vacuumLockCounter, 0)
  188. // now only one vacuum process going on
  189. glog.V(1).Infof("Start vacuum on demand with threshold: %f collection: %s volumeId: %d",
  190. garbageThreshold, collection, volumeId)
  191. for _, col := range t.collectionMap.Items() {
  192. c := col.(*Collection)
  193. if collection != "" && collection != c.Name {
  194. continue
  195. }
  196. for _, vl := range c.storageType2VolumeLayout.Items() {
  197. if vl != nil {
  198. volumeLayout := vl.(*VolumeLayout)
  199. if volumeId > 0 {
  200. vid := needle.VolumeId(volumeId)
  201. volumeLayout.accessLock.RLock()
  202. locationList, ok := volumeLayout.vid2location[vid]
  203. volumeLayout.accessLock.RUnlock()
  204. if ok {
  205. t.vacuumOneVolumeId(grpcDialOption, volumeLayout, c, garbageThreshold, locationList, vid, preallocate)
  206. }
  207. } else {
  208. t.vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate)
  209. }
  210. }
  211. }
  212. }
  213. }
  214. func (t *Topology) vacuumOneVolumeLayout(grpcDialOption grpc.DialOption, volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, preallocate int64) {
  215. volumeLayout.accessLock.RLock()
  216. tmpMap := make(map[needle.VolumeId]*VolumeLocationList)
  217. for vid, locationList := range volumeLayout.vid2location {
  218. tmpMap[vid] = locationList.Copy()
  219. }
  220. volumeLayout.accessLock.RUnlock()
  221. for vid, locationList := range tmpMap {
  222. t.vacuumOneVolumeId(grpcDialOption, volumeLayout, c, garbageThreshold, locationList, vid, preallocate)
  223. }
  224. }
  225. func (t *Topology) vacuumOneVolumeId(grpcDialOption grpc.DialOption, volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, locationList *VolumeLocationList, vid needle.VolumeId, preallocate int64) {
  226. volumeLayout.accessLock.RLock()
  227. isReadOnly := volumeLayout.readonlyVolumes.IsTrue(vid)
  228. isEnoughCopies := volumeLayout.enoughCopies(vid)
  229. volumeLayout.accessLock.RUnlock()
  230. if isReadOnly || !isEnoughCopies {
  231. return
  232. }
  233. glog.V(1).Infof("check vacuum on collection:%s volume:%d", c.Name, vid)
  234. if vacuumLocationList, needVacuum := t.batchVacuumVolumeCheck(
  235. grpcDialOption, vid, locationList, garbageThreshold); needVacuum {
  236. if t.batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, vacuumLocationList, preallocate) {
  237. t.batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, vacuumLocationList, locationList)
  238. } else {
  239. t.batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, vacuumLocationList)
  240. }
  241. }
  242. }