You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

240 lines
6.8 KiB

3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
Fix dead lock (#5815) * reduce locks to avoid dead lock Flush->FlushData->uplloadPipeline.FluahAll uploaderCount>0 goroutine 1 [sync.Cond.Wait, 71 minutes]: sync.runtime_notifyListWait(0xc0007ae4d0, 0x0) /usr/local/go/src/runtime/sema.go:569 +0x159 sync.(*Cond).Wait(0xc001a59290?) /usr/local/go/src/sync/cond.go:70 +0x85 github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*UploadPipeline).waitForCurrentWritersToComplete(0xc0002ee4d0) /github/workspace/weed/mount/page_writer/upload_pipeline_lock.go:58 +0x32 github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*UploadPipeline).FlushAll(0xc0002ee4d0) /github/workspace/weed/mount/page_writer/upload_pipeline.go:151 +0x25 github.com/seaweedfs/seaweedfs/weed/mount.(*ChunkedDirtyPages).FlushData(0xc00087e840) /github/workspace/weed/mount/dirty_pages_chunked.go:54 +0x29 github.com/seaweedfs/seaweedfs/weed/mount.(*PageWriter).FlushData(...) /github/workspace/weed/mount/page_writer.go:50 github.com/seaweedfs/seaweedfs/weed/mount.(*WFS).doFlush(0xc0006ad600, 0xc00030d380, 0x0, 0x0) /github/workspace/weed/mount/weedfs_file_sync.go:101 +0x169 github.com/seaweedfs/seaweedfs/weed/mount.(*WFS).Flush(0xc0006ad600, 0xc001a594a8?, 0xc0004c1ca0) /github/workspace/weed/mount/weedfs_file_sync.go:59 +0x48 github.com/hanwen/go-fuse/v2/fuse.doFlush(0xc0000da870?, 0xc0004c1b08) SaveContent -> MemChunk.RLock -> ChunkedDirtyPages.saveChunkedFileIntervalToStorage pages.fh.AddChunks([]*filer_pb.FileChunk{chunk}) fh.entryLock.Lock() sync.(*RWMutex).Lock(0x0?) /usr/local/go/src/sync/rwmutex.go:146 +0x31 github.com/seaweedfs/seaweedfs/weed/mount.(*FileHandle).AddChunks(0xc00030d380, {0xc00028bdc8, 0x1, 0x1}) /github/workspace/weed/mount/filehandle.go:93 +0x45 github.com/seaweedfs/seaweedfs/weed/mount.(*ChunkedDirtyPages).saveChunkedFileIntervalToStorage(0xc00087e840, {0x2be7ac0, 0xc00018d9e0}, 0x0, 0x121, 0x17e3c624565ace45, 0x1?) 
/github/workspace/weed/mount/dirty_pages_chunked.go:80 +0x2d4 github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*MemChunk).SaveContent(0xc0008d9130, 0xc0008093e0) /github/workspace/weed/mount/page_writer/page_chunk_mem.go:115 +0x112 github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*UploadPipeline).moveToSealed.func1() /github/workspace/weed/mount/page_writer/upload_pipeline.go:187 +0x55 github.com/seaweedfs/seaweedfs/weed/util.(*LimitedConcurrentExecutor).Execute.func1() /github/workspace/weed/util/limited_executor.go:38 +0x62 created by github.com/seaweedfs/seaweedfs/weed/util.(*LimitedConcurrentExecutor).Execute in goroutine 1 /github/workspace/weed/util/limited_executor.go:33 +0x97 On metadata update fh.entryLock.Lock() fh.dirtyPages.Destroy() up.chunksLock.Lock => each sealed chunk.FreeReference => MemChunk.Lock goroutine 134 [sync.RWMutex.Lock, 71 minutes]: sync.runtime_SemacquireRWMutex(0xc0007c3558?, 0xea?, 0x3fb0800?) /usr/local/go/src/runtime/sema.go:87 +0x25 sync.(*RWMutex).Lock(0xc0007c35a8?) /usr/local/go/src/sync/rwmutex.go:151 +0x6a github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*MemChunk).FreeResource(0xc0008d9130) /github/workspace/weed/mount/page_writer/page_chunk_mem.go:38 +0x2a github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*SealedChunk).FreeReference(0xc00071cdb0, {0xc0006ba1a0, 0x20}) /github/workspace/weed/mount/page_writer/upload_pipeline.go:38 +0xb7 github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*UploadPipeline).Shutdown(0xc0002ee4d0) /github/workspace/weed/mount/page_writer/upload_pipeline.go:220 +0x185 github.com/seaweedfs/seaweedfs/weed/mount.(*ChunkedDirtyPages).Destroy(0xc0008cea40?) /github/workspace/weed/mount/dirty_pages_chunked.go:87 +0x17 github.com/seaweedfs/seaweedfs/weed/mount.(*PageWriter).Destroy(...) /github/workspace/weed/mount/page_writer.go:78 github.com/seaweedfs/seaweedfs/weed/mount.NewSeaweedFileSystem.func3({0xc00069a6c0, 0x30}, 0x6?) 
/github/workspace/weed/mount/weedfs.go:119 +0x17a github.com/seaweedfs/seaweedfs/weed/mount/meta_cache.NewMetaCache.func1({0xc00069a6c0?, 0xc00069a480?}, 0x4015b40?) /github/workspace/weed/mount/meta_cache/meta_cache.go:37 +0x1c github.com/seaweedfs/seaweedfs/weed/mount/meta_cache.SubscribeMetaEvents.func1(0xc000661810) /github/workspace/weed/mount/meta_cache/meta_cache_subscribe.go:43 +0x570 * use locked entry everywhere * modifiable remote entry * skip locking after getting lock from fhLockTable
5 months ago
3 years ago
  1. package mount
  2. import (
  3. "context"
  4. "github.com/hanwen/go-fuse/v2/fuse"
  5. "github.com/seaweedfs/seaweedfs/weed/filer"
  6. "github.com/seaweedfs/seaweedfs/weed/glog"
  7. "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache"
  8. "math"
  9. "sync"
  10. )
// DirectoryHandleId identifies one open directory stream (one opendir call).
type DirectoryHandleId uint64

const (
	// The first two offsets of a directory listing are reserved for the
	// synthesized "." and ".." entries, so real entries start at offset 2.
	directoryStreamBaseOffset = 2 // . & ..
)
// DirectoryHandle carries the paging state of one directory read stream,
// letting successive readdir calls resume or replay from a given offset.
type DirectoryHandle struct {
	isFinished        bool           // true once the full listing has been consumed
	entryStream       []*filer.Entry // entries listed so far, kept for replay at earlier offsets
	entryStreamOffset uint64         // stream offset corresponding to entryStream[0]
}
  20. func (dh *DirectoryHandle) reset() {
  21. *dh = DirectoryHandle{
  22. isFinished: false,
  23. entryStream: []*filer.Entry{},
  24. entryStreamOffset: directoryStreamBaseOffset,
  25. }
  26. }
// DirectoryHandleToInode is the table of currently open directory handles,
// keyed by handle id and guarded by the embedded mutex.
type DirectoryHandleToInode struct {
	// shares the file handle id sequencer with FileHandleToInode{nextFh}
	sync.Mutex
	dir2inode map[DirectoryHandleId]*DirectoryHandle
}
  32. func NewDirectoryHandleToInode() *DirectoryHandleToInode {
  33. return &DirectoryHandleToInode{
  34. dir2inode: make(map[DirectoryHandleId]*DirectoryHandle),
  35. }
  36. }
  37. func (wfs *WFS) AcquireDirectoryHandle() (DirectoryHandleId, *DirectoryHandle) {
  38. wfs.fhmap.Lock()
  39. fh := wfs.fhmap.nextFh
  40. wfs.fhmap.nextFh++
  41. wfs.fhmap.Unlock()
  42. wfs.dhmap.Lock()
  43. defer wfs.dhmap.Unlock()
  44. dh := new(DirectoryHandle)
  45. dh.reset()
  46. wfs.dhmap.dir2inode[DirectoryHandleId(fh)] = dh
  47. return DirectoryHandleId(fh), dh
  48. }
  49. func (wfs *WFS) GetDirectoryHandle(dhid DirectoryHandleId) *DirectoryHandle {
  50. wfs.dhmap.Lock()
  51. defer wfs.dhmap.Unlock()
  52. if dh, found := wfs.dhmap.dir2inode[dhid]; found {
  53. return dh
  54. }
  55. dh := new(DirectoryHandle)
  56. dh.reset()
  57. wfs.dhmap.dir2inode[dhid] = dh
  58. return dh
  59. }
  60. func (wfs *WFS) ReleaseDirectoryHandle(dhid DirectoryHandleId) {
  61. wfs.dhmap.Lock()
  62. defer wfs.dhmap.Unlock()
  63. delete(wfs.dhmap.dir2inode, dhid)
  64. }
  65. // Directory handling
  66. /** Open directory
  67. *
  68. * Unless the 'default_permissions' mount option is given,
  69. * this method should check if opendir is permitted for this
  70. * directory. Optionally opendir may also return an arbitrary
  71. * filehandle in the fuse_file_info structure, which will be
  72. * passed to readdir, releasedir and fsyncdir.
  73. */
  74. func (wfs *WFS) OpenDir(cancel <-chan struct{}, input *fuse.OpenIn, out *fuse.OpenOut) (code fuse.Status) {
  75. if !wfs.inodeToPath.HasInode(input.NodeId) {
  76. return fuse.ENOENT
  77. }
  78. dhid, _ := wfs.AcquireDirectoryHandle()
  79. out.Fh = uint64(dhid)
  80. return fuse.OK
  81. }
  82. /** Release directory
  83. *
  84. * If the directory has been removed after the call to opendir, the
  85. * path parameter will be NULL.
  86. */
  87. func (wfs *WFS) ReleaseDir(input *fuse.ReleaseIn) {
  88. wfs.ReleaseDirectoryHandle(DirectoryHandleId(input.Fh))
  89. }
/** Synchronize directory contents
 *
 * If the directory has been removed after the call to opendir, the
 * path parameter will be NULL.
 *
 * If the datasync parameter is non-zero, then only the user data
 * should be flushed, not the meta data
 */
// No-op: this handler keeps no buffered directory state of its own to flush,
// so the request always succeeds immediately.
func (wfs *WFS) FsyncDir(cancel <-chan struct{}, input *fuse.FsyncIn) (code fuse.Status) {
	return fuse.OK
}
/** Read directory
 *
 * The filesystem may choose between two modes of operation:
 *
 * 1) The readdir implementation ignores the offset parameter, and
 * passes zero to the filler function's offset. The filler
 * function will not return '1' (unless an error happens), so the
 * whole directory is read in a single readdir operation.
 *
 * 2) The readdir implementation keeps track of the offsets of the
 * directory entries. It uses the offset parameter and always
 * passes non-zero offset to the filler function. When the buffer
 * is full (or an error happens) the filler function will return
 * '1'.
 */
// ReadDir streams directory entries without per-entry attribute data;
// the shared implementation is doReadDirectory with isPlusMode=false.
func (wfs *WFS) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) (code fuse.Status) {
	return wfs.doReadDirectory(input, out, false)
}
// ReadDirPlus streams directory entries together with lookup/attribute data
// (READDIRPLUS); the shared implementation is doReadDirectory with isPlusMode=true.
func (wfs *WFS) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) (code fuse.Status) {
	return wfs.doReadDirectory(input, out, true)
}
// doReadDirectory serves one READDIR/READDIRPLUS request. It replays entries
// already cached on the directory handle when the kernel re-reads at an
// earlier offset, then continues listing from the meta cache, caching each
// new entry. isPlusMode additionally emits lookup/attribute data per entry.
func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPlusMode bool) fuse.Status {
	dh := wfs.GetDirectoryHandle(DirectoryHandleId(input.Fh))
	if input.Offset == 0 {
		// rewinddir: restart the stream from scratch
		dh.reset()
	} else if dh.isFinished && input.Offset >= dh.entryStreamOffset {
		// the whole listing is already cached; if the requested offset is at
		// or past the end of the cache there is nothing left to emit
		entryCurrentIndex := input.Offset - dh.entryStreamOffset
		if uint64(len(dh.entryStream)) <= entryCurrentIndex {
			return fuse.OK
		}
	}
	isEarlyTerminated := false
	dirPath, code := wfs.inodeToPath.GetPath(input.NodeId)
	if code != fuse.OK {
		return code
	}
	var dirEntry fuse.DirEntry
	// processEachEntryFn emits one entry into the kernel buffer; it returns
	// false when the buffer is full, which ends this request early.
	processEachEntryFn := func(entry *filer.Entry) bool {
		dirEntry.Name = entry.Name()
		dirEntry.Mode = toSyscallMode(entry.Mode)
		inode := wfs.inodeToPath.Lookup(dirPath.Child(dirEntry.Name), entry.Crtime.Unix(), entry.IsDirectory(), len(entry.HardLinkId) > 0, entry.Inode, isPlusMode)
		dirEntry.Ino = inode
		if !isPlusMode {
			if !out.AddDirEntry(dirEntry) {
				isEarlyTerminated = true
				return false
			}
		} else {
			entryOut := out.AddDirLookupEntry(dirEntry)
			if entryOut == nil {
				isEarlyTerminated = true
				return false
			}
			if fh, found := wfs.fhmap.FindFileHandle(inode); found {
				// the file is currently open: prefer the file handle's entry,
				// which is presumably fresher than the cached listing entry
				glog.V(4).Infof("readdir opened file %s", dirPath.Child(dirEntry.Name))
				entry = filer.FromPbEntry(string(dirPath), fh.GetEntry().GetEntry())
			}
			wfs.outputFilerEntry(entryOut, inode, entry)
		}
		return true
	}
	if input.Offset < directoryStreamBaseOffset {
		// synthesize "." and ".." which are not part of the cached stream
		if !isPlusMode {
			if input.Offset == 0 {
				out.AddDirEntry(fuse.DirEntry{Mode: fuse.S_IFDIR, Name: "."})
			}
			out.AddDirEntry(fuse.DirEntry{Mode: fuse.S_IFDIR, Name: ".."})
		} else {
			if input.Offset == 0 {
				out.AddDirLookupEntry(fuse.DirEntry{Mode: fuse.S_IFDIR, Name: "."})
			}
			out.AddDirLookupEntry(fuse.DirEntry{Mode: fuse.S_IFDIR, Name: ".."})
		}
		input.Offset = directoryStreamBaseOffset
	}
	var lastEntryName string
	if input.Offset >= dh.entryStreamOffset {
		if input.Offset > dh.entryStreamOffset {
			// drop cached entries before the one preceding the requested
			// offset, remembering its name so the meta-cache listing below
			// can resume right after it
			entryPreviousIndex := (input.Offset - dh.entryStreamOffset) - 1
			if uint64(len(dh.entryStream)) > entryPreviousIndex {
				lastEntryName = dh.entryStream[entryPreviousIndex].Name()
				dh.entryStream = dh.entryStream[entryPreviousIndex:]
				dh.entryStreamOffset = input.Offset - 1
			}
		}
		// replay entries already cached at and after the requested offset
		entryCurrentIndex := input.Offset - dh.entryStreamOffset
		for uint64(len(dh.entryStream)) > entryCurrentIndex {
			entry := dh.entryStream[entryCurrentIndex]
			if processEachEntryFn(entry) {
				lastEntryName = entry.Name()
				entryCurrentIndex++
			} else {
				// early terminated
				return fuse.OK
			}
		}
	}
	var err error
	if err = meta_cache.EnsureVisited(wfs.metaCache, wfs, dirPath); err != nil {
		glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
		return fuse.EIO
	}
	// continue listing after the last replayed entry, appending each newly
	// listed entry to the cache so later offsets can be served from it
	listErr := wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, lastEntryName, false, int64(math.MaxInt32), func(entry *filer.Entry) bool {
		dh.entryStream = append(dh.entryStream, entry)
		return processEachEntryFn(entry)
	})
	if listErr != nil {
		glog.Errorf("list meta cache: %v", listErr)
		return fuse.EIO
	}
	if !isEarlyTerminated {
		// the kernel buffer held everything: the full listing is now cached
		dh.isFinished = true
	}
	return fuse.OK
}