package filesys

import (
	"context"
	"fmt"
	"math"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/karlseguin/ccache"
	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/fuse"
	"github.com/seaweedfs/fuse/fs"
)

type Option struct {
	FilerGrpcAddress   string
	GrpcDialOption     grpc.DialOption
	FilerMountRootPath string
	Collection         string
	Replication        string
	TtlSec             int32
	ChunkSizeLimit     int64
	DataCenter         string
	DirListCacheLimit  int64
	EntryCacheTtl      time.Duration
	Umask              os.FileMode

	MountUid   uint32
	MountGid   uint32
	MountMode  os.FileMode
	MountCtime time.Time
	MountMtime time.Time

	OutsideContainerClusterMode bool // whether the mount runs outside the SeaweedFS container cluster
	Cipher                      bool // whether to encrypt data on the volume server
}

var _ = fs.FS(&WFS{})
var _ = fs.FSStatfser(&WFS{})

type WFS struct {
	option                    *Option
	listDirectoryEntriesCache *ccache.Cache

	// contains all open handles, protected by handlesLock
	handlesLock       sync.Mutex
	handles           []*FileHandle
	pathToHandleIndex map[filer2.FullPath]int

	bufPool sync.Pool

	stats statsCache

	// nodes, protected by nodesLock
	nodesLock sync.Mutex
	nodes     map[uint64]fs.Node
	root      fs.Node
}

type statsCache struct {
	filer_pb.StatisticsResponse
	lastChecked int64 // unix time in seconds
}

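// NewSeaweedFileSystem creates a WFS from the mount options, initializing the
// directory entry cache, the per-chunk buffer pool, and the root directory node.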
func NewSeaweedFileSystem(option *Option) *WFS {
	wfs := &WFS{
		option:                    option,
		listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)),
		pathToHandleIndex:         make(map[filer2.FullPath]int),
		bufPool: sync.Pool{
			New: func() interface{} {
				return make([]byte, option.ChunkSizeLimit)
			},
		},
		nodes: make(map[uint64]fs.Node),
	}

	wfs.root = &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}

	return wfs
}

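// Root returns the root directory node of the mounted file system. Implements fs.FS.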
func (wfs *WFS) Root() (fs.Node, error) {
	return wfs.root, nil
}

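// WithFilerClient runs fn against a filer gRPC client backed by a cached connection.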
func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {

	err := pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
		client := filer_pb.NewSeaweedFilerClient(grpcConnection)
		return fn(client)
	}, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)

	if err == nil {
		return nil
	}
	return err
}

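// AcquireHandle returns the already-open file handle for the given file if one
// exists, otherwise it allocates a new handle, reusing a freed slot when possible.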
func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {

	fullpath := file.fullpath()
	glog.V(4).Infof("%s AcquireHandle uid=%d gid=%d", fullpath, uid, gid)

	wfs.handlesLock.Lock()
	defer wfs.handlesLock.Unlock()

	index, found := wfs.pathToHandleIndex[fullpath]
	if found && wfs.handles[index] != nil {
		glog.V(2).Infoln(fullpath, "found fileHandle id", index)
		return wfs.handles[index]
	}

	fileHandle = newFileHandle(file, uid, gid)
	for i, h := range wfs.handles {
		if h == nil {
			wfs.handles[i] = fileHandle
			fileHandle.handle = uint64(i)
			wfs.pathToHandleIndex[fullpath] = i
			glog.V(4).Infof("%s reuse fh %d", fullpath, fileHandle.handle)
			return
		}
	}

	wfs.handles = append(wfs.handles, fileHandle)
	fileHandle.handle = uint64(len(wfs.handles) - 1)
	wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle)
	glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle)

	return
}

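// ReleaseHandle removes the handle from the path index and frees its slot for reuse.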
func (wfs *WFS) ReleaseHandle(fullpath filer2.FullPath, handleId fuse.HandleID) {
	wfs.handlesLock.Lock()
	defer wfs.handlesLock.Unlock()

	glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))

	delete(wfs.pathToHandleIndex, fullpath)
	if int(handleId) < len(wfs.handles) {
		wfs.handles[int(handleId)] = nil
	}

	return
}

// Statfs is called to obtain file system metadata. Implements fuse.FSStatfser.
func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {

	glog.V(4).Infof("reading fs stats: %+v", req)

	// refresh the cached statistics from the filer at most once every 20 seconds
	if wfs.stats.lastChecked < time.Now().Unix()-20 {

		err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

			request := &filer_pb.StatisticsRequest{
				Collection:  wfs.option.Collection,
				Replication: wfs.option.Replication,
				Ttl:         fmt.Sprintf("%ds", wfs.option.TtlSec),
			}

			glog.V(4).Infof("reading filer stats: %+v", request)
			resp, err := client.Statistics(context.Background(), request)
			if err != nil {
				glog.V(0).Infof("reading filer stats %v: %v", request, err)
				return err
			}
			glog.V(4).Infof("read filer stats: %+v", resp)

			wfs.stats.TotalSize = resp.TotalSize
			wfs.stats.UsedSize = resp.UsedSize
			wfs.stats.FileCount = resp.FileCount
			wfs.stats.lastChecked = time.Now().Unix()

			return nil
		})
		if err != nil {
			glog.V(0).Infof("filer Statistics: %v", err)
			return err
		}
	}

	totalDiskSize := wfs.stats.TotalSize
	usedDiskSize := wfs.stats.UsedSize
	actualFileCount := wfs.stats.FileCount

	// Compute the total number of available blocks
	resp.Blocks = totalDiskSize / blockSize

	// Compute the number of used blocks
	numBlocks := uint64(usedDiskSize / blockSize)

	// Report the number of free and available blocks for the block size
	resp.Bfree = resp.Blocks - numBlocks
	resp.Bavail = resp.Blocks - numBlocks
	resp.Bsize = uint32(blockSize)

	// Report the total number of possible files in the file system (and those free)
	resp.Files = math.MaxInt64
	resp.Ffree = math.MaxInt64 - actualFileCount

	// Report the maximum length of a name and the minimum fragment size
	resp.Namelen = 1024
	resp.Frsize = uint32(blockSize)

	return nil
}

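// cacheGet returns the cached filer entry for the path, or nil if missing or expired.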
func (wfs *WFS) cacheGet(path filer2.FullPath) *filer_pb.Entry {
	item := wfs.listDirectoryEntriesCache.Get(string(path))
	if item != nil && !item.Expired() {
		return item.Value().(*filer_pb.Entry)
	}
	return nil
}

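// cacheSet caches the entry under the path with the given TTL; a nil entry evicts the path.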
func (wfs *WFS) cacheSet(path filer2.FullPath, entry *filer_pb.Entry, ttl time.Duration) {
	if entry == nil {
		wfs.listDirectoryEntriesCache.Delete(string(path))
	} else {
		wfs.listDirectoryEntriesCache.Set(string(path), entry, ttl)
	}
}

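// cacheDelete evicts the cached entry for the path.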
func (wfs *WFS) cacheDelete(path filer2.FullPath) {
	wfs.listDirectoryEntriesCache.Delete(string(path))
}

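// getNode returns the cached fs.Node for the path's inode, creating it with fn on a miss.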
func (wfs *WFS) getNode(fullpath filer2.FullPath, fn func() fs.Node) fs.Node {
	wfs.nodesLock.Lock()
	defer wfs.nodesLock.Unlock()

	node, found := wfs.nodes[fullpath.AsInode()]
	if found {
		return node
	}
	node = fn()
	if node != nil {
		wfs.nodes[fullpath.AsInode()] = node
	}
	return node
}

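// forgetNode drops the cached fs.Node for the path's inode.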
func (wfs *WFS) forgetNode(fullpath filer2.FullPath) {
	wfs.nodesLock.Lock()
	defer wfs.nodesLock.Unlock()

	delete(wfs.nodes, fullpath.AsInode())
}

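// AdjustedUrl rewrites a volume server address when the mount runs outside the
// SeaweedFS container cluster, replacing its host with the filer's host so the
// address stays reachable; otherwise the address is returned unchanged.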
func (wfs *WFS) AdjustedUrl(hostAndPort string) string {
	if !wfs.option.OutsideContainerClusterMode {
		return hostAndPort
	}
	colonIndex := strings.Index(hostAndPort, ":")
	if colonIndex < 0 {
		return hostAndPort
	}
	filerColonIndex := strings.Index(wfs.option.FilerGrpcAddress, ":")
	if filerColonIndex < 0 {
		return hostAndPort
	}
	return fmt.Sprintf("%s:%s", wfs.option.FilerGrpcAddress[:filerColonIndex], hostAndPort[colonIndex+1:])
}