package filesys

import (
    "context"
    "os"
    "sort"
    "time"

    "github.com/seaweedfs/fuse"
    "github.com/seaweedfs/fuse/fs"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
)

const blockSize = 512

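// compile-time checks that File implements the fuse fs interfaces used below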
var _ = fs.Node(&File{})
var _ = fs.NodeIdentifier(&File{})
var _ = fs.NodeOpener(&File{})
var _ = fs.NodeFsyncer(&File{})
var _ = fs.NodeSetattrer(&File{})
var _ = fs.NodeGetxattrer(&File{})
var _ = fs.NodeSetxattrer(&File{})
var _ = fs.NodeRemovexattrer(&File{})
var _ = fs.NodeListxattrer(&File{})
var _ = fs.NodeForgetter(&File{})

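// File is a regular file node in the FUSE mount. It keeps references to its
// parent directory and the owning WFS, caches the filer entry, and tracks the
// open count and whether metadata changes still need to be persisted.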
type File struct {
    Name          string
    dir           *Dir
    wfs           *WFS
    entry         *filer_pb.Entry
    isOpen        int
    dirtyMetadata bool
    id            uint64
}

func (file *File) fullpath() util.FullPath {
    return util.NewFullPath(file.dir.FullPath(), file.Name)
}

func (file *File) Id() uint64 {
    return file.id
}

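// Attr fills in the FUSE attributes from the (possibly reloaded) filer entry.
// While the file is open, the size reported is the in-progress FileSize rather
// than the size computed from the committed chunks.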
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {

    glog.V(4).Infof("file Attr %s, open:%v existing:%v", file.fullpath(), file.isOpen, attr)

    entry, err := file.maybeLoadEntry(ctx)
    if err != nil {
        return err
    }

    if entry == nil {
        return fuse.ENOENT
    }

    attr.Inode = file.Id()
    attr.Valid = time.Second
    attr.Mode = os.FileMode(entry.Attributes.FileMode)
    attr.Size = filer.FileSize(entry)
    if file.isOpen > 0 {
        attr.Size = entry.Attributes.FileSize
        glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
    }
    attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
    attr.Ctime = time.Unix(entry.Attributes.Mtime, 0)
    attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
    attr.Gid = entry.Attributes.Gid
    attr.Uid = entry.Attributes.Uid
    attr.Blocks = attr.Size/blockSize + 1
    attr.BlockSize = uint32(file.wfs.option.ChunkSizeLimit)
    if entry.HardLinkCounter > 0 {
        attr.Nlink = uint32(entry.HardLinkCounter)
    }

    return nil
}

func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {

    // glog.V(4).Infof("file Getxattr %s", file.fullpath())

    entry, err := file.maybeLoadEntry(ctx)
    if err != nil {
        return err
    }

    return getxattr(entry, req, resp)
}

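// Open acquires a file handle from the WFS handle table and hands its id back
// to the kernel; subsequent reads and writes go through that handle.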
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {

    glog.V(4).Infof("file %v open %+v", file.fullpath(), req)

    // resp.Flags |= fuse.OpenDirectIO

    handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)

    resp.Handle = fuse.HandleID(handle.handle)

    glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)

    return handle, nil
}

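// Setattr applies chmod/chown/truncate/utimens style changes to the cached
// entry. A shrinking truncate trims or drops chunks beyond the new size and
// invalidates the view cache of any open handle. Changes are written back to
// the filer only when the file is not currently open and metadata is dirty.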
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {

    glog.V(4).Infof("%v file setattr %+v mode=%d", file.fullpath(), req, req.Mode)

    entry, err := file.maybeLoadEntry(ctx)
    if err != nil {
        return err
    }

    if req.Valid.Size() {

        glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(entry.Chunks))
        if req.Size < filer.FileSize(entry) {
            // fmt.Printf("truncate %v \n", fullPath)
            var chunks []*filer_pb.FileChunk
            var truncatedChunks []*filer_pb.FileChunk
            for _, chunk := range entry.Chunks {
                int64Size := int64(chunk.Size)
                if chunk.Offset+int64Size > int64(req.Size) {
                    // this chunk is truncated
                    int64Size = int64(req.Size) - chunk.Offset
                    if int64Size > 0 {
                        chunks = append(chunks, chunk)
                        glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
                        chunk.Size = uint64(int64Size)
                    } else {
                        glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString())
                        truncatedChunks = append(truncatedChunks, chunk)
                    }
                }
            }
            // set the new chunks and reset entry cache
            entry.Chunks = chunks
            file.wfs.handlesLock.Lock()
            existingHandle, found := file.wfs.handles[file.Id()]
            file.wfs.handlesLock.Unlock()
            if found {
                existingHandle.entryViewCache = nil
            }
        }
        entry.Attributes.Mtime = time.Now().Unix()
        entry.Attributes.FileSize = req.Size
        file.dirtyMetadata = true
    }

    if req.Valid.Mode() && entry.Attributes.FileMode != uint32(req.Mode) {
        entry.Attributes.FileMode = uint32(req.Mode)
        entry.Attributes.Mtime = time.Now().Unix()
        file.dirtyMetadata = true
    }

    if req.Valid.Uid() && entry.Attributes.Uid != req.Uid {
        entry.Attributes.Uid = req.Uid
        entry.Attributes.Mtime = time.Now().Unix()
        file.dirtyMetadata = true
    }

    if req.Valid.Gid() && entry.Attributes.Gid != req.Gid {
        entry.Attributes.Gid = req.Gid
        entry.Attributes.Mtime = time.Now().Unix()
        file.dirtyMetadata = true
    }

    if req.Valid.Crtime() {
        entry.Attributes.Crtime = req.Crtime.Unix()
        entry.Attributes.Mtime = time.Now().Unix()
        file.dirtyMetadata = true
    }

    if req.Valid.Mtime() && entry.Attributes.Mtime != req.Mtime.Unix() {
        entry.Attributes.Mtime = req.Mtime.Unix()
        file.dirtyMetadata = true
    }

    if req.Valid.Handle() {
        // fmt.Printf("file handle => %d\n", req.Handle)
    }

    if file.isOpen > 0 {
        return nil
    }

    if !file.dirtyMetadata {
        return nil
    }

    return file.saveEntry(entry)
}

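// Setxattr stores an extended attribute on the entry. Like Setattr, it only
// persists to the filer when the file is not currently open.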
func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {

    glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)

    entry, err := file.maybeLoadEntry(ctx)
    if err != nil {
        return err
    }

    if err := setxattr(entry, req); err != nil {
        return err
    }
    file.dirtyMetadata = true

    if file.isOpen > 0 {
        return nil
    }

    return file.saveEntry(entry)
}

func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {

    glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)

    entry, err := file.maybeLoadEntry(ctx)
    if err != nil {
        return err
    }

    if err := removexattr(entry, req); err != nil {
        return err
    }
    file.dirtyMetadata = true

    if file.isOpen > 0 {
        return nil
    }

    return file.saveEntry(entry)
}

func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {

    glog.V(4).Infof("file Listxattr %s", file.fullpath())

    entry, err := file.maybeLoadEntry(ctx)
    if err != nil {
        return err
    }

    if err := listxattr(entry, req, resp); err != nil {
        return err
    }

    return nil
}

func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
    // write the file chunks to the filerGrpcAddress
    glog.V(4).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)

    return file.wfs.Fsync(file, req.Header)
}

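// Forget is called when the kernel drops this node from its cache; release the
// handle registered under the inode derived from the file's full path.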
func (file *File) Forget() {
    t := util.NewFullPath(file.dir.FullPath(), file.Name)
    glog.V(4).Infof("Forget file %s", t)
    file.wfs.ReleaseHandle(t, fuse.HandleID(t.AsInode(file.entry.FileMode())))
}

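// maybeLoadEntry picks the entry to operate on: the one attached to an open
// handle if there is one, otherwise the node's cached entry. It goes back to
// the filer only when nothing is cached or the entry is a hard link, which
// must always be reloaded to observe changes made through other links.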
func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, err error) {

    file.wfs.handlesLock.Lock()
    handle, found := file.wfs.handles[file.Id()]
    file.wfs.handlesLock.Unlock()
    entry = file.entry
    if found {
        // glog.V(4).Infof("maybeLoadEntry found opened file %s/%s", file.dir.FullPath(), file.Name)
        entry = handle.f.entry
    }

    if entry != nil {
        if len(entry.HardLinkId) == 0 {
            // no reload needed; only hard links are always reloaded
            return entry, nil
        }
    }

    entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
    if err != nil {
        glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
        return entry, err
    }
    if entry != nil {
        // file.entry = entry
    } else {
        glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
    }

    return entry, nil
}

func lessThan(a, b *filer_pb.FileChunk) bool {
    if a.Mtime == b.Mtime {
        return a.Fid.FileKey < b.Fid.FileKey
    }
    return a.Mtime < b.Mtime
}

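// addChunks records newly written chunks on the cached entry so that reads
// through this node see them before the entry is flushed to the filer.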
func (file *File) addChunks(chunks []*filer_pb.FileChunk) {

    // find the earliest incoming chunk
    newChunks := chunks
    earliestChunk := newChunks[0]
    for i := 1; i < len(newChunks); i++ {
        if lessThan(earliestChunk, newChunks[i]) {
            earliestChunk = newChunks[i]
        }
    }

    entry := file.getEntry()
    if entry == nil {
        return
    }

    // pick out-of-order chunks from existing chunks
    for _, chunk := range entry.Chunks {
        if lessThan(earliestChunk, chunk) {
            chunks = append(chunks, chunk)
        }
    }

    // sort incoming chunks
    sort.Slice(chunks, func(i, j int) bool {
        return lessThan(chunks[i], chunks[j])
    })

    glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(entry.Chunks), len(chunks))

    entry.Chunks = append(entry.Chunks, newChunks...)
}

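// saveEntry writes the entry back to the filer, mirrors it into the local meta
// cache, and clears the dirty-metadata flag on success.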
func (file *File) saveEntry(entry *filer_pb.Entry) error {
    return file.wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {

        file.wfs.mapPbIdFromLocalToFiler(entry)
        defer file.wfs.mapPbIdFromFilerToLocal(entry)

        request := &filer_pb.CreateEntryRequest{
            Directory:  file.dir.FullPath(),
            Entry:      entry,
            Signatures: []int32{file.wfs.signature},
        }

        glog.V(4).Infof("save file entry: %v", request)
        _, err := client.CreateEntry(context.Background(), request)
        if err != nil {
            glog.Errorf("CreateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
            return fuse.EIO
        }

        file.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

        file.dirtyMetadata = false

        return nil
    })
}

func (file *File) getEntry() *filer_pb.Entry {
    return file.entry
}

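// downloadRemoteEntry asks the filer to cache a remote-mounted object onto the
// local cluster and returns the refreshed entry describing the cached copy.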
func (file *File) downloadRemoteEntry(entry *filer_pb.Entry) (*filer_pb.Entry, error) {
    err := file.wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {

        request := &filer_pb.CacheRemoteObjectToLocalClusterRequest{
            Directory: file.dir.FullPath(),
            Name:      entry.Name,
        }

        glog.V(4).Infof("download entry: %v", request)
        resp, err := client.CacheRemoteObjectToLocalCluster(context.Background(), request)
        if err != nil {
            glog.Errorf("CacheRemoteObjectToLocalCluster file %s/%s: %v", file.dir.FullPath(), file.Name, err)
            return fuse.EIO
        }

        entry = resp.Entry

        file.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, resp.Entry))

        file.dirtyMetadata = false

        return nil
    })

    return entry, err
}