package filer

import (
	"context"
	"errors"
	"fmt"
	"os"
	"strings"
	"time"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)
const (
	// LogFlushInterval is how often the local metadata log buffer is flushed.
	LogFlushInterval = time.Minute
	PaginationSize   = 1024 * 256
)

var (
	// OS_UID and OS_GID are the uid/gid of the filer process, used as the
	// default owner of the synthesized root directory entry.
	OS_UID = uint32(os.Getuid())
	OS_GID = uint32(os.Getgid())

	// ErrUnsupportedListDirectoryPrefixed is returned by stores that do not
	// support prefixed directory listing.
	ErrUnsupportedListDirectoryPrefixed = errors.New("UNSUPPORTED")
)
// Filer manages file and directory metadata on top of a pluggable FilerStore,
// keeps a connection to the masters, and buffers local metadata change events.
type Filer struct {
	Store               *FilerStoreWrapper
	MasterClient        *wdclient.MasterClient
	fileIdDeletionQueue *util.UnboundedQueue
	GrpcDialOption      grpc.DialOption
	DirBucketsPath      string
	FsyncBuckets        []string
	buckets             *FilerBuckets
	Cipher              bool
	LocalMetaLogBuffer  *log_buffer.LogBuffer
	metaLogCollection   string
	metaLogReplication  string
	MetaAggregator      *MetaAggregator
	Signature           int32
}
// NewFiler creates a Filer connected to the given masters and starts the
// background loop that processes queued file id deletions.
func NewFiler(masters []string, grpcDialOption grpc.DialOption,
	filerHost string, filerGrpcPort uint32, collection string, replication string, notifyFn func()) *Filer {
	f := &Filer{
		MasterClient:        wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, masters),
		fileIdDeletionQueue: util.NewUnboundedQueue(),
		GrpcDialOption:      grpcDialOption,
		Signature:           util.RandomInt32(),
	}
	f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, f.logFlushFunc, notifyFn)
	f.metaLogCollection = collection
	f.metaLogReplication = replication

	go f.loopProcessingDeletion()

	return f
}
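
// Example (sketch): constructing a filer and wiring a store. The master
// address, host/port, and the store value are illustrative assumptions; any
// FilerStore implementation can be passed to SetStore.
//
//	f := NewFiler([]string{"localhost:9333"}, grpcDialOption, "localhost", 18888, "", "", func() {})
//	f.SetStore(store) // store: a FilerStore implementation
//	go f.KeepConnectedToMaster()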
// AggregateFromPeers subscribes to metadata updates from the given peer
// filers; when no peers are configured, the filer subscribes to itself.
func (f *Filer) AggregateFromPeers(self string, filers []string) {

	// set peers
	if len(filers) == 0 {
		filers = append(filers, self)
	}
	f.MetaAggregator = NewMetaAggregator(filers, f.GrpcDialOption)
	f.MetaAggregator.StartLoopSubscribe(f, self)

}
func (f *Filer) SetStore(store FilerStore) {
	f.Store = NewFilerStoreWrapper(store)
}

func (f *Filer) GetStore() (store FilerStore) {
	return f.Store
}

func (f *Filer) GetMaster() string {
	return f.MasterClient.GetMaster()
}

func (f *Filer) KeepConnectedToMaster() {
	f.MasterClient.KeepConnectedToMaster()
}

func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) {
	return f.Store.BeginTransaction(ctx)
}

func (f *Filer) CommitTransaction(ctx context.Context) error {
	return f.Store.CommitTransaction(ctx)
}

func (f *Filer) RollbackTransaction(ctx context.Context) error {
	return f.Store.RollbackTransaction(ctx)
}
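
// Example (sketch): wrapping a store mutation in a transaction. Error handling
// is abbreviated and the entry value is an illustrative assumption.
//
//	ctx, err := f.BeginTransaction(ctx)
//	if err != nil {
//		return err
//	}
//	if err := f.Store.InsertEntry(ctx, entry); err != nil {
//		f.RollbackTransaction(ctx)
//		return err
//	}
//	return f.CommitTransaction(ctx)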
// CreateEntry creates the given entry, creating any missing parent directories
// along the way. With o_excl set, an existing entry at the same path is an
// error; otherwise the existing entry is updated in place.
func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool, signatures []int32) error {

	if string(entry.FullPath) == "/" {
		return nil
	}

	dirParts := strings.Split(string(entry.FullPath), "/")
	// fmt.Printf("directory parts: %+v\n", dirParts)

	var lastDirectoryEntry *Entry

	for i := 1; i < len(dirParts); i++ {
		dirPath := "/" + util.Join(dirParts[:i]...)
		// fmt.Printf("%d directory: %+v\n", i, dirPath)

		// check the store directly
		glog.V(4).Infof("find uncached directory: %s", dirPath)
		dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))

		// no such existing directory
		if dirEntry == nil {

			// create the directory
			now := time.Now()

			dirEntry = &Entry{
				FullPath: util.FullPath(dirPath),
				Attr: Attr{
					Mtime:       now,
					Crtime:      now,
					Mode:        os.ModeDir | entry.Mode | 0110,
					Uid:         entry.Uid,
					Gid:         entry.Gid,
					Collection:  entry.Collection,
					Replication: entry.Replication,
					UserName:    entry.UserName,
					GroupNames:  entry.GroupNames,
				},
			}

			glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)

			mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
			if mkdirErr != nil {
				// another request may have created the directory concurrently;
				// only fail if it still does not exist
				if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
					glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
					return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
				}
			} else {
				f.maybeAddBucket(dirEntry)
				f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil)
			}

		} else if !dirEntry.IsDirectory() {
			glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
			return fmt.Errorf("%s is a file", dirPath)
		}

		// remember the direct parent directory entry
		if i == len(dirParts)-1 {
			lastDirectoryEntry = dirEntry
		}
	}

	if lastDirectoryEntry == nil {
		glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
		return fmt.Errorf("parent folder not found: %v", entry.FullPath)
	}

	/*
		if !hasWritePermission(lastDirectoryEntry, entry) {
			glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
				lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
			return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
		}
	*/

	oldEntry, _ := f.FindEntry(ctx, entry.FullPath)

	glog.V(4).Infof("CreateEntry %s: old entry: %v exclusive:%v", entry.FullPath, oldEntry, o_excl)
	if oldEntry == nil {
		if err := f.Store.InsertEntry(ctx, entry); err != nil {
			glog.Errorf("insert entry %s: %v", entry.FullPath, err)
			return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
		}
	} else {
		if o_excl {
			glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
			return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
		}
		if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
			glog.Errorf("update entry %s: %v", entry.FullPath, err)
			return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
		}
	}

	f.maybeAddBucket(entry)
	f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures)

	f.deleteChunksIfNotNew(oldEntry, entry)

	glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)

	return nil
}
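
// Example (sketch): creating a file entry; the path and attributes below are
// illustrative assumptions.
//
//	now := time.Now()
//	err := f.CreateEntry(ctx, &Entry{
//		FullPath: util.FullPath("/some/dir/file.txt"),
//		Attr:     Attr{Mtime: now, Crtime: now, Mode: 0644, Uid: OS_UID, Gid: OS_GID},
//	}, false, false, nil)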
// UpdateEntry overwrites an existing entry, refusing to change an entry
// between file and directory types.
func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
	if oldEntry != nil {
		if oldEntry.IsDirectory() && !entry.IsDirectory() {
			glog.Errorf("existing %s is a directory", entry.FullPath)
			return fmt.Errorf("existing %s is a directory", entry.FullPath)
		}
		if !oldEntry.IsDirectory() && entry.IsDirectory() {
			glog.Errorf("existing %s is a file", entry.FullPath)
			return fmt.Errorf("existing %s is a file", entry.FullPath)
		}
	}
	return f.Store.UpdateEntry(ctx, entry)
}
// FindEntry looks up a single entry by full path. The root directory is
// synthesized rather than read from the store, and a TTL-expired entry is
// deleted and reported as not found.
func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) {

	now := time.Now()

	if string(p) == "/" {
		return &Entry{
			FullPath: p,
			Attr: Attr{
				Mtime:  now,
				Crtime: now,
				Mode:   os.ModeDir | 0755,
				Uid:    OS_UID,
				Gid:    OS_GID,
			},
		}, nil
	}
	entry, err = f.Store.FindEntry(ctx, p)
	if entry != nil && entry.TtlSec > 0 {
		if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
			f.Store.DeleteEntry(ctx, p)
			return nil, filer_pb.ErrNotFound
		}
	}
	return
}
// ListDirectoryEntries lists entries under directory p, starting after (or at,
// when inclusive) startFileName, up to limit entries, optionally filtered by a
// name prefix. Entries that expired during listing are skipped and replaced by
// additional reads so the limit can still be filled.
func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) ([]*Entry, error) {
	if strings.HasSuffix(string(p), "/") && len(p) > 1 {
		p = p[0 : len(p)-1]
	}

	var makeupEntries []*Entry
	entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix)
	for expiredCount > 0 && err == nil {
		makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix)
		if err == nil {
			entries = append(entries, makeupEntries...)
		}
	}

	return entries, err
}
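
// Example (sketch): listing up to 100 entries under a directory; the path and
// limit are illustrative assumptions.
//
//	entries, err := f.ListDirectoryEntries(ctx, util.FullPath("/some/dir"), "", false, 100, "")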
// doListDirectoryEntries reads one page of entries from the store, dropping
// and deleting entries whose TTL has expired, and reports how many were
// dropped so the caller can read more.
func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*Entry, expiredCount int, lastFileName string, err error) {
	listedEntries, listErr := f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix)
	if listErr != nil {
		return listedEntries, expiredCount, "", listErr
	}
	for _, entry := range listedEntries {
		lastFileName = entry.Name()
		if entry.TtlSec > 0 {
			if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
				f.Store.DeleteEntry(ctx, p.Child(entry.Name()))
				expiredCount++
				continue
			}
		}
		entries = append(entries, entry)
	}
	return
}
// Shutdown stops the local metadata log buffer and shuts down the underlying store.
func (f *Filer) Shutdown() {
	f.LocalMetaLogBuffer.Shutdown()
	f.Store.Shutdown()
}