
Merge pull request #18 from chrislusf/master

sync
pull/1482/head
hilimd, 4 years ago (committed by GitHub)
commit 0db149fb5f
Changed files (lines changed):
  weed/command/mount_std.go (2)
  weed/filer/meta_aggregator.go (2)
  weed/filesys/meta_cache/meta_cache.go (20)
  weed/filesys/wfs.go (2)

weed/command/mount_std.go

@@ -91,7 +91,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 	// detect mount folder mode
 	if *option.dirAutoCreate {
-		os.MkdirAll(dir, 0755)
+		os.MkdirAll(dir, os.FileMode(0777) &^ umask)
 	}
 	mountMode := os.ModeDir | 0755
 	fileInfo, err := os.Stat(dir)
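
The new mode starts from a fully open 0777 and clears the configured umask bits with Go's bit-clear operator &^, so the auto-created mount directory follows the mount's umask instead of a hard-coded 0755. A minimal, self-contained sketch of that arithmetic (the umask values below are illustrative only, not taken from this change):

package main

import (
	"fmt"
	"os"
)

func main() {
	for _, umask := range []os.FileMode{0022, 0077} {
		// Bit-clear (&^) drops every permission bit that is set in the umask.
		effective := os.FileMode(0777) &^ umask
		fmt.Printf("umask %04o -> directory mode %04o\n", umask, effective)
	}
	// Output:
	// umask 0022 -> directory mode 0755
	// umask 0077 -> directory mode 0700
}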

weed/filer/meta_aggregator.go

@@ -78,7 +78,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
 	var counter int64
 	var synced bool
 	maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) {
-		if err := Replay(f.Store.ActualStore, event); err != nil {
+		if err := Replay(f.Store, event); err != nil {
 			glog.Errorf("failed to reply metadata change from %v: %v", peer, err)
 			return
 		}

weed/filesys/meta_cache/meta_cache.go

@@ -17,7 +17,7 @@ import (
 // e.g. fill fileId field for chunks
 type MetaCache struct {
-	actualStore filer.FilerStore
+	localStore filer.FilerStore
 	sync.RWMutex
 	visitedBoundary *bounded_tree.BoundedTree
 	uidGidMapper *UidGidMapper
@@ -25,7 +25,7 @@ type MetaCache struct {
 func NewMetaCache(dbFolder string, uidGidMapper *UidGidMapper) *MetaCache {
 	return &MetaCache{
-		actualStore: openMetaStore(dbFolder),
+		localStore: openMetaStore(dbFolder),
 		visitedBoundary: bounded_tree.NewBoundedTree(),
 		uidGidMapper: uidGidMapper,
 	}
@@ -57,7 +57,7 @@ func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer.Entry) error
 func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer.Entry) error {
 	filer_pb.BeforeEntrySerialization(entry.Chunks)
-	return mc.actualStore.InsertEntry(ctx, entry)
+	return mc.localStore.InsertEntry(ctx, entry)
 }

 func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry) error {
@@ -71,7 +71,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
 		// skip the unnecessary deletion
 		// leave the update to the following InsertEntry operation
 	} else {
-		if err := mc.actualStore.DeleteEntry(ctx, oldPath); err != nil {
+		if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil {
 			return err
 		}
 	}
@@ -83,7 +83,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
 	if newEntry != nil {
 		newDir, _ := newEntry.DirAndName()
 		if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) {
-			if err := mc.actualStore.InsertEntry(ctx, newEntry); err != nil {
+			if err := mc.localStore.InsertEntry(ctx, newEntry); err != nil {
 				return err
 			}
 		}
@@ -95,13 +95,13 @@ func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer.Entry) error
 	mc.Lock()
 	defer mc.Unlock()
 	filer_pb.BeforeEntrySerialization(entry.Chunks)
-	return mc.actualStore.UpdateEntry(ctx, entry)
+	return mc.localStore.UpdateEntry(ctx, entry)
 }

 func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer.Entry, err error) {
 	mc.RLock()
 	defer mc.RUnlock()
-	entry, err = mc.actualStore.FindEntry(ctx, fp)
+	entry, err = mc.localStore.FindEntry(ctx, fp)
 	if err != nil {
 		return nil, err
 	}
@@ -113,14 +113,14 @@ func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *fi
 func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
 	mc.Lock()
 	defer mc.Unlock()
-	return mc.actualStore.DeleteEntry(ctx, fp)
+	return mc.localStore.DeleteEntry(ctx, fp)
 }

 func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer.Entry, error) {
 	mc.RLock()
 	defer mc.RUnlock()
-	entries, err := mc.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
+	entries, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
 	if err != nil {
 		return nil, err
 	}
@@ -134,7 +134,7 @@ func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.Full
 func (mc *MetaCache) Shutdown() {
 	mc.Lock()
 	defer mc.Unlock()
-	mc.actualStore.Shutdown()
+	mc.localStore.Shutdown()
 }

 func (mc *MetaCache) mapIdFromFilerToLocal(entry *filer.Entry) {

weed/filesys/wfs.go

@@ -88,7 +88,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 	cacheUniqueId := util.Md5String([]byte(option.FilerGrpcAddress + option.FilerMountRootPath + util.Version()))[0:4]
 	cacheDir := path.Join(option.CacheDir, cacheUniqueId)
 	if option.CacheSizeMB > 0 {
-		os.MkdirAll(cacheDir, 0755)
+		os.MkdirAll(cacheDir, os.FileMode(0777) &^ option.Umask)
 		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB)
 	}
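
The chunk-cache directory gets the same treatment: its mode is derived from the mount's umask (option.Umask) rather than being hard-coded. As an illustrative, self-contained sketch of how such a umask could be parsed from an octal string and applied (parseUmask is a hypothetical helper, not part of the SeaweedFS code above):

package main

import (
	"fmt"
	"log"
	"os"
	"strconv"
)

// parseUmask converts an octal umask string such as "022" into an os.FileMode.
// Hypothetical helper for illustration; not taken from the change above.
func parseUmask(s string) (os.FileMode, error) {
	v, err := strconv.ParseUint(s, 8, 32)
	if err != nil {
		return 0, err
	}
	return os.FileMode(v), nil
}

func main() {
	umask, err := parseUmask("022")
	if err != nil {
		log.Fatal(err)
	}
	dir := "/tmp/example-chunk-cache" // illustrative path, not the real cacheDir
	if err := os.MkdirAll(dir, os.FileMode(0777)&^umask); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created %s with mode %04o\n", dir, os.FileMode(0777)&^umask)
}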
