diff --git a/unmaintained/see_log_entry/see_log_entry.go b/unmaintained/see_log_entry/see_log_entry.go
index 34965f6be..45480d4dc 100644
--- a/unmaintained/see_log_entry/see_log_entry.go
+++ b/unmaintained/see_log_entry/see_log_entry.go
@@ -9,13 +9,13 @@ import (
 
 	"github.com/golang/protobuf/proto"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 var (
-	logdataFile = flag.String("logdata", "", "log data file saved under "+ filer2.SystemLogDir)
+	logdataFile = flag.String("logdata", "", "log data file saved under "+ filer.SystemLogDir)
 )
 
 func main() {
diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go
similarity index 86%
rename from weed/filer2/abstract_sql/abstract_sql_store.go
rename to weed/filer/abstract_sql/abstract_sql_store.go
index 3dd4af103..368bec973 100644
--- a/weed/filer2/abstract_sql/abstract_sql_store.go
+++ b/weed/filer/abstract_sql/abstract_sql_store.go
@@ -4,11 +4,11 @@ import (
 	"context"
 	"database/sql"
 	"fmt"
-
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
+	"strings"
 )
 
 type AbstractSqlStore struct {
@@ -59,7 +59,7 @@ func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB {
 	return store.DB
 }
 
-func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	dir, name := entry.FullPath.DirAndName()
 	meta, err := entry.EncodeAttributesAndChunks()
@@ -69,12 +69,9 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.En
 
 	res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta)
 	if err != nil {
-		return fmt.Errorf("insert %s: %s", entry.FullPath, err)
-	}
-
-	affectedRows, err := res.RowsAffected()
-	if err == nil && affectedRows > 0 {
-		return nil
+		if !strings.Contains(strings.ToLower(err.Error()), "duplicate") {
+			return fmt.Errorf("insert %s: %s", entry.FullPath, err)
+		}
 	}
 
 	// now the insert failed possibly due to duplication constraints
@@ -93,7 +90,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.En
 
 }
 
-func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	dir, name := entry.FullPath.DirAndName()
 	meta, err := entry.EncodeAttributesAndChunks()
@@ -113,16 +110,20 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En
 	return nil
 }
 
-func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer2.Entry, error) {
+func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer.Entry, error) {
 
 	dir, name := fullpath.DirAndName()
 	row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir)
+
 	var data []byte
 	if err := row.Scan(&data); err != nil {
-		return nil, filer_pb.ErrNotFound
+		if err == sql.ErrNoRows {
+			return nil, filer_pb.ErrNotFound
+		}
+		return nil, fmt.Errorf("find %s: %v", fullpath, err)
 	}
 
-	entry := &filer2.Entry{
+	entry := &filer.Entry{
 		FullPath: fullpath,
 	}
 	if err := entry.DecodeAttributesAndChunks(data); err != nil {
@@ -164,14 +165,13 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
 	return nil
 }
 
-func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
-
+func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
 	sqlText := store.SqlListExclusive
 	if inclusive {
 		sqlText = store.SqlListInclusive
 	}
 
-	rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), limit)
+	rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), prefix, limit)
 	if err != nil {
 		return nil, fmt.Errorf("list %s : %v", fullpath, err)
 	}
@@ -185,7 +185,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat
 			return nil, fmt.Errorf("scan %s: %v", fullpath, err)
 		}
 
-		entry := &filer2.Entry{
+		entry := &filer.Entry{
 			FullPath: util.NewFullPath(string(fullpath), name),
 		}
 		if err = entry.DecodeAttributesAndChunks(data); err != nil {
@@ -199,6 +199,10 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat
 	return entries, nil
 }
 
+func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
+	return store.ListDirectoryPrefixedEntries(ctx, fullpath, startFileName, inclusive, limit, "")
+}
+
 func (store *AbstractSqlStore) Shutdown() {
 	store.DB.Close()
 }
diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go
new file mode 100644
index 000000000..b5a662c6b
--- /dev/null
+++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go
@@ -0,0 +1,87 @@
+package abstract_sql
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"strings"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+	dirStr, dirHash, name := genDirAndName(key)
+
+	res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, dirHash, name, dirStr, value)
+	if err == nil {
+		return nil
+	}
+	if !strings.Contains(strings.ToLower(err.Error()), "duplicate") {
+		return fmt.Errorf("kv insert: %s", err)
+	}
+	// the insert failed due to a duplication constraint, fall back to update
+	glog.V(1).Infof("kv insert falls back to update: %s", err)
+
+	res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)
+	if err != nil {
+		return fmt.Errorf("kv upsert: %s", err)
+	}
+
+	_, err = res.RowsAffected()
+	if err != nil {
+		return fmt.Errorf("kv upsert no rows affected: %s", err)
+	}
+	return nil
+
+}
+
+func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+	dirStr, dirHash, name := genDirAndName(key)
+	row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, dirHash, name, dirStr)
+
+	err = row.Scan(&value)
+
+	if err == sql.ErrNoRows {
+		return nil, filer.ErrKvNotFound
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("kv get: %v", err)
+	}
+
+	return
+}
+
+func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+	dirStr, dirHash, name := genDirAndName(key)
+
+	res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, dirHash, name, dirStr)
+	if err != nil {
+		return fmt.Errorf("kv delete: %s", err)
+	}
+
+	_, err = res.RowsAffected()
+	if err != nil {
+		return fmt.Errorf("kv delete no rows affected: %s", err)
+	}
+
+	return nil
+
+}
+
+func genDirAndName(key []byte) (dirStr string, dirHash int64, name string) {
+	for len(key) < 8 {
+		key = append(key, 0)
+	}
+
+	dirHash = int64(util.BytesToUint64(key[:8]))
+	dirStr = string(key[:8])
+	name = string(key[8:])
+
+	return
+}
diff --git a/weed/filer2/cassandra/README.txt b/weed/filer/cassandra/README.txt
similarity index 100%
rename from weed/filer2/cassandra/README.txt
rename to weed/filer/cassandra/README.txt
diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go
similarity index 87%
rename from weed/filer2/cassandra/cassandra_store.go
rename to weed/filer/cassandra/cassandra_store.go
index 5dd7d8036..fd161b1f1 100644
--- a/weed/filer2/cassandra/cassandra_store.go
+++ b/weed/filer/cassandra/cassandra_store.go
@@ -3,17 +3,16 @@ package cassandra
 import (
 	"context"
 	"fmt"
-
 	"github.com/gocql/gocql"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &CassandraStore{})
+	filer.Stores = append(filer.Stores, &CassandraStore{})
 }
 
 type CassandraStore struct {
@@ -53,7 +52,7 @@ func (store *CassandraStore) RollbackTransaction(ctx context.Context) error {
 	return nil
 }
 
-func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	dir, name := entry.FullPath.DirAndName()
 	meta, err := entry.EncodeAttributesAndChunks()
@@ -70,12 +69,12 @@ func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entr
 	return nil
 }
 
-func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	return store.InsertEntry(ctx, entry)
 }
 
-func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
 
 	dir, name := fullpath.DirAndName()
 	var data []byte
@@ -91,7 +90,7 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPa
 		return nil, filer_pb.ErrNotFound
 	}
 
-	entry = &filer2.Entry{
+	entry = &filer.Entry{
 		FullPath: fullpath,
 	}
 	err = entry.DecodeAttributesAndChunks(data)
@@ -126,8 +125,12 @@ func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath
 	return nil
 }
 
+func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+	return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
 func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
-	limit int) (entries []*filer2.Entry, err error) {
+	limit int) (entries []*filer.Entry, err error) {
 
 	cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
 	if inclusive {
@@ -138,7 +141,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath
 	var name string
 	iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
 	for iter.Scan(&name, &data) {
-		entry := &filer2.Entry{
+		entry := &filer.Entry{
 			FullPath: util.NewFullPath(string(fullpath), name),
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil {
diff --git a/weed/filer/cassandra/cassandra_store_kv.go b/weed/filer/cassandra/cassandra_store_kv.go
new file mode 100644
index 000000000..f7668746f
--- /dev/null
+++ b/weed/filer/cassandra/cassandra_store_kv.go
@@ -0,0 +1,18 @@
+package cassandra
+
+import (
+	"context"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+)
+
+func (store *CassandraStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+	return filer.ErrKvNotImplemented
+}
+
+func (store *CassandraStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+	return nil, filer.ErrKvNotImplemented
+}
+
+func (store *CassandraStore) KvDelete(ctx context.Context, key []byte) (err error) {
+	return filer.ErrKvNotImplemented
+}
diff --git a/weed/filer2/configuration.go b/weed/filer/configuration.go
similarity index 98%
rename from weed/filer2/configuration.go
rename to weed/filer/configuration.go
index a174117ea..3dce67d6d 100644
--- a/weed/filer2/configuration.go
+++ b/weed/filer/configuration.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"os"
diff --git a/weed/filer2/entry.go b/weed/filer/entry.go
similarity index 99%
rename from weed/filer2/entry.go
rename to weed/filer/entry.go
index fedfde40d..4a73de19a 100644
--- a/weed/filer2/entry.go
+++ b/weed/filer/entry.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"os"
diff --git a/weed/filer2/entry_codec.go b/weed/filer/entry_codec.go
similarity index 99%
rename from weed/filer2/entry_codec.go
rename to weed/filer/entry_codec.go
index 4d615194f..fb6448b30 100644
--- a/weed/filer2/entry_codec.go
+++ b/weed/filer/entry_codec.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"bytes"
diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go
similarity index 86%
rename from weed/filer2/etcd/etcd_store.go
rename to weed/filer/etcd/etcd_store.go
index 2ef65b4a0..36db4ac01 100644
--- a/weed/filer2/etcd/etcd_store.go
+++ b/weed/filer/etcd/etcd_store.go
@@ -8,7 +8,7 @@ import (
 
 	"go.etcd.io/etcd/clientv3"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	weed_util "github.com/chrislusf/seaweedfs/weed/util"
@@ -19,7 +19,7 @@ const (
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &EtcdStore{})
+	filer.Stores = append(filer.Stores, &EtcdStore{})
 }
 
 type EtcdStore struct {
@@ -73,7 +73,7 @@ func (store *EtcdStore) RollbackTransaction(ctx context.Context) error {
 	return nil
 }
 
-func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
 	key := genKey(entry.DirAndName())
 
 	value, err := entry.EncodeAttributesAndChunks()
@@ -88,11 +88,11 @@ func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (e
 	return nil
 }
 
-func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
 	return store.InsertEntry(ctx, entry)
 }
 
-func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
+func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
 	key := genKey(fullpath.DirAndName())
 
 	resp, err := store.client.Get(ctx, string(key))
@@ -104,7 +104,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPa
 		return nil, filer_pb.ErrNotFound
 	}
 
-	entry = &filer2.Entry{
+	entry = &filer.Entry{
 		FullPath: fullpath,
 	}
 	err = entry.DecodeAttributesAndChunks(resp.Kvs[0].Value)
@@ -135,9 +135,11 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_
 	return nil
 }
 
-func (store *EtcdStore) ListDirectoryEntries(
-	ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
-) (entries []*filer2.Entry, err error) {
+func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+	return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
+func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
 	directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
 
 	resp, err := store.client.Get(ctx, string(directoryPrefix),
@@ -158,7 +160,7 @@ func (store *EtcdStore) ListDirectoryEntries(
 		if limit < 0 {
 			break
 		}
-		entry := &filer2.Entry{
+		entry := &filer.Entry{
 			FullPath: weed_util.NewFullPath(string(fullpath), fileName),
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil {
diff --git a/weed/filer/etcd/etcd_store_kv.go b/weed/filer/etcd/etcd_store_kv.go
new file mode 100644
index 000000000..a803a5834
--- /dev/null
+++ b/weed/filer/etcd/etcd_store_kv.go
@@ -0,0 +1,44 @@
+package etcd
+
+import (
+	"context"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+)
+
+func (store *EtcdStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+	_, err = store.client.Put(ctx, string(key), string(value))
+
+	if err != nil {
+		return fmt.Errorf("kv put: %v", err)
+	}
+
+	return nil
+}
+
+func (store *EtcdStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+	resp, err := store.client.Get(ctx, string(key))
+
+	if err != nil {
+		return nil, fmt.Errorf("kv get: %v", err)
+	}
+
+	if len(resp.Kvs) == 0 {
+		return nil, filer.ErrKvNotFound
+	}
+
+	return resp.Kvs[0].Value, nil
+}
+
+func (store *EtcdStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+	_, err = store.client.Delete(ctx, string(key))
+
+	if err != nil {
+		return fmt.Errorf("kv delete: %v", err)
+	}
+
+	return nil
+}
diff --git a/weed/filer2/filechunk_manifest.go b/weed/filer/filechunk_manifest.go
similarity index 99%
rename from weed/filer2/filechunk_manifest.go
rename to weed/filer/filechunk_manifest.go
index ba4625bab..e84cf21e5 100644
--- a/weed/filer2/filechunk_manifest.go
+++ b/weed/filer/filechunk_manifest.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"bytes"
diff --git a/weed/filer2/filechunk_manifest_test.go b/weed/filer/filechunk_manifest_test.go
similarity index 99%
rename from weed/filer2/filechunk_manifest_test.go
rename to weed/filer/filechunk_manifest_test.go
index 2b0862d07..ce12c5da6 100644
--- a/weed/filer2/filechunk_manifest_test.go
+++ b/weed/filer/filechunk_manifest_test.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"bytes"
diff --git a/weed/filer2/filechunks.go b/weed/filer/filechunks.go
similarity index 99%
rename from weed/filer2/filechunks.go
rename to weed/filer/filechunks.go
index 53c679d6b..c45963193 100644
--- a/weed/filer2/filechunks.go
+++ b/weed/filer/filechunks.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"fmt"
diff --git a/weed/filer2/filechunks2_test.go b/weed/filer/filechunks2_test.go
similarity index 99%
rename from weed/filer2/filechunks2_test.go
rename to weed/filer/filechunks2_test.go
index d896da3cc..9f9566d9b 100644
--- a/weed/filer2/filechunks2_test.go
+++ b/weed/filer/filechunks2_test.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"sort"
diff --git a/weed/filer2/filechunks_test.go b/weed/filer/filechunks_test.go
similarity index 99%
rename from weed/filer2/filechunks_test.go
rename to weed/filer/filechunks_test.go
index 31b74a22a..699e7e298 100644
--- a/weed/filer2/filechunks_test.go
+++ b/weed/filer/filechunks_test.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"fmt"
diff --git a/weed/filer2/filer.go b/weed/filer/filer.go
similarity index 94%
rename from weed/filer2/filer.go
rename to weed/filer/filer.go
index a3b7709ad..7a555372f 100644
--- a/weed/filer2/filer.go
+++ b/weed/filer/filer.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"context"
@@ -246,15 +246,15 @@ func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, e
 
 }
 
-func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
+func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) ([]*Entry, error) {
 	if strings.HasSuffix(string(p), "/") && len(p) > 1 {
 		p = p[0 : len(p)-1]
 	}
 
 	var makeupEntries []*Entry
-	entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+	entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix)
 	for expiredCount > 0 && err == nil {
-		makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount)
+		makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix)
 		if err == nil {
 			entries = append(entries, makeupEntries...)
 		}
@@ -263,8 +263,8 @@ func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, start
 	return entries, err
 }
 
-func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) {
-	listedEntries, listErr := f.Store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*Entry, expiredCount int, lastFileName string, err error) {
+	listedEntries, listErr := f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix)
 	if listErr != nil {
 		return listedEntries, expiredCount, "", listErr
 	}
diff --git a/weed/filer2/filer_buckets.go b/weed/filer/filer_buckets.go
similarity index 97%
rename from weed/filer2/filer_buckets.go
rename to weed/filer/filer_buckets.go
index 7a57e7ee1..4d4f4abc3 100644
--- a/weed/filer2/filer_buckets.go
+++ b/weed/filer/filer_buckets.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"context"
@@ -29,7 +29,7 @@ func (f *Filer) LoadBuckets() {
 
 	limit := math.MaxInt32
 
-	entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit)
+	entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "")
 
 	if err != nil {
 		glog.V(1).Infof("no buckets found: %v", err)
diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
similarity index 98%
rename from weed/filer2/filer_delete_entry.go
rename to weed/filer/filer_delete_entry.go
index 797b0f651..e2198bd21 100644
--- a/weed/filer2/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"context"
@@ -58,7 +58,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 	lastFileName := ""
 	includeLastFile := false
 	for {
-		entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize)
+		entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "")
 		if err != nil {
 			glog.Errorf("list folder %s: %v", entry.FullPath, err)
 			return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
diff --git a/weed/filer2/filer_deletion.go b/weed/filer/filer_deletion.go
similarity index 99%
rename from weed/filer2/filer_deletion.go
rename to weed/filer/filer_deletion.go
index dbee4a61d..126d162ec 100644
--- a/weed/filer2/filer_deletion.go
+++ b/weed/filer/filer_deletion.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"strings"
diff --git a/weed/filer2/filer_notify.go b/weed/filer/filer_notify.go
similarity index 98%
rename from weed/filer2/filer_notify.go
rename to weed/filer/filer_notify.go
index 96776a359..e00117382 100644
--- a/weed/filer2/filer_notify.go
+++ b/weed/filer/filer_notify.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"context"
@@ -100,13 +100,13 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(
 	sizeBuf := make([]byte, 4)
 	startTsNs := startTime.UnixNano()
 
-	dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366)
+	dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "")
 	if listDayErr != nil {
 		return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
 	}
 	for _, dayEntry := range dayEntries {
 		// println("checking day", dayEntry.FullPath)
-		hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60)
+		hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "")
 		if listHourMinuteErr != nil {
 			return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
 		}
diff --git a/weed/filer2/filer_notify_append.go b/weed/filer/filer_notify_append.go
similarity index 99%
rename from weed/filer2/filer_notify_append.go
rename to weed/filer/filer_notify_append.go
index 31cdb3c1c..b1836b046 100644
--- a/weed/filer2/filer_notify_append.go
+++ b/weed/filer/filer_notify_append.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"context"
diff --git a/weed/filer2/filer_notify_test.go b/weed/filer/filer_notify_test.go
similarity index 98%
rename from weed/filer2/filer_notify_test.go
rename to weed/filer/filer_notify_test.go
index 29170bfdf..6a2be8f18 100644
--- a/weed/filer2/filer_notify_test.go
+++ b/weed/filer/filer_notify_test.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"testing"
diff --git a/weed/filer2/filerstore.go b/weed/filer/filerstore.go
similarity index 64%
rename from weed/filer2/filerstore.go
rename to weed/filer/filerstore.go
index ab1d0e659..518212437 100644
--- a/weed/filer2/filerstore.go
+++ b/weed/filer/filerstore.go
@@ -1,7 +1,9 @@
-package filer2
+package filer
 
 import (
 	"context"
+	"errors"
+	"strings"
 	"time"
 
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -9,6 +11,12 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
+var (
+	ErrUnsupportedListDirectoryPrefixed = errors.New("unsupported directory prefix listing")
+	ErrKvNotImplemented                 = errors.New("kv not implemented yet")
+	ErrKvNotFound                       = errors.New("kv: not found")
+)
+
 type FilerStore interface {
 	// GetName gets the name to locate the configuration in filer.toml file
 	GetName() string
@@ -21,11 +29,16 @@ type FilerStore interface {
 	DeleteEntry(context.Context, util.FullPath) (err error)
 	DeleteFolderChildren(context.Context, util.FullPath) (err error)
 	ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
+	ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error)
 
 	BeginTransaction(ctx context.Context) (context.Context, error)
 	CommitTransaction(ctx context.Context) error
 	RollbackTransaction(ctx context.Context) error
 
+	KvPut(ctx context.Context, key []byte, value []byte) (err error)
+	KvGet(ctx context.Context, key []byte) (value []byte, err error)
+	KvDelete(ctx context.Context, key []byte) (err error)
+
 	Shutdown()
 }
 
@@ -135,6 +148,60 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath
 	return entries, err
 }
 
+func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error) {
+	stats.FilerStoreCounter.WithLabelValues(fsw.ActualStore.GetName(), "prefixList").Inc()
+	start := time.Now()
+	defer func() {
+		stats.FilerStoreHistogram.WithLabelValues(fsw.ActualStore.GetName(), "prefixList").Observe(time.Since(start).Seconds())
+	}()
+	entries, err := fsw.ActualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix)
+	if err == ErrUnsupportedListDirectoryPrefixed {
+		entries, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix)
+	}
+	if err != nil {
+		return nil, err
+	}
+	for _, entry := range entries {
+		filer_pb.AfterEntryDeserialization(entry.Chunks)
+	}
+	return entries, nil
+}
+
+func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) (entries []*Entry, err error) {
+	entries, err = fsw.ActualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
+	if err != nil {
+		return nil, err
+	}
+
+	if prefix == "" {
+		return
+	}
+
+	count := 0
+	var lastFileName string
+	notPrefixed := entries
+	entries = nil
+	for count < limit && len(notPrefixed) > 0 {
+		for _, entry := range notPrefixed {
+			lastFileName = entry.Name()
+			if strings.HasPrefix(entry.Name(), prefix) {
+				count++
+				entries = append(entries, entry)
+				if count >= limit {
+					break
+				}
+			}
+		}
+		if count < limit {
+			notPrefixed, err = fsw.ActualStore.ListDirectoryEntries(ctx, dirPath, lastFileName, false, limit)
+			if err != nil {
+				return
+			}
+		}
+	}
+	return
+}
+
 func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) {
 	return fsw.ActualStore.BeginTransaction(ctx)
 }
@@ -150,3 +217,13 @@ func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error {
 func (fsw *FilerStoreWrapper) Shutdown() {
 	fsw.ActualStore.Shutdown()
 }
+
+func (fsw *FilerStoreWrapper) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+	return fsw.ActualStore.KvPut(ctx, key, value)
+}
+func (fsw *FilerStoreWrapper) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+	return fsw.ActualStore.KvGet(ctx, key)
+}
+func (fsw *FilerStoreWrapper) KvDelete(ctx context.Context, key []byte) (err error) {
+	return fsw.ActualStore.KvDelete(ctx, key)
+}
diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go
similarity index 88%
rename from weed/filer2/leveldb/leveldb_store.go
rename to weed/filer/leveldb/leveldb_store.go
index 31919ca49..eccb760a2 100644
--- a/weed/filer2/leveldb/leveldb_store.go
+++ b/weed/filer/leveldb/leveldb_store.go
@@ -4,13 +4,12 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-
 	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/errors"
+	leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	weed_util "github.com/chrislusf/seaweedfs/weed/util"
@@ -21,7 +20,7 @@ const (
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &LevelDBStore{})
+	filer.Stores = append(filer.Stores, &LevelDBStore{})
 }
 
 type LevelDBStore struct {
@@ -50,7 +49,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
 	}
 
 	if store.db, err = leveldb.OpenFile(dir, opts); err != nil {
-		if errors.IsCorrupted(err) {
+		if leveldb_errors.IsCorrupted(err) {
 			store.db, err = leveldb.RecoverFile(dir, opts)
 		}
 		if err != nil {
@@ -71,7 +70,7 @@ func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error {
 	return nil
 }
 
-func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
 	key := genKey(entry.DirAndName())
 
 	value, err := entry.EncodeAttributesAndChunks()
@@ -90,12 +89,12 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry)
 	return nil
 }
 
-func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	return store.InsertEntry(ctx, entry)
 }
 
-func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
+func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
 	key := genKey(fullpath.DirAndName())
 
 	data, err := store.db.Get(key, nil)
@@ -107,7 +106,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
 		return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
 	}
 
-	entry = &filer2.Entry{
+	entry = &filer.Entry{
 		FullPath: fullpath,
 	}
 	err = entry.DecodeAttributesAndChunks(data)
@@ -159,8 +158,12 @@ func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath we
 	return nil
 }
 
+func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+	return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
 func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
-	limit int) (entries []*filer2.Entry, err error) {
+	limit int) (entries []*filer.Entry, err error) {
 
 	directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
 
@@ -181,7 +184,7 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath we
 		if limit < 0 {
 			break
 		}
-		entry := &filer2.Entry{
+		entry := &filer.Entry{
 			FullPath: weed_util.NewFullPath(string(fullpath), fileName),
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
diff --git a/weed/filer/leveldb/leveldb_store_kv.go b/weed/filer/leveldb/leveldb_store_kv.go
new file mode 100644
index 000000000..f686cbf21
--- /dev/null
+++ b/weed/filer/leveldb/leveldb_store_kv.go
@@ -0,0 +1,45 @@
+package leveldb
+
+import (
+	"context"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/syndtr/goleveldb/leveldb"
+)
+
+func (store *LevelDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+	err = store.db.Put(key, value, nil)
+
+	if err != nil {
+		return fmt.Errorf("kv put: %v", err)
+	}
+
+	return nil
+}
+
+func (store *LevelDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+	value, err = store.db.Get(key, nil)
+
+	if err == leveldb.ErrNotFound {
+		return nil, filer.ErrKvNotFound
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("kv get: %v", err)
+	}
+
+	return
+}
+
+func (store *LevelDBStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+	err = store.db.Delete(key, nil)
+
+	if err != nil {
+		return fmt.Errorf("kv delete: %v", err)
+	}
+
+	return nil
+}
diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer/leveldb/leveldb_store_test.go
similarity index 65%
rename from weed/filer2/leveldb/leveldb_store_test.go
rename to weed/filer/leveldb/leveldb_store_test.go
index 5c064ee5a..b07f81129 100644
--- a/weed/filer2/leveldb/leveldb_store_test.go
+++ b/weed/filer/leveldb/leveldb_store_test.go
@@ -6,37 +6,37 @@ import (
 	"os"
 	"testing"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 func TestCreateAndFind(t *testing.T) {
-	filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+	testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
 	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
 	defer os.RemoveAll(dir)
 	store := &LevelDBStore{}
 	store.initialize(dir)
-	filer.SetStore(store)
+	testFiler.SetStore(store)
 
 	fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
 
 	ctx := context.Background()
 
-	entry1 := &filer2.Entry{
+	entry1 := &filer.Entry{
 		FullPath: fullpath,
-		Attr: filer2.Attr{
+		Attr: filer.Attr{
 			Mode: 0440,
 			Uid:  1234,
 			Gid:  5678,
 		},
 	}
 
-	if err := filer.CreateEntry(ctx, entry1, false, false, nil); err != nil {
+	if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
 		t.Errorf("create entry %v: %v", entry1.FullPath, err)
 		return
 	}
 
-	entry, err := filer.FindEntry(ctx, fullpath)
+	entry, err := testFiler.FindEntry(ctx, fullpath)
 
 	if err != nil {
 		t.Errorf("find entry: %v", err)
@@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
 	}
 
 	// checking one upper directory
-	entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100)
+	entries, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
 	}
 
 	// checking one upper directory
-	entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+	entries, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
@@ -65,17 +65,17 @@ func TestCreateAndFind(t *testing.T) {
 }
 
 func TestEmptyRoot(t *testing.T) {
-	filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+	testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
 	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
 	defer os.RemoveAll(dir)
 	store := &LevelDBStore{}
 	store.initialize(dir)
-	filer.SetStore(store)
+	testFiler.SetStore(store)
 
 	ctx := context.Background()
 
 	// checking one upper directory
-	entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+	entries, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
 	if err != nil {
 		t.Errorf("list entries: %v", err)
 		return
diff --git a/weed/filer2/leveldb2/leveldb2_local_store.go b/weed/filer/leveldb2/leveldb2_local_store.go
similarity index 89%
rename from weed/filer2/leveldb2/leveldb2_local_store.go
rename to weed/filer/leveldb2/leveldb2_local_store.go
index 3625abf9e..faae25c45 100644
--- a/weed/filer2/leveldb2/leveldb2_local_store.go
+++ b/weed/filer/leveldb2/leveldb2_local_store.go
@@ -3,12 +3,12 @@ package leveldb
 import (
 	"fmt"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 var (
-	_ = filer2.FilerLocalStore(&LevelDB2Store{})
+	_ = filer.FilerLocalStore(&LevelDB2Store{})
 )
 
 func (store *LevelDB2Store) UpdateOffset(filer string, lastTsNs int64) error {
diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go
similarity index 89%
rename from weed/filer2/leveldb2/leveldb2_store.go
rename to weed/filer/leveldb2/leveldb2_store.go
index c907e8746..7a2bdac2e 100644
--- a/weed/filer2/leveldb2/leveldb2_store.go
+++ b/weed/filer/leveldb2/leveldb2_store.go
@@ -5,22 +5,21 @@ import (
 	"context"
 	"crypto/md5"
 	"fmt"
-	"io"
-	"os"
-
 	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/errors"
+	leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
+	"io"
+	"os"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	weed_util "github.com/chrislusf/seaweedfs/weed/util"
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &LevelDB2Store{})
+	filer.Stores = append(filer.Stores, &LevelDB2Store{})
 }
 
 type LevelDB2Store struct {
@@ -53,7 +52,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
 		dbFolder := fmt.Sprintf("%s/%02d", dir, d)
 		os.MkdirAll(dbFolder, 0755)
 		db, dbErr := leveldb.OpenFile(dbFolder, opts)
-		if errors.IsCorrupted(dbErr) {
+		if leveldb_errors.IsCorrupted(dbErr) {
 			db, dbErr = leveldb.RecoverFile(dbFolder, opts)
 		}
 		if dbErr != nil {
@@ -77,7 +76,7 @@ func (store *LevelDB2Store) RollbackTransaction(ctx context.Context) error {
 	return nil
 }
 
-func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
 	dir, name := entry.DirAndName()
 	key, partitionId := genKey(dir, name, store.dbCount)
 
@@ -97,12 +96,12 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry
 	return nil
 }
 
-func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	return store.InsertEntry(ctx, entry)
 }
 
-func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
+func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
 	dir, name := fullpath.DirAndName()
 	key, partitionId := genKey(dir, name, store.dbCount)
 
@@ -115,7 +114,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.Fu
 		return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
 	}
 
-	entry = &filer2.Entry{
+	entry = &filer.Entry{
 		FullPath: fullpath,
 	}
 	err = entry.DecodeAttributesAndChunks(data)
@@ -168,8 +167,12 @@ func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath w
 	return nil
 }
 
+func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+	return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
 func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
-	limit int) (entries []*filer2.Entry, err error) {
+	limit int) (entries []*filer.Entry, err error) {
 
 	directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
 	lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount)
@@ -191,7 +194,7 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath w
 		if limit < 0 {
 			break
 		}
-		entry := &filer2.Entry{
+		entry := &filer.Entry{
 			FullPath: weed_util.NewFullPath(string(fullpath), fileName),
 		}
 
diff --git a/weed/filer/leveldb2/leveldb2_store_kv.go b/weed/filer/leveldb2/leveldb2_store_kv.go
new file mode 100644
index 000000000..b415d3c32
--- /dev/null
+++ b/weed/filer/leveldb2/leveldb2_store_kv.go
@@ -0,0 +1,56 @@
+package leveldb
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/syndtr/goleveldb/leveldb"
+)
+
+func (store *LevelDB2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+	partitionId := bucketKvKey(key, store.dbCount)
+
+	err = store.dbs[partitionId].Put(key, value, nil)
+
+	if err != nil {
+		return fmt.Errorf("kv bucket %d put: %v", partitionId, err)
+	}
+
+	return nil
+}
+
+func (store *LevelDB2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+	partitionId := bucketKvKey(key, store.dbCount)
+
+	value, err = store.dbs[partitionId].Get(key, nil)
+
+	if err == leveldb.ErrNotFound {
+		return nil, filer.ErrKvNotFound
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("kv bucket %d get: %v", partitionId, err)
+	}
+
+	return
+}
+
+func (store *LevelDB2Store) KvDelete(ctx context.Context, key []byte) (err error) {
+
+	partitionId := bucketKvKey(key, store.dbCount)
+
+	err = store.dbs[partitionId].Delete(key, nil)
+
+	if err != nil {
+		return fmt.Errorf("kv bucket %d delete: %v", partitionId, err)
+	}
+
+	return nil
+}
+
+func bucketKvKey(key []byte, dbCount int) (partitionId int) {
+	return int(key[len(key)-1]) % dbCount
+}
diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer/leveldb2/leveldb2_store_test.go
similarity index 65%
rename from weed/filer2/leveldb2/leveldb2_store_test.go
rename to weed/filer/leveldb2/leveldb2_store_test.go
index 5faecf93e..c9b140951 100644
--- a/weed/filer2/leveldb2/leveldb2_store_test.go
+++ b/weed/filer/leveldb2/leveldb2_store_test.go
@@ -6,37 +6,37 @@ import (
 	"os"
 	"testing"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 func TestCreateAndFind(t *testing.T) {
-	filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+	testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
 	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
 	defer os.RemoveAll(dir)
 	store := &LevelDB2Store{}
 	store.initialize(dir, 2)
-	filer.SetStore(store)
+	testFiler.SetStore(store)
 
 	fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
 
 	ctx := context.Background()
 
-	entry1 := &filer2.Entry{
+	entry1 := &filer.Entry{
 		FullPath: fullpath,
-		Attr: filer2.Attr{
+		Attr: filer.Attr{
 			Mode: 0440,
 			Uid:  1234,
 			Gid:  5678,
 		},
 	}
 
-	if err := filer.CreateEntry(ctx, entry1, false, false, nil); err != nil {
+	if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
 		t.Errorf("create entry %v: %v", entry1.FullPath, err)
 		return
 	}
 
-	entry, err := filer.FindEntry(ctx, fullpath)
+	entry, err := testFiler.FindEntry(ctx, fullpath)
 
 	if err != nil {
 		t.Errorf("find entry: %v", err)
@@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
 	}
 
 	// checking one upper directory
-	entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100)
+	entries, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
 	}
 
 	// checking one upper directory
-	entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+	entries, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
@@ -65,17 +65,17 @@ func TestCreateAndFind(t *testing.T) {
 }
 
 func TestEmptyRoot(t *testing.T) {
-	filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+	testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
 	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
 	defer os.RemoveAll(dir)
 	store := &LevelDB2Store{}
 	store.initialize(dir, 2)
-	filer.SetStore(store)
+	testFiler.SetStore(store)
 
 	ctx := context.Background()
 
 	// checking one upper directory
-	entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+	entries, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
 	if err != nil {
 		t.Errorf("list entries: %v", err)
 		return
diff --git a/weed/filer2/meta_aggregator.go b/weed/filer/meta_aggregator.go
similarity index 99%
rename from weed/filer2/meta_aggregator.go
rename to weed/filer/meta_aggregator.go
index f2792bd26..506f03e4c 100644
--- a/weed/filer2/meta_aggregator.go
+++ b/weed/filer/meta_aggregator.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"context"
diff --git a/weed/filer2/meta_replay.go b/weed/filer/meta_replay.go
similarity index 98%
rename from weed/filer2/meta_replay.go
rename to weed/filer/meta_replay.go
index d9cdaa76a..feb76278b 100644
--- a/weed/filer2/meta_replay.go
+++ b/weed/filer/meta_replay.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"context"
diff --git a/weed/filer2/mongodb/mongodb_store.go b/weed/filer/mongodb/mongodb_store.go
similarity index 88%
rename from weed/filer2/mongodb/mongodb_store.go
rename to weed/filer/mongodb/mongodb_store.go
index 375a457a4..104d1f9e2 100644
--- a/weed/filer2/mongodb/mongodb_store.go
+++ b/weed/filer/mongodb/mongodb_store.go
@@ -3,7 +3,7 @@ package mongodb
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -15,7 +15,7 @@ import (
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &MongodbStore{})
+	filer.Stores = append(filer.Stores, &MongodbStore{})
 }
 
 type MongodbStore struct {
@@ -93,7 +93,7 @@ func (store *MongodbStore) RollbackTransaction(ctx context.Context) error {
 	return nil
 }
 
-func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	dir, name := entry.FullPath.DirAndName()
 	meta, err := entry.EncodeAttributesAndChunks()
@@ -109,14 +109,18 @@ func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer2.Entry)
 		Meta:      meta,
 	})
 
+	if err != nil {
+		return fmt.Errorf("InsertEntry %s: %v", entry.FullPath, err)
+	}
+
 	return nil
 }
 
-func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
 	return store.InsertEntry(ctx, entry)
 }
 
-func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
 
 	dir, name := fullpath.DirAndName()
 	var data Model
@@ -124,6 +128,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
 	var where = bson.M{"directory": dir, "name": name}
 	err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
 	if err != mongo.ErrNoDocuments && err != nil {
+		glog.Errorf("find %s: %v", fullpath, err)
 		return nil, filer_pb.ErrNotFound
 	}
 
@@ -131,7 +136,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
 		return nil, filer_pb.ErrNotFound
 	}
 
-	entry = &filer2.Entry{
+	entry = &filer.Entry{
 		FullPath: fullpath,
 	}
 
@@ -167,7 +172,11 @@ func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath ut
 	return nil
 }
 
-func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
+func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+	return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
+func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
 
 	var where = bson.M{"directory": string(fullpath), "name": bson.M{"$gt": startFileName}}
 	if inclusive {
@@ -185,7 +194,7 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
 			return nil, err
 		}
 
-		entry := &filer2.Entry{
+		entry := &filer.Entry{
 			FullPath: util.NewFullPath(string(fullpath), data.Name),
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(data.Meta); decodeErr != nil {
diff --git a/weed/filer/mongodb/mongodb_store_kv.go b/weed/filer/mongodb/mongodb_store_kv.go
new file mode 100644
index 000000000..09508e691
--- /dev/null
+++ b/weed/filer/mongodb/mongodb_store_kv.go
@@ -0,0 +1,72 @@
+package mongodb
+
+import (
+	"context"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/mongo"
+)
+
+func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+	dir, name := genDirAndName(key)
+
+	c := store.connect.Database(store.database).Collection(store.collectionName)
+
+	_, err = c.InsertOne(ctx, Model{
+		Directory: dir,
+		Name:      name,
+		Meta:      value,
+	})
+
+	if err != nil {
+		return fmt.Errorf("kv put: %v", err)
+	}
+
+	return nil
+}
+
+func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+	dir, name := genDirAndName(key)
+
+	var data Model
+
+	var where = bson.M{"directory": dir, "name": name}
+	err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
+	if err != mongo.ErrNoDocuments && err != nil {
+		glog.Errorf("kv get: %v", err)
+		return nil, filer.ErrKvNotFound
+	}
+
+	if len(data.Meta) == 0 {
+		return nil, filer.ErrKvNotFound
+	}
+
+	return data.Meta, nil
+}
+
+func (store *MongodbStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+	dir, name := genDirAndName(key)
+
+	where := bson.M{"directory": dir, "name": name}
+	_, err = store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where)
+	if err != nil {
+		return fmt.Errorf("kv delete: %v", err)
+	}
+
+	return nil
+}
+
+func genDirAndName(key []byte) (dir string, name string) {
+	for len(key) < 8 {
+		key = append(key, 0)
+	}
+
+	dir = string(key[:8])
+	name = string(key[8:])
+
+	return
+}
diff --git a/weed/filer2/mysql/mysql_store.go b/weed/filer/mysql/mysql_store.go
similarity index 85%
rename from weed/filer2/mysql/mysql_store.go
rename to weed/filer/mysql/mysql_store.go
index 63d99cd9d..708a67cc3 100644
--- a/weed/filer2/mysql/mysql_store.go
+++ b/weed/filer/mysql/mysql_store.go
@@ -4,8 +4,8 @@ import (
 	"database/sql"
 	"fmt"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
-	"github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	_ "github.com/go-sql-driver/mysql"
 )
@@ -15,7 +15,7 @@ const (
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &MysqlStore{})
+	filer.Stores = append(filer.Stores, &MysqlStore{})
 }
 
 type MysqlStore struct {
@@ -41,14 +41,14 @@ func (store *MysqlStore) Initialize(configuration util.Configuration, prefix str
 
 func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int,
 	interpolateParams bool) (err error) {
-
+	//
 	store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)"
 	store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?"
 	store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
 	store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
 	store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?"
-	store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?"
-	store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?"
+	store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? AND name like CONCAT(?,'%') ORDER BY NAME ASC LIMIT ?"
+	store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? AND name like CONCAT(?,'%') ORDER BY NAME ASC LIMIT ?"
 
 	sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
 	if interpolateParams {
diff --git a/weed/filer2/permission.go b/weed/filer/permission.go
similarity index 95%
rename from weed/filer2/permission.go
rename to weed/filer/permission.go
index 8a9508fbc..0d8b8292b 100644
--- a/weed/filer2/permission.go
+++ b/weed/filer/permission.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 func hasWritePermission(dir *Entry, entry *Entry) bool {
 
diff --git a/weed/filer2/postgres/README.txt b/weed/filer/postgres/README.txt
similarity index 100%
rename from weed/filer2/postgres/README.txt
rename to weed/filer/postgres/README.txt
diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer/postgres/postgres_store.go
similarity index 86%
rename from weed/filer2/postgres/postgres_store.go
rename to weed/filer/postgres/postgres_store.go
index 51c069aae..4544c8416 100644
--- a/weed/filer2/postgres/postgres_store.go
+++ b/weed/filer/postgres/postgres_store.go
@@ -4,8 +4,8 @@ import (
 	"database/sql"
 	"fmt"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
-	"github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	_ "github.com/lib/pq"
 )
@@ -15,7 +15,7 @@ const (
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &PostgresStore{})
+	filer.Stores = append(filer.Stores, &PostgresStore{})
 }
 
 type PostgresStore struct {
@@ -46,8 +46,8 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int
 	store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
 	store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
 	store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2"
-	store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
-	store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
+	store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like CONCAT($4,'%') ORDER BY NAME ASC LIMIT $5"
+	store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like CONCAT($4,'%') ORDER BY NAME ASC LIMIT $5"
 
 	sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, sslmode)
 	if password != "" {
diff --git a/weed/filer2/reader_at.go b/weed/filer/reader_at.go
similarity index 99%
rename from weed/filer2/reader_at.go
rename to weed/filer/reader_at.go
index 0cea83ff9..9f338782e 100644
--- a/weed/filer2/reader_at.go
+++ b/weed/filer/reader_at.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"context"
diff --git a/weed/filer2/reader_at_test.go b/weed/filer/reader_at_test.go
similarity index 99%
rename from weed/filer2/reader_at_test.go
rename to weed/filer/reader_at_test.go
index 7bfc9a972..d4a34cbfe 100644
--- a/weed/filer2/reader_at_test.go
+++ b/weed/filer/reader_at_test.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"fmt"
diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer/redis/redis_cluster_store.go
similarity index 90%
rename from weed/filer2/redis/redis_cluster_store.go
rename to weed/filer/redis/redis_cluster_store.go
index eaaecb740..8af94ee55 100644
--- a/weed/filer2/redis/redis_cluster_store.go
+++ b/weed/filer/redis/redis_cluster_store.go
@@ -1,13 +1,13 @@
 package redis
 
 import (
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/go-redis/redis"
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &RedisClusterStore{})
+	filer.Stores = append(filer.Stores, &RedisClusterStore{})
 }
 
 type RedisClusterStore struct {
diff --git a/weed/filer2/redis/redis_store.go b/weed/filer/redis/redis_store.go
similarity index 87%
rename from weed/filer2/redis/redis_store.go
rename to weed/filer/redis/redis_store.go
index 9debdb070..e152457ed 100644
--- a/weed/filer2/redis/redis_store.go
+++ b/weed/filer/redis/redis_store.go
@@ -1,13 +1,13 @@
 package redis
 
 import (
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/go-redis/redis"
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &RedisStore{})
+	filer.Stores = append(filer.Stores, &RedisStore{})
 }
 
 type RedisStore struct {
diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go
similarity index 89%
rename from weed/filer2/redis/universal_redis_store.go
rename to weed/filer/redis/universal_redis_store.go
index e5b9e8840..cc8819019 100644
--- a/weed/filer2/redis/universal_redis_store.go
+++ b/weed/filer/redis/universal_redis_store.go
@@ -9,7 +9,7 @@ import (
 
 	"github.com/go-redis/redis"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -33,7 +33,7 @@ func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error
 	return nil
 }
 
-func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	value, err := entry.EncodeAttributesAndChunks()
 	if err != nil {
@@ -57,12 +57,12 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2
 	return nil
 }
 
-func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	return store.InsertEntry(ctx, entry)
 }
 
-func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
 
 	data, err := store.Client.Get(string(fullpath)).Result()
 	if err == redis.Nil {
@@ -73,7 +73,7 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.F
 		return nil, fmt.Errorf("get %s : %v", fullpath, err)
 	}
 
-	entry = &filer2.Entry{
+	entry = &filer.Entry{
 		FullPath: fullpath,
 	}
 	err = entry.DecodeAttributesAndChunks([]byte(data))
@@ -121,8 +121,12 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full
 	return nil
 }
 
+func (store *UniversalRedisStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+	return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
 func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
-	limit int) (entries []*filer2.Entry, err error) {
+	limit int) (entries []*filer.Entry, err error) {
 
 	dirListKey := genDirectoryListKey(string(fullpath))
 	members, err := store.Client.SMembers(dirListKey).Result()
diff --git a/weed/filer/redis/universal_redis_store_kv.go b/weed/filer/redis/universal_redis_store_kv.go
new file mode 100644
index 000000000..0fc12c631
--- /dev/null
+++ b/weed/filer/redis/universal_redis_store_kv.go
@@ -0,0 +1,42 @@
+package redis
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/go-redis/redis"
+)
+
+func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+	_, err = store.Client.Set(string(key), value, 0).Result()
+
+	if err != nil {
+		return fmt.Errorf("kv put: %v", err)
+	}
+
+	return nil
+}
+
+func (store *UniversalRedisStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+	data, err := store.Client.Get(string(key)).Result()
+
+	if err == redis.Nil {
+		return nil, filer.ErrKvNotFound
+	}
+
+	return []byte(data), err
+}
+
+func (store *UniversalRedisStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+	_, err = store.Client.Del(string(key)).Result()
+
+	if err != nil {
+		return fmt.Errorf("kv delete: %v", err)
+	}
+
+	return nil
+}
diff --git a/weed/filer2/redis2/redis_cluster_store.go b/weed/filer/redis2/redis_cluster_store.go
similarity index 90%
rename from weed/filer2/redis2/redis_cluster_store.go
rename to weed/filer/redis2/redis_cluster_store.go
index b252eabab..d155dbe88 100644
--- a/weed/filer2/redis2/redis_cluster_store.go
+++ b/weed/filer/redis2/redis_cluster_store.go
@@ -1,13 +1,13 @@
 package redis2
 
 import (
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/go-redis/redis"
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &RedisCluster2Store{})
+	filer.Stores = append(filer.Stores, &RedisCluster2Store{})
 }
 
 type RedisCluster2Store struct {
diff --git a/weed/filer2/redis2/redis_store.go b/weed/filer/redis2/redis_store.go
similarity index 87%
rename from weed/filer2/redis2/redis_store.go
rename to weed/filer/redis2/redis_store.go
index 1e2a20043..ed04c817b 100644
--- a/weed/filer2/redis2/redis_store.go
+++ b/weed/filer/redis2/redis_store.go
@@ -1,13 +1,13 @@
 package redis2
 
 import (
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/go-redis/redis"
 )
 
 func init() {
-	filer2.Stores = append(filer2.Stores, &Redis2Store{})
+	filer.Stores = append(filer.Stores, &Redis2Store{})
 }
 
 type Redis2Store struct {
diff --git a/weed/filer2/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go
similarity index 88%
rename from weed/filer2/redis2/universal_redis_store.go
rename to weed/filer/redis2/universal_redis_store.go
index 420336b46..9e06ff68f 100644
--- a/weed/filer2/redis2/universal_redis_store.go
+++ b/weed/filer/redis2/universal_redis_store.go
@@ -7,7 +7,7 @@ import (
 
 	"github.com/go-redis/redis"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -31,7 +31,7 @@ func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) erro
 	return nil
 }
 
-func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	value, err := entry.EncodeAttributesAndChunks()
 	if err != nil {
@@ -52,12 +52,12 @@ func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer
 	return nil
 }
 
-func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
 
 	return store.InsertEntry(ctx, entry)
 }
 
-func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
 
 	data, err := store.Client.Get(string(fullpath)).Result()
 	if err == redis.Nil {
@@ -68,7 +68,7 @@ func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.
 		return nil, fmt.Errorf("get %s : %v", fullpath, err)
 	}
 
-	entry = &filer2.Entry{
+	entry = &filer.Entry{
 		FullPath: fullpath,
 	}
 	err = entry.DecodeAttributesAndChunks([]byte(data))
@@ -116,8 +116,12 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, ful
 	return nil
 }
 
+func (store *UniversalRedis2Store) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+	return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
 func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
-	limit int) (entries []*filer2.Entry, err error) {
+	limit int) (entries []*filer.Entry, err error) {
 
 	dirListKey := genDirectoryListKey(string(fullpath))
 	start := int64(0)
diff --git a/weed/filer/redis2/universal_redis_store_kv.go b/weed/filer/redis2/universal_redis_store_kv.go
new file mode 100644
index 000000000..658491ddf
--- /dev/null
+++ b/weed/filer/redis2/universal_redis_store_kv.go
@@ -0,0 +1,42 @@
+package redis2
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/go-redis/redis"
+)
+
+func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+	_, err = store.Client.Set(string(key), value, 0).Result()
+
+	if err != nil {
+		return fmt.Errorf("kv put: %v", err)
+	}
+
+	return nil
+}
+
+func (store *UniversalRedis2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+	data, err := store.Client.Get(string(key)).Result()
+
+	if err == redis.Nil {
+		return nil, filer.ErrKvNotFound
+	}
+
+	return []byte(data), err
+}
+
+func (store *UniversalRedis2Store) KvDelete(ctx context.Context, key []byte) (err error) {
+
+	_, err = store.Client.Del(string(key)).Result()
+
+	if err != nil {
+		return fmt.Errorf("kv delete: %v", err)
+	}
+
+	return nil
+}
diff --git a/weed/filer2/stream.go b/weed/filer/stream.go
similarity index 99%
rename from weed/filer2/stream.go
rename to weed/filer/stream.go
index fee9d45da..416359ebf 100644
--- a/weed/filer2/stream.go
+++ b/weed/filer/stream.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 import (
 	"bytes"
diff --git a/weed/filer2/topics.go b/weed/filer/topics.go
similarity index 84%
rename from weed/filer2/topics.go
rename to weed/filer/topics.go
index 9c6e5c88d..3a2fde8c4 100644
--- a/weed/filer2/topics.go
+++ b/weed/filer/topics.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 const (
 	TopicsDir    = "/topics"
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index f85b90a5d..59c4b7965 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -11,7 +11,7 @@ import (
 	"github.com/seaweedfs/fuse"
 	"github.com/seaweedfs/fuse/fs"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -156,7 +156,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
 			return fuse.EIO
 		}
 
-		dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+		dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
 
 		return nil
 	}); err != nil {
@@ -205,7 +205,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
 			return err
 		}
 
-		dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+		dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
 
 		return nil
 	})
@@ -471,7 +471,7 @@ func (dir *Dir) saveEntry() error {
 			return fuse.EIO
 		}
 
-		dir.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+		dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
 
 		return nil
 	})
diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go
index d813dd96a..71aa193f1 100644
--- a/weed/filesys/dir_link.go
+++ b/weed/filesys/dir_link.go
@@ -6,7 +6,7 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/fuse"
@@ -43,7 +43,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
 			return fuse.EIO
 		}
 
-		dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+		dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
 
 		return nil
 	})
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index d2117bfbb..abc2935c5 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -10,7 +10,7 @@ import (
 	"github.com/seaweedfs/fuse"
 	"github.com/seaweedfs/fuse/fs"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -33,7 +33,7 @@ type File struct {
 	dir            *Dir
 	wfs            *WFS
 	entry          *filer_pb.Entry
-	entryViewCache []filer2.VisibleInterval
+	entryViewCache []filer.VisibleInterval
 	isOpen         int
 	reader         io.ReaderAt
 	dirtyMetadata  bool
@@ -56,7 +56,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
 	attr.Inode = file.fullpath().AsInode()
 	attr.Valid = time.Second
 	attr.Mode = os.FileMode(file.entry.Attributes.FileMode)
-	attr.Size = filer2.FileSize(file.entry)
+	attr.Size = filer.FileSize(file.entry)
 	if file.isOpen > 0 {
 		attr.Size = file.entry.Attributes.FileSize
 		glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
@@ -118,7 +118,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
 	if req.Valid.Size() {
 
 		glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
-		if req.Size < filer2.FileSize(file.entry) {
+		if req.Size < filer.FileSize(file.entry) {
 			// fmt.Printf("truncate %v \n", fullPath)
 			var chunks []*filer_pb.FileChunk
 			var truncatedChunks []*filer_pb.FileChunk
@@ -273,7 +273,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
 	})
 
 	for _, chunk := range chunks {
-		file.entryViewCache = filer2.MergeIntoVisibles(file.entryViewCache, chunk)
+		file.entryViewCache = filer.MergeIntoVisibles(file.entryViewCache, chunk)
 	}
 
 	file.reader = nil
@@ -285,7 +285,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
 
 func (file *File) setEntry(entry *filer_pb.Entry) {
 	file.entry = entry
-	file.entryViewCache, _ = filer2.NonOverlappingVisibleIntervals(filer2.LookupFn(file.wfs), file.entry.Chunks)
+	file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), file.entry.Chunks)
 	file.reader = nil
 }
 
@@ -305,7 +305,7 @@ func (file *File) saveEntry() error {
 			return fuse.EIO
 		}
 
-		file.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+		file.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
 
 		return nil
 	})
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index a1f18df6f..195d8ae8d 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -13,7 +13,7 @@ import (
 	"github.com/seaweedfs/fuse"
 	"github.com/seaweedfs/fuse/fs"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
@@ -41,7 +41,7 @@ func newFileHandle(file *File, uid, gid uint32) *FileHandle {
 		Gid:        gid,
 	}
 	if fh.f.entry != nil {
-		fh.f.entry.Attributes.FileSize = filer2.FileSize(fh.f.entry)
+		fh.f.entry.Attributes.FileSize = filer.FileSize(fh.f.entry)
 	}
 
 	return fh
@@ -99,7 +99,7 @@ func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (maxSto
 
 func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 
-	fileSize := int64(filer2.FileSize(fh.f.entry))
+	fileSize := int64(filer.FileSize(fh.f.entry))
 
 	if fileSize == 0 {
 		glog.V(1).Infof("empty fh %v", fh.f.fullpath())
@@ -108,7 +108,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 
 	var chunkResolveErr error
 	if fh.f.entryViewCache == nil {
-		fh.f.entryViewCache, chunkResolveErr = filer2.NonOverlappingVisibleIntervals(filer2.LookupFn(fh.f.wfs), fh.f.entry.Chunks)
+		fh.f.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(filer.LookupFn(fh.f.wfs), fh.f.entry.Chunks)
 		if chunkResolveErr != nil {
 			return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
 		}
@@ -116,8 +116,8 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 	}
 
 	if fh.f.reader == nil {
-		chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64)
-		fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache, fileSize)
+		chunkViews := filer.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64)
+		fh.f.reader = filer.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache, fileSize)
 	}
 
 	totalRead, err := fh.f.reader.ReadAt(buff, offset)
@@ -254,10 +254,10 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
 			glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
 		}
 
-		manifestChunks, nonManifestChunks := filer2.SeparateManifestChunks(fh.f.entry.Chunks)
+		manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(fh.f.entry.Chunks)
 
-		chunks, _ := filer2.CompactFileChunks(filer2.LookupFn(fh.f.wfs), nonManifestChunks)
-		chunks, manifestErr := filer2.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.dir.FullPath()), chunks)
+		chunks, _ := filer.CompactFileChunks(filer.LookupFn(fh.f.wfs), nonManifestChunks)
+		chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.dir.FullPath()), chunks)
 		if manifestErr != nil {
 			// not good, but should be ok
 			glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
@@ -270,7 +270,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
 			return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
 		}
 
-		fh.f.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+		fh.f.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
 
 		return nil
 	})
diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go
index c0bb75f4a..f714fde09 100644
--- a/weed/filesys/meta_cache/meta_cache.go
+++ b/weed/filesys/meta_cache/meta_cache.go
@@ -5,8 +5,8 @@ import (
 	"os"
 	"sync"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
-	"github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/filer/leveldb"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -17,7 +17,7 @@ import (
 // e.g. fill fileId field for chunks
 
 type MetaCache struct {
-	actualStore filer2.FilerStore
+	actualStore filer.FilerStore
 	sync.RWMutex
 	visitedBoundary *bounded_tree.BoundedTree
 }
@@ -29,7 +29,7 @@ func NewMetaCache(dbFolder string) *MetaCache {
 	}
 }
 
-func openMetaStore(dbFolder string) filer2.FilerStore {
+func openMetaStore(dbFolder string) filer.FilerStore {
 
 	os.RemoveAll(dbFolder)
 	os.MkdirAll(dbFolder, 0755)
@@ -47,18 +47,18 @@ func openMetaStore(dbFolder string) filer2.FilerStore {
 
 }
 
-func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer2.Entry) error {
+func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer.Entry) error {
 	mc.Lock()
 	defer mc.Unlock()
 	return mc.doInsertEntry(ctx, entry)
 }
 
-func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer2.Entry) error {
+func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer.Entry) error {
 	filer_pb.BeforeEntrySerialization(entry.Chunks)
 	return mc.actualStore.InsertEntry(ctx, entry)
 }
 
-func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPath, newEntry *filer2.Entry) error {
+func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry) error {
 	mc.Lock()
 	defer mc.Unlock()
 
@@ -89,14 +89,14 @@ func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPat
 	return nil
 }
 
-func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer2.Entry) error {
+func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer.Entry) error {
 	mc.Lock()
 	defer mc.Unlock()
 	filer_pb.BeforeEntrySerialization(entry.Chunks)
 	return mc.actualStore.UpdateEntry(ctx, entry)
 }
 
-func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer2.Entry, err error) {
+func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer.Entry, err error) {
 	mc.RLock()
 	defer mc.RUnlock()
 	entry, err = mc.actualStore.FindEntry(ctx, fp)
@@ -113,7 +113,7 @@ func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err err
 	return mc.actualStore.DeleteEntry(ctx, fp)
 }
 
-func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer2.Entry, error) {
+func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer.Entry, error) {
 	mc.RLock()
 	defer mc.RUnlock()
 
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
index 05983ec7d..455a8772c 100644
--- a/weed/filesys/meta_cache/meta_cache_init.go
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -17,7 +17,7 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
 		glog.V(4).Infof("ReadDirAllEntries %s ...", path)
 
 		err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
-			entry := filer2.FromPbEntry(string(dirPath), pbEntry)
+			entry := filer.FromPbEntry(string(dirPath), pbEntry)
 			if err := mc.doInsertEntry(context.Background(), entry); err != nil {
 				glog.V(0).Infof("read %s: %v", entry.FullPath, err)
 				return err
diff --git a/weed/filesys/meta_cache/meta_cache_subscribe.go b/weed/filesys/meta_cache/meta_cache_subscribe.go
index 3c0a9c2ac..bd98666ed 100644
--- a/weed/filesys/meta_cache/meta_cache_subscribe.go
+++ b/weed/filesys/meta_cache/meta_cache_subscribe.go
@@ -6,7 +6,7 @@ import (
 	"io"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -24,7 +24,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
 		}
 
 		var oldPath util.FullPath
-		var newEntry *filer2.Entry
+		var newEntry *filer.Entry
 		if message.OldEntry != nil {
 			oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
 			glog.V(4).Infof("deleting %v", oldPath)
@@ -37,7 +37,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
 			}
 			key := util.NewFullPath(dir, message.NewEntry.Name)
 			glog.V(4).Infof("creating %v", key)
-			newEntry = filer2.FromPbEntry(dir, message.NewEntry)
+			newEntry = filer.FromPbEntry(dir, message.NewEntry)
 		}
 		return mc.AtomicUpdateEntry(context.Background(), oldPath, newEntry)
 	}
diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go
index 87a4e907f..9791c8630 100644
--- a/weed/filesys/wfs_deletion.go
+++ b/weed/filesys/wfs_deletion.go
@@ -5,7 +5,7 @@ import (
 
 	"google.golang.org/grpc"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -22,7 +22,7 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
 			fileIds = append(fileIds, chunk.GetFileIdString())
 			continue
 		}
-		dataChunks, manifestResolveErr := filer2.ResolveOneChunkManifest(filer2.LookupFn(wfs), chunk)
+		dataChunks, manifestResolveErr := filer.ResolveOneChunkManifest(filer.LookupFn(wfs), chunk)
 		if manifestResolveErr != nil {
 			glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
 		}
@@ -42,7 +42,7 @@ func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.Se
 
 	var vids []string
 	for _, fileId := range fileIds {
-		vids = append(vids, filer2.VolumeId(fileId))
+		vids = append(vids, filer.VolumeId(fileId))
 	}
 
 	lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
diff --git a/weed/filesys/wfs_write.go b/weed/filesys/wfs_write.go
index 786d0b42a..fec33e4ab 100644
--- a/weed/filesys/wfs_write.go
+++ b/weed/filesys/wfs_write.go
@@ -5,14 +5,14 @@ import (
 	"fmt"
 	"io"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
 )
 
-func (wfs *WFS) saveDataAsChunk(dir string) filer2.SaveDataAsChunkFunctionType {
+func (wfs *WFS) saveDataAsChunk(dir string) filer.SaveDataAsChunkFunctionType {
 
 	return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) {
 		var fileId, host string
diff --git a/weed/messaging/broker/broker_grpc_server.go b/weed/messaging/broker/broker_grpc_server.go
index 1950326ec..8e207b1cc 100644
--- a/weed/messaging/broker/broker_grpc_server.go
+++ b/weed/messaging/broker/broker_grpc_server.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
 )
@@ -29,9 +29,9 @@ func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *m
 }
 
 func genTopicDir(namespace, topic string) string {
-	return fmt.Sprintf("%s/%s/%s", filer2.TopicsDir, namespace, topic)
+	return fmt.Sprintf("%s/%s/%s", filer.TopicsDir, namespace, topic)
 }
 
 func genTopicDirEntry(namespace, topic string) (dir, entry string) {
-	return fmt.Sprintf("%s/%s", filer2.TopicsDir, namespace), topic
+	return fmt.Sprintf("%s/%s", filer.TopicsDir, namespace), topic
 }
diff --git a/weed/messaging/broker/broker_grpc_server_publish.go b/weed/messaging/broker/broker_grpc_server_publish.go
index 154bf8a44..6e6b723d1 100644
--- a/weed/messaging/broker/broker_grpc_server_publish.go
+++ b/weed/messaging/broker/broker_grpc_server_publish.go
@@ -7,7 +7,7 @@ import (
 
 	"github.com/golang/protobuf/proto"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
@@ -49,7 +49,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
 		Partition: in.Init.Partition,
 	}
 
-	tpDir := fmt.Sprintf("%s/%s/%s", filer2.TopicsDir, tp.Namespace, tp.Topic)
+	tpDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, tp.Namespace, tp.Topic)
 	md5File := fmt.Sprintf("p%02d.md5", tp.Partition)
 	// println("chan data stored under", tpDir, "as", md5File)
 
diff --git a/weed/messaging/broker/broker_grpc_server_subscribe.go b/weed/messaging/broker/broker_grpc_server_subscribe.go
index 8cc5a928c..4a89937c1 100644
--- a/weed/messaging/broker/broker_grpc_server_subscribe.go
+++ b/weed/messaging/broker/broker_grpc_server_subscribe.go
@@ -8,7 +8,7 @@ import (
 
 	"github.com/golang/protobuf/proto"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
@@ -147,9 +147,9 @@ func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTim
 				return nil
 			}
 			// println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name)
-			chunkedFileReader := filer2.NewChunkStreamReader(broker, hourMinuteEntry.Chunks)
+			chunkedFileReader := filer.NewChunkStreamReader(broker, hourMinuteEntry.Chunks)
 			defer chunkedFileReader.Close()
-			if _, err := filer2.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+			if _, err := filer.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
 				chunkedFileReader.Close()
 				if err == io.EOF {
 					return err
diff --git a/weed/messaging/broker/topic_manager.go b/weed/messaging/broker/topic_manager.go
index 93815f8f4..edddca813 100644
--- a/weed/messaging/broker/topic_manager.go
+++ b/weed/messaging/broker/topic_manager.go
@@ -5,7 +5,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
 	"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
@@ -59,7 +59,7 @@ func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topi
 		startTime, stopTime = startTime.UTC(), stopTime.UTC()
 		targetFile := fmt.Sprintf(
 			"%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d",
-			filer2.TopicsDir, tp.Namespace, tp.Topic,
+			filer.TopicsDir, tp.Namespace, tp.Topic,
 			startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
 			tp.Partition,
 		)
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index 6419509be..64c3d46ea 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -8,7 +8,7 @@ import (
 	"strings"
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -95,8 +95,8 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return nil
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	// Create a URL that references a to-be-created blob in your
 	// Azure Storage account's container.
diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go
index 041cee952..d0b3e7a34 100644
--- a/weed/replication/sink/b2sink/b2_sink.go
+++ b/weed/replication/sink/b2sink/b2_sink.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"strings"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
 	"github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -84,8 +84,8 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return nil
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	bucket, err := g.client.Bucket(context.Background(), g.bucket)
 	if err != nil {
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index b90a642c9..7ba1670e0 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -8,7 +8,7 @@ import (
 
 	"github.com/chrislusf/seaweedfs/weed/security"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -92,7 +92,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		}
 		glog.V(1).Infof("lookup: %v", lookupRequest)
 		if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
-			if filer2.ETag(resp.Entry) == filer2.ETag(entry) {
+			if filer.ETag(resp.Entry) == filer.ETag(entry) {
 				glog.V(0).Infof("already replicated %s", key)
 				return nil
 			}
@@ -164,13 +164,13 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 		// skip if already changed
 		// this usually happens when the messages are not ordered
 		glog.V(0).Infof("late updates %s", key)
-	} else if filer2.ETag(newEntry) == filer2.ETag(existingEntry) {
+	} else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
 		// skip if no change
 		// this usually happens when retrying the replication
 		glog.V(0).Infof("already replicated %s", key)
 	} else {
 		// find out what changed
-		deletedChunks, newChunks, err := compareChunks(filer2.LookupFn(fs), oldEntry, newEntry)
+		deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
 		if err != nil {
 			return true, fmt.Errorf("replicte %s compare chunks error: %v", key, err)
 		}
@@ -178,7 +178,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 		// delete the chunks that are deleted from the source
 		if deleteIncludeChunks {
 			// remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
-			existingEntry.Chunks = filer2.DoMinusChunks(existingEntry.Chunks, deletedChunks)
+			existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks)
 		}
 
 		// replicate the chunks that are new in the source
@@ -207,21 +207,21 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 	})
 
 }
-func compareChunks(lookupFileIdFn filer2.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
-	aData, aMeta, aErr := filer2.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
+func compareChunks(lookupFileIdFn filer.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
+	aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
 	if aErr != nil {
 		return nil, nil, aErr
 	}
-	bData, bMeta, bErr := filer2.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
+	bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
 	if bErr != nil {
 		return nil, nil, bErr
 	}
 
-	deletedChunks = append(deletedChunks, filer2.DoMinusChunks(aData, bData)...)
-	deletedChunks = append(deletedChunks, filer2.DoMinusChunks(aMeta, bMeta)...)
+	deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
+	deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)
 
-	newChunks = append(newChunks, filer2.DoMinusChunks(bData, aData)...)
-	newChunks = append(newChunks, filer2.DoMinusChunks(bMeta, aMeta)...)
+	newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
+	newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)
 
 	return
 }
diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go
index 82f4d72cf..2e09a87f9 100644
--- a/weed/replication/sink/gcssink/gcs_sink.go
+++ b/weed/replication/sink/gcssink/gcs_sink.go
@@ -8,7 +8,7 @@ import (
 	"cloud.google.com/go/storage"
 	"google.golang.org/api/option"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -89,8 +89,8 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return nil
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
 
diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go
index 56fc1930d..4e7df8ff2 100644
--- a/weed/replication/sink/s3sink/s3_sink.go
+++ b/weed/replication/sink/s3sink/s3_sink.go
@@ -12,7 +12,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3iface"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -107,8 +107,8 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return err
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	parts := make([]*s3.CompletedPart, len(chunkViews))
 
@@ -116,7 +116,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 	for chunkIndex, chunk := range chunkViews {
 		partId := chunkIndex + 1
 		wg.Add(1)
-		go func(chunk *filer2.ChunkView, index int) {
+		go func(chunk *filer.ChunkView, index int) {
 			defer wg.Done()
 			if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
 				err = uploadErr
diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go
index c5c65ed5c..8a8e7a92b 100644
--- a/weed/replication/sink/s3sink/s3_write.go
+++ b/weed/replication/sink/s3sink/s3_write.go
@@ -9,7 +9,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -103,7 +103,7 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId
 }
 
 // To upload a part
-func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
+func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.ChunkView) (*s3.CompletedPart, error) {
 	var readSeeker io.ReadSeeker
 
 	readSeeker, err := s3sink.buildReadSeeker(chunk)
@@ -156,7 +156,7 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
 	return err
 }
 
-func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) {
+func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, error) {
 	fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId)
 	if err != nil {
 		return nil, err
diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index a67a86454..4eb9bf32c 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -12,7 +12,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/google/uuid"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
@@ -108,7 +108,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 		CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
 			Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)),
 			Bucket:   input.Bucket,
-			ETag:     aws.String("\"" + filer2.ETagChunks(finalParts) + "\""),
+			ETag:     aws.String("\"" + filer.ETagChunks(finalParts) + "\""),
 			Key:      objectKey(input.Key),
 		},
 	}
@@ -208,8 +208,8 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
 			output.Parts = append(output.Parts, &s3.Part{
 				PartNumber:   aws.Int64(int64(partNumber)),
 				LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0).UTC()),
-				Size:         aws.Int64(int64(filer2.FileSize(entry))),
-				ETag:         aws.String("\"" + filer2.ETag(entry) + "\""),
+				Size:         aws.Int64(int64(filer.FileSize(entry))),
+				ETag:         aws.String("\"" + filer.ETag(entry) + "\""),
 			})
 		}
 	}
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index 254a99275..b6779dfb7 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -12,7 +12,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
 
@@ -139,8 +139,8 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 				contents = append(contents, ListEntry{
 					Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
 					LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
-					ETag:         "\"" + filer2.ETag(entry) + "\"",
-					Size:         int64(filer2.FileSize(entry)),
+					ETag:         "\"" + filer.ETag(entry) + "\"",
+					Size:         int64(filer.FileSize(entry)),
 					Owner: CanonicalUser{
 						ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
 						DisplayName: entry.Attributes.UserName,
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index 405742e1e..d3ced0a53 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -6,10 +6,9 @@ import (
 	"os"
 	"path/filepath"
 	"strconv"
-	"strings"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -35,7 +34,7 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
 		Entry: &filer_pb.Entry{
 			Name:        req.Name,
 			IsDirectory: entry.IsDirectory(),
-			Attributes:  filer2.EntryAttributeToPb(entry),
+			Attributes:  filer.EntryAttributeToPb(entry),
 			Chunks:      entry.Chunks,
 			Extended:    entry.Extended,
 		},
@@ -51,7 +50,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
 		limit = fs.option.DirListingLimit
 	}
 
-	paginationLimit := filer2.PaginationSize
+	paginationLimit := filer.PaginationSize
 	if limit < paginationLimit {
 		paginationLimit = limit
 	}
@@ -59,7 +58,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
 	lastFileName := req.StartFromFileName
 	includeLastFile := req.InclusiveStartFrom
 	for limit > 0 {
-		entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit)
+		entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit, req.Prefix)
 
 		if err != nil {
 			return err
@@ -74,18 +73,12 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
 
 			lastFileName = entry.Name()
 
-			if req.Prefix != "" {
-				if !strings.HasPrefix(entry.Name(), req.Prefix) {
-					continue
-				}
-			}
-
 			if err := stream.Send(&filer_pb.ListEntriesResponse{
 				Entry: &filer_pb.Entry{
 					Name:        entry.Name(),
 					IsDirectory: entry.IsDirectory(),
 					Chunks:      entry.Chunks,
-					Attributes:  filer2.EntryAttributeToPb(entry),
+					Attributes:  filer.EntryAttributeToPb(entry),
 					Extended:    entry.Extended,
 				},
 			}); err != nil {
@@ -167,9 +160,9 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr
 		return
 	}
 
-	createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{
+	createErr := fs.filer.CreateEntry(ctx, &filer.Entry{
 		FullPath: util.JoinPath(req.Directory, req.Entry.Name),
-		Attr:     filer2.PbToEntryAttribute(req.Entry.Attributes),
+		Attr:     filer.PbToEntryAttribute(req.Entry.Attributes),
 		Chunks:   chunks,
 	}, req.OExcl, req.IsFromOtherCluster, req.Signatures)
 
@@ -198,7 +191,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
 		return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("UpdateEntry cleanupChunks %s: %v", fullpath, err2)
 	}
 
-	newEntry := &filer2.Entry{
+	newEntry := &filer.Entry{
 		FullPath: util.JoinPath(req.Directory, req.Entry.Name),
 		Attr:     entry.Attr,
 		Extended: req.Entry.Extended,
@@ -225,7 +218,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
 
 	}
 
-	if filer2.EqualEntry(entry, newEntry) {
+	if filer.EqualEntry(entry, newEntry) {
 		return &filer_pb.UpdateEntryResponse{}, err
 	}
 
@@ -240,23 +233,23 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
 	return &filer_pb.UpdateEntryResponse{}, err
 }
 
-func (fs *FilerServer) cleanupChunks(existingEntry *filer2.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) {
+func (fs *FilerServer) cleanupChunks(existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) {
 
 	// remove old chunks if not included in the new ones
 	if existingEntry != nil {
-		garbage, err = filer2.MinusChunks(fs.lookupFileId, existingEntry.Chunks, newEntry.Chunks)
+		garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.Chunks, newEntry.Chunks)
 		if err != nil {
 			return newEntry.Chunks, nil, fmt.Errorf("MinusChunks: %v", err)
 		}
 	}
 
 	// files with manifest chunks are usually large and append only, skip calculating covered chunks
-	manifestChunks, nonManifestChunks := filer2.SeparateManifestChunks(newEntry.Chunks)
+	manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.Chunks)
 
-	chunks, coveredChunks := filer2.CompactFileChunks(fs.lookupFileId, nonManifestChunks)
+	chunks, coveredChunks := filer.CompactFileChunks(fs.lookupFileId, nonManifestChunks)
 	garbage = append(garbage, coveredChunks...)
 
-	chunks, err = filer2.MaybeManifestize(fs.saveAsChunk(
+	chunks, err = filer.MaybeManifestize(fs.saveAsChunk(
 		newEntry.Attributes.Replication,
 		newEntry.Attributes.Collection,
 		"",
@@ -280,9 +273,9 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
 	var offset int64 = 0
 	entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath))
 	if err == filer_pb.ErrNotFound {
-		entry = &filer2.Entry{
+		entry = &filer.Entry{
 			FullPath: fullpath,
-			Attr: filer2.Attr{
+			Attr: filer.Attr{
 				Crtime: time.Now(),
 				Mtime:  time.Now(),
 				Mode:   os.FileMode(0644),
@@ -291,7 +284,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
 			},
 		}
 	} else {
-		offset = int64(filer2.TotalSize(entry.Chunks))
+		offset = int64(filer.TotalSize(entry.Chunks))
 	}
 
 	for _, chunk := range req.Chunks {
@@ -301,7 +294,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
 
 	entry.Chunks = append(entry.Chunks, req.Chunks...)
 
-	entry.Chunks, err = filer2.MaybeManifestize(fs.saveAsChunk(
+	entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(
 		entry.Replication,
 		entry.Collection,
 		"",
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index cbb71682c..35df01665 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"path/filepath"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -43,7 +43,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom
 	return &filer_pb.AtomicRenameEntryResponse{}, nil
 }
 
-func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
+func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
 
 	if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events, func() error {
 		if entry.IsDirectory() {
@@ -59,7 +59,7 @@ func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, e
 	return nil
 }
 
-func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
+func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
 
 	currentDirPath := oldParent.Child(entry.Name())
 	newDirPath := newParent.Child(newName)
@@ -70,7 +70,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
 	includeLastFile := false
 	for {
 
-		entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024)
+		entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "")
 		if err != nil {
 			return err
 		}
@@ -92,7 +92,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
 	return nil
 }
 
-func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents,
+func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, events *MoveEvents,
 	moveFolderSubEntries func() error) error {
 
 	oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName)
@@ -105,7 +105,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
 	}
 
 	// add to new directory
-	newEntry := &filer2.Entry{
+	newEntry := &filer.Entry{
 		FullPath: newPath,
 		Attr:     entry.Attr,
 		Chunks:   entry.Chunks,
@@ -136,6 +136,6 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
 }
 
 type MoveEvents struct {
-	oldEntries []*filer2.Entry
-	newEntries []*filer2.Entry
+	oldEntries []*filer.Entry
+	newEntries []*filer.Entry
 }
diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go
index 2ad12e9c8..9ba45edfe 100644
--- a/weed/server/filer_grpc_server_sub_meta.go
+++ b/weed/server/filer_grpc_server_sub_meta.go
@@ -7,7 +7,7 @@ import (
 
 	"github.com/golang/protobuf/proto"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -63,7 +63,7 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
 
 	eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)
 
-	if _, ok := fs.filer.Store.ActualStore.(filer2.FilerLocalStore); ok {
+	if _, ok := fs.filer.Store.ActualStore.(filer.FilerLocalStore); ok {
 		// println("reading from persisted logs ...")
 		processedTsNs, err := fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
 		if err != nil {
@@ -124,7 +124,7 @@ func eachEventNotificationFn(req *filer_pb.SubscribeMetadataRequest, stream file
 		fullpath := util.Join(dirPath, entryName)
 
 		// skip on filer internal meta logs
-		if strings.HasPrefix(fullpath, filer2.SystemLogDir) {
+		if strings.HasPrefix(fullpath, filer.SystemLogDir) {
 			return nil
 		}
 
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index 6995c7cfe..160ea5a6d 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -18,16 +18,16 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/util"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
-	_ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra"
-	_ "github.com/chrislusf/seaweedfs/weed/filer2/etcd"
-	_ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
-	_ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb2"
-	_ "github.com/chrislusf/seaweedfs/weed/filer2/mongodb"
-	_ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
-	_ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
-	_ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
-	_ "github.com/chrislusf/seaweedfs/weed/filer2/redis2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/mongodb"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/notification"
 	_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"
@@ -58,7 +58,7 @@ type FilerOption struct {
 type FilerServer struct {
 	option         *FilerOption
 	secret         security.SigningKey
-	filer          *filer2.Filer
+	filer          *filer.Filer
 	grpcDialOption grpc.DialOption
 
 	// notifying clients
@@ -82,7 +82,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
 		glog.Fatal("master list is required!")
 	}
 
-	fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, func() {
+	fs.filer = filer.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, func() {
 		fs.listenersCond.Broadcast()
 	})
 	fs.filer.Cipher = option.Cipher
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index 449b9f1a0..fbd45d6b9 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -11,7 +11,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/images"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -94,7 +94,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 	}
 
 	// set etag
-	etag := filer2.ETagEntry(entry)
+	etag := filer.ETagEntry(entry)
 	if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
 		w.WriteHeader(http.StatusNotModified)
 		return
@@ -115,7 +115,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 		ext := filepath.Ext(filename)
 		width, height, mode, shouldResize := shouldResizeImages(ext, r)
 		if shouldResize {
-			data, err := filer2.ReadAll(fs.filer.MasterClient, entry.Chunks)
+			data, err := filer.ReadAll(fs.filer.MasterClient, entry.Chunks)
 			if err != nil {
 				glog.Errorf("failed to read %s: %v", path, err)
 				w.WriteHeader(http.StatusNotModified)
@@ -128,7 +128,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 	}
 
 	processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
-		return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
+		return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
 	})
 
 }
diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go
index ae28fc1db..9ca0209f4 100644
--- a/weed/server/filer_server_handlers_read_dir.go
+++ b/weed/server/filer_server_handlers_read_dir.go
@@ -32,7 +32,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
 
 	lastFileName := r.FormValue("lastFileName")
 
-	entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit)
+	entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit, "")
 
 	if err != nil {
 		glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err)
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 1d037f85f..0f6176356 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -13,7 +13,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -86,7 +86,7 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
 		return nil, nil, err
 	}
 
-	fileChunks, replyerr = filer2.MaybeManifestize(fs.saveAsChunk(replication, collection, dataCenter, ttlString, fsync), fileChunks)
+	fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(replication, collection, dataCenter, ttlString, fsync), fileChunks)
 	if replyerr != nil {
 		glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
 		return
@@ -108,7 +108,7 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter
 		return nil, nil, err
 	}
 
-	fileChunks, replyerr = filer2.MaybeManifestize(fs.saveAsChunk(replication, collection, dataCenter, ttlString, fsync), fileChunks)
+	fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(replication, collection, dataCenter, ttlString, fsync), fileChunks)
 	if replyerr != nil {
 		glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
 		return
@@ -149,9 +149,9 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 	}
 
 	glog.V(4).Infoln("saving", path)
-	entry := &filer2.Entry{
+	entry := &filer.Entry{
 		FullPath: util.FullPath(path),
-		Attr: filer2.Attr{
+		Attr: filer.Attr{
 			Mtime:       time.Now(),
 			Crtime:      crTime,
 			Mode:        os.FileMode(mode),
@@ -236,7 +236,7 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht
 	return uploadResult, err
 }
 
-func (fs *FilerServer) saveAsChunk(replication string, collection string, dataCenter string, ttlString string, fsync bool) filer2.SaveDataAsChunkFunctionType {
+func (fs *FilerServer) saveAsChunk(replication string, collection string, dataCenter string, ttlString string, fsync bool) filer.SaveDataAsChunkFunctionType {
 
 	return func(reader io.Reader, name string, offset int64) (*filer_pb.FileChunk, string, string, error) {
 		// assign one file id for one chunk
diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go
index 670399425..60082a8d4 100644
--- a/weed/server/filer_server_handlers_write_cipher.go
+++ b/weed/server/filer_server_handlers_write_cipher.go
@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -58,9 +58,9 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
 		}
 	}
 
-	entry := &filer2.Entry{
+	entry := &filer.Entry{
 		FullPath: util.FullPath(path),
-		Attr: filer2.Attr{
+		Attr: filer.Attr{
 			Mtime:       time.Now(),
 			Crtime:      time.Now(),
 			Mode:        0660,
diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go
index d310a27d4..108892f92 100644
--- a/weed/server/master_grpc_server.go
+++ b/weed/server/master_grpc_server.go
@@ -136,7 +136,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
 		}
 
 		if len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards {
-			glog.V(1).Infof("master recieved ec shards from %s: %+v", dn.Url(), heartbeat.EcShards)
+			glog.V(1).Infof("master received ec shards from %s: %+v", dn.Url(), heartbeat.EcShards)
 			newShards, deletedShards := ms.Topo.SyncDataNodeEcShards(heartbeat.EcShards, dn)
 
 			// broadcast the ec vid changes to master clients
diff --git a/weed/server/master_server.go b/weed/server/master_server.go
index ae59636ad..657b170c2 100644
--- a/weed/server/master_server.go
+++ b/weed/server/master_server.go
@@ -209,7 +209,7 @@ func (ms *MasterServer) startAdminScripts() {
 		scriptLines = append(scriptLines, "unlock")
 	}
 
-	masterAddress := fmt.Sprintf("%s:%d",ms.option.Host, ms.option.Port)
+	masterAddress := fmt.Sprintf("%s:%d", ms.option.Host, ms.option.Port)
 
 	var shellOptions shell.ShellOptions
 	shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master")
diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go
index 5c7d5572c..cd2b53c8a 100644
--- a/weed/server/volume_grpc_copy.go
+++ b/weed/server/volume_grpc_copy.go
@@ -27,17 +27,12 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 
 		glog.V(0).Infof("volume %d already exists. deleted before copying...", req.VolumeId)
 
-		err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId))
-		if err != nil {
-			return nil, fmt.Errorf("failed to mount existing volume %d: %v", req.VolumeId, err)
-		}
-
-		err = vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
+		err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
 		if err != nil {
 			return nil, fmt.Errorf("failed to delete existing volume %d: %v", req.VolumeId, err)
 		}
 
-		glog.V(0).Infof("deleted exisitng volume %d before copying.", req.VolumeId)
+		glog.V(0).Infof("deleted existing volume %d before copying.", req.VolumeId)
 	}
 
 	location := vs.store.FindFreeLocation()
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index f06189e34..57723ab0b 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -19,7 +19,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/security"
 )
@@ -41,7 +41,7 @@ type WebDavOption struct {
 type WebDavServer struct {
 	option         *WebDavOption
 	secret         security.SigningKey
-	filer          *filer2.Filer
+	filer          *filer.Filer
 	grpcDialOption grpc.DialOption
 	Handler        *webdav.Handler
 }
@@ -67,7 +67,7 @@ func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) {
 type WebDavFileSystem struct {
 	option         *WebDavOption
 	secret         security.SigningKey
-	filer          *filer2.Filer
+	filer          *filer.Filer
 	grpcDialOption grpc.DialOption
 	chunkCache     *chunk_cache.TieredChunkCache
 	signature      int32
@@ -94,7 +94,7 @@ type WebDavFile struct {
 	isDirectory    bool
 	off            int64
 	entry          *filer_pb.Entry
-	entryViewCache []filer2.VisibleInterval
+	entryViewCache []filer.VisibleInterval
 	reader         io.ReaderAt
 }
 
@@ -338,7 +338,7 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F
 	if err != nil {
 		return nil, err
 	}
-	fi.size = int64(filer2.FileSize(entry))
+	fi.size = int64(filer.FileSize(entry))
 	fi.name = string(fullpath)
 	fi.mode = os.FileMode(entry.Attributes.FileMode)
 	fi.modifiledTime = time.Unix(entry.Attributes.Mtime, 0)
@@ -471,17 +471,17 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
 	if err != nil {
 		return 0, err
 	}
-	fileSize := int64(filer2.FileSize(f.entry))
+	fileSize := int64(filer.FileSize(f.entry))
 	if fileSize == 0 {
 		return 0, io.EOF
 	}
 	if f.entryViewCache == nil {
-		f.entryViewCache, _ = filer2.NonOverlappingVisibleIntervals(filer2.LookupFn(f.fs), f.entry.Chunks)
+		f.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(f.fs), f.entry.Chunks)
 		f.reader = nil
 	}
 	if f.reader == nil {
-		chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
-		f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache, fileSize)
+		chunkViews := filer.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
+		f.reader = filer.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache, fileSize)
 	}
 
 	readSize, err = f.reader.ReadAt(p, f.off)
@@ -509,7 +509,7 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) {
 
 	err = filer_pb.ReadDirAllEntries(f.fs, util.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) error {
 		fi := FileInfo{
-			size:          int64(filer2.FileSize(entry)),
+			size:          int64(filer.FileSize(entry)),
 			name:          entry.Name,
 			mode:          os.FileMode(entry.Attributes.FileMode),
 			modifiledTime: time.Unix(entry.Attributes.Mtime, 0),
diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go
index 7177d8ac3..3c5e13663 100644
--- a/weed/shell/command_fs_cat.go
+++ b/weed/shell/command_fs_cat.go
@@ -5,7 +5,7 @@ import (
 	"io"
 	"math"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -52,7 +52,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write
 			return err
 		}
 
-		return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
+		return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
 
 	})
 
diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go
index 5404b0cdb..71003714d 100644
--- a/weed/shell/command_fs_du.go
+++ b/weed/shell/command_fs_du.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"io"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -70,7 +70,7 @@ func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir
 			}
 		} else {
 			fileBlockCount = uint64(len(entry.Chunks))
-			fileByteCount = filer2.FileSize(entry)
+			fileByteCount = filer.FileSize(entry)
 			blockCount += fileBlockCount
 			byteCount += fileByteCount
 		}
diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go
index 4110c7b8d..592ec8be0 100644
--- a/weed/shell/command_fs_ls.go
+++ b/weed/shell/command_fs_ls.go
@@ -8,7 +8,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -95,7 +95,7 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer
 			fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n",
 				fileMode, len(entry.Chunks),
 				userName, groupName,
-				filer2.FileSize(entry), dir, entry.Name)
+				filer.FileSize(entry), dir, entry.Name)
 		} else {
 			fmt.Fprintf(writer, "%s\n", entry.Name)
 		}
diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
index cf5ad6d6d..4b3568acb 100644
--- a/weed/shell/command_volume_fsck.go
+++ b/weed/shell/command_volume_fsck.go
@@ -11,7 +11,7 @@ import (
 	"path/filepath"
 	"sync"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -197,7 +197,7 @@ func (c *commandVolumeFsck) collectFilerFileIds(tempFolder string, volumeIdToSer
 			files[i.vid].Write(buffer)
 		}
 	}, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
-		dChunks, mChunks, resolveErr := filer2.ResolveChunkManifest(filer2.LookupFn(c.env), entry.Entry.Chunks)
+		dChunks, mChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.Chunks)
 		if resolveErr != nil {
 			return nil
 		}
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index c309b3f92..9ecc57459 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -174,6 +174,9 @@ func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e er
 }
 
 func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (found bool, e error) {
+	l.volumesLock.Lock()
+	defer l.volumesLock.Unlock()
+
 	v, ok := l.volumes[vid]
 	if !ok {
 		return
diff --git a/weed/storage/needle/volume_ttl_test.go b/weed/storage/needle/volume_ttl_test.go
index 0afebebf5..f75453593 100644
--- a/weed/storage/needle/volume_ttl_test.go
+++ b/weed/storage/needle/volume_ttl_test.go
@@ -30,6 +30,11 @@ func TestTTLReadWrite(t *testing.T) {
 		t.Errorf("5d ttl:%v", ttl)
 	}
 
+	ttl, _ = ReadTTL("50d")
+	if ttl.Minutes() != 50*24*60 {
+		t.Errorf("50d ttl:%v", ttl)
+	}
+
 	ttl, _ = ReadTTL("5w")
 	if ttl.Minutes() != 5*7*24*60 {
 		t.Errorf("5w ttl:%v", ttl)
diff --git a/weed/storage/store.go b/weed/storage/store.go
index 3f16688bf..48cbeb3d1 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -380,7 +380,7 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error {
 		Ttl:              v.Ttl.ToUint32(),
 	}
 	for _, location := range s.Locations {
-		if found, error := location.deleteVolumeById(i); found && error == nil {
+		if found, err := location.deleteVolumeById(i); found && err == nil {
 			glog.V(0).Infof("DeleteVolume %d", i)
 			s.DeletedVolumesChan <- message
 			return nil
diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go
index 38159496e..32666a417 100644
--- a/weed/storage/store_vacuum.go
+++ b/weed/storage/store_vacuum.go
@@ -2,6 +2,7 @@ package storage
 
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/stats"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -16,6 +17,10 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
 }
 func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error {
 	if v := s.findVolume(vid); v != nil {
+		diskStatus := stats.NewDiskStatus(v.dir)
+		if int64(diskStatus.Free) < preallocate {
+			return fmt.Errorf("free space: %d bytes, not enough for %d bytes", diskStatus.Free, preallocate)
+		}
 		return v.Compact2(preallocate, compactionBytePerSecond)
 	}
 	return fmt.Errorf("volume id %d is not found during compact", vid)