
Merge pull request #14 from chrislusf/master

sync
hilimd authored 4 years ago, committed by GitHub
commit 23baa3c36c
92 changed files (count of changed lines before each path):

1. 4  unmaintained/see_log_entry/see_log_entry.go
2. 34  weed/filer/abstract_sql/abstract_sql_store.go
3. 87  weed/filer/abstract_sql/abstract_sql_store_kv.go
4. 0  weed/filer/cassandra/README.txt
5. 21  weed/filer/cassandra/cassandra_store.go
6. 18  weed/filer/cassandra/cassandra_store_kv.go
7. 2  weed/filer/configuration.go
8. 2  weed/filer/entry.go
9. 2  weed/filer/entry_codec.go
10. 22  weed/filer/etcd/etcd_store.go
11. 44  weed/filer/etcd/etcd_store_kv.go
12. 2  weed/filer/filechunk_manifest.go
13. 2  weed/filer/filechunk_manifest_test.go
14. 2  weed/filer/filechunks.go
15. 2  weed/filer/filechunks2_test.go
16. 2  weed/filer/filechunks_test.go
17. 12  weed/filer/filer.go
18. 4  weed/filer/filer_buckets.go
19. 4  weed/filer/filer_delete_entry.go
20. 2  weed/filer/filer_deletion.go
21. 6  weed/filer/filer_notify.go
22. 2  weed/filer/filer_notify_append.go
23. 2  weed/filer/filer_notify_test.go
24. 79  weed/filer/filerstore.go
25. 25  weed/filer/leveldb/leveldb_store.go
26. 45  weed/filer/leveldb/leveldb_store_kv.go
27. 24  weed/filer/leveldb/leveldb_store_test.go
28. 4  weed/filer/leveldb2/leveldb2_local_store.go
29. 29  weed/filer/leveldb2/leveldb2_store.go
30. 56  weed/filer/leveldb2/leveldb2_store_kv.go
31. 24  weed/filer/leveldb2/leveldb2_store_test.go
32. 2  weed/filer/meta_aggregator.go
33. 2  weed/filer/meta_replay.go
34. 25  weed/filer/mongodb/mongodb_store.go
35. 72  weed/filer/mongodb/mongodb_store_kv.go
36. 12  weed/filer/mysql/mysql_store.go
37. 2  weed/filer/permission.go
38. 0  weed/filer/postgres/README.txt
39. 10  weed/filer/postgres/postgres_store.go
40. 2  weed/filer/reader_at.go
41. 2  weed/filer/reader_at_test.go
42. 4  weed/filer/redis/redis_cluster_store.go
43. 4  weed/filer/redis/redis_store.go
44. 16  weed/filer/redis/universal_redis_store.go
45. 42  weed/filer/redis/universal_redis_store_kv.go
46. 4  weed/filer/redis2/redis_cluster_store.go
47. 4  weed/filer/redis2/redis_store.go
48. 16  weed/filer/redis2/universal_redis_store.go
49. 42  weed/filer/redis2/universal_redis_store_kv.go
50. 2  weed/filer/stream.go
51. 2  weed/filer/topics.go
52. 8  weed/filesys/dir.go
53. 4  weed/filesys/dir_link.go
54. 14  weed/filesys/file.go
55. 20  weed/filesys/filehandle.go
56. 20  weed/filesys/meta_cache/meta_cache.go
57. 4  weed/filesys/meta_cache/meta_cache_init.go
58. 6  weed/filesys/meta_cache/meta_cache_subscribe.go
59. 6  weed/filesys/wfs_deletion.go
60. 4  weed/filesys/wfs_write.go
61. 6  weed/messaging/broker/broker_grpc_server.go
62. 4  weed/messaging/broker/broker_grpc_server_publish.go
63. 6  weed/messaging/broker/broker_grpc_server_subscribe.go
64. 4  weed/messaging/broker/topic_manager.go
65. 6  weed/replication/sink/azuresink/azure_sink.go
66. 6  weed/replication/sink/b2sink/b2_sink.go
67. 24  weed/replication/sink/filersink/filer_sink.go
68. 6  weed/replication/sink/gcssink/gcs_sink.go
69. 8  weed/replication/sink/s3sink/s3_sink.go
70. 6  weed/replication/sink/s3sink/s3_write.go
71. 8  weed/s3api/filer_multipart.go
72. 6  weed/s3api/s3api_objects_list_handlers.go
73. 43  weed/server/filer_grpc_server.go
74. 16  weed/server/filer_grpc_server_rename.go
75. 6  weed/server/filer_grpc_server_sub_meta.go
76. 24  weed/server/filer_server.go
77. 8  weed/server/filer_server_handlers_read.go
78. 2  weed/server/filer_server_handlers_read_dir.go
79. 12  weed/server/filer_server_handlers_write_autochunk.go
80. 6  weed/server/filer_server_handlers_write_cipher.go
81. 2  weed/server/master_grpc_server.go
82. 2  weed/server/master_server.go
83. 9  weed/server/volume_grpc_copy.go
84. 20  weed/server/webdav_server.go
85. 4  weed/shell/command_fs_cat.go
86. 4  weed/shell/command_fs_du.go
87. 4  weed/shell/command_fs_ls.go
88. 4  weed/shell/command_volume_fsck.go
89. 3  weed/storage/disk_location.go
90. 5  weed/storage/needle/volume_ttl_test.go
91. 2  weed/storage/store.go
92. 5  weed/storage/store_vacuum.go

4  unmaintained/see_log_entry/see_log_entry.go

@@ -9,13 +9,13 @@ import (
     "github.com/golang/protobuf/proto"
-    "github.com/chrislusf/seaweedfs/weed/filer2"
+    "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
 )

 var (
-    logdataFile = flag.String("logdata", "", "log data file saved under "+ filer2.SystemLogDir)
+    logdataFile = flag.String("logdata", "", "log data file saved under "+ filer.SystemLogDir)
 )

 func main() {

34  weed/filer2/abstract_sql/abstract_sql_store.go → weed/filer/abstract_sql/abstract_sql_store.go

@@ -4,11 +4,11 @@ import (
     "context"
     "database/sql"
     "fmt"
-    "github.com/chrislusf/seaweedfs/weed/filer2"
+    "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
+    "strings"
 )

 type AbstractSqlStore struct {
@@ -59,7 +59,7 @@ func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB {
     return store.DB
 }

-func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {

     dir, name := entry.FullPath.DirAndName()
     meta, err := entry.EncodeAttributesAndChunks()
@@ -69,12 +69,9 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.En
     res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta)
     if err != nil {
-        return fmt.Errorf("insert %s: %s", entry.FullPath, err)
+        if !strings.Contains(strings.ToLower(err.Error()), "duplicate") {
+            return fmt.Errorf("kv insert: %s", err)
+        }
     }
-
-    affectedRows, err := res.RowsAffected()
-    if err == nil && affectedRows > 0 {
-        return nil
-    }

     // now the insert failed possibly due to duplication constraints
@@ -93,7 +90,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.En
 }

-func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

     dir, name := entry.FullPath.DirAndName()
     meta, err := entry.EncodeAttributesAndChunks()
@@ -113,16 +110,20 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En
     return nil
 }

-func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer2.Entry, error) {
+func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer.Entry, error) {

     dir, name := fullpath.DirAndName()
     row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir)

     var data []byte
     if err := row.Scan(&data); err != nil {
+        if err == sql.ErrNoRows {
             return nil, filer_pb.ErrNotFound
         }
+        return nil, fmt.Errorf("find %s: %v", fullpath, err)
+    }

-    entry := &filer2.Entry{
+    entry := &filer.Entry{
         FullPath: fullpath,
     }
     if err := entry.DecodeAttributesAndChunks(data); err != nil {
@@ -164,14 +165,13 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
     return nil
 }

-func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
+func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {

     sqlText := store.SqlListExclusive
     if inclusive {
         sqlText = store.SqlListInclusive
     }

-    rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), limit)
+    rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), prefix, limit)
     if err != nil {
         return nil, fmt.Errorf("list %s : %v", fullpath, err)
     }
@@ -185,7 +185,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat
             return nil, fmt.Errorf("scan %s: %v", fullpath, err)
         }

-        entry := &filer2.Entry{
+        entry := &filer.Entry{
             FullPath: util.NewFullPath(string(fullpath), name),
         }
         if err = entry.DecodeAttributesAndChunks(data); err != nil {
@@ -199,6 +199,10 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat
     return entries, nil
 }

+func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
+    return store.ListDirectoryPrefixedEntries(ctx, fullpath, startFileName, inclusive, limit, "")
+}
+
 func (store *AbstractSqlStore) Shutdown() {
     store.DB.Close()
 }
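Beyond the filer2 → filer package rename that runs through the whole commit, the SQL store now answers prefixed listings natively: the list statements take one extra argument, bound between the directory and the limit. A sketch of what a matching statement could look like on MySQL (illustrative only; the real statements are defined per concrete store, e.g. in weed/filer/mysql):

// Illustrative only: a list statement shaped to match the new argument
// order in ListDirectoryPrefixedEntries (dirhash, startFileName,
// directory, prefix, limit). CONCAT(?, '%') turns the prefix argument
// into a LIKE pattern.
store.SqlListExclusive = "SELECT name, meta FROM filemeta " +
    "WHERE dirhash=? AND name>? AND directory=? AND name LIKE CONCAT(?,'%') " +
    "ORDER BY name ASC LIMIT ?"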

87  weed/filer/abstract_sql/abstract_sql_store_kv.go

@@ -0,0 +1,87 @@
+package abstract_sql
+
+import (
+    "context"
+    "database/sql"
+    "fmt"
+    "strings"
+
+    "github.com/chrislusf/seaweedfs/weed/filer"
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+    dirStr, dirHash, name := genDirAndName(key)
+
+    res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, dirHash, name, dirStr, value)
+    if err != nil {
+        if !strings.Contains(strings.ToLower(err.Error()), "duplicate") {
+            return fmt.Errorf("kv insert: %s", err)
+        }
+    }
+
+    // now the insert failed possibly due to duplication constraints
+    glog.V(1).Infof("kv insert falls back to update: %s", err)
+
+    res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)
+    if err != nil {
+        return fmt.Errorf("kv upsert: %s", err)
+    }
+
+    _, err = res.RowsAffected()
+    if err != nil {
+        return fmt.Errorf("kv upsert no rows affected: %s", err)
+    }
+    return nil
+}
+
+func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+    dirStr, dirHash, name := genDirAndName(key)
+    row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, dirHash, name, dirStr)
+
+    err = row.Scan(&value)
+
+    if err == sql.ErrNoRows {
+        return nil, filer.ErrKvNotFound
+    }
+
+    if err != nil {
+        return nil, fmt.Errorf("kv get: %v", err)
+    }
+
+    return
+}
+
+func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+    dirStr, dirHash, name := genDirAndName(key)
+
+    res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, dirHash, name, dirStr)
+    if err != nil {
+        return fmt.Errorf("kv delete: %s", err)
+    }
+
+    _, err = res.RowsAffected()
+    if err != nil {
+        return fmt.Errorf("kv delete no rows affected: %s", err)
+    }
+
+    return nil
+}
+
+func genDirAndName(key []byte) (dirStr string, dirHash int64, name string) {
+    for len(key) < 8 {
+        key = append(key, 0)
+    }
+
+    dirHash = int64(util.BytesToUint64(key[:8]))
+    dirStr = string(key[:8])
+    name = string(key[8:])
+
+    return
+}
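The new KV methods reuse the existing filemeta table by carving every key into a synthetic directory and name: genDirAndName zero-pads the key to at least 8 bytes, hashes the first 8 bytes into the dirhash column, and keeps the remainder as the name. A small self-contained walk-through (bytesToUint64 stands in for util.BytesToUint64, assumed big-endian):

package main

import "fmt"

// Stand-in for util.BytesToUint64; assumed big-endian as in weed/util.
func bytesToUint64(b []byte) (v uint64) {
    for i := 0; i < 8; i++ {
        v = v<<8 + uint64(b[i])
    }
    return
}

func main() {
    key := []byte("topics/x-offset") // hypothetical KV key
    for len(key) < 8 {
        key = append(key, 0) // zero-pad short keys
    }
    dirStr, name := string(key[:8]), string(key[8:])
    dirHash := int64(bytesToUint64(key[:8]))
    fmt.Printf("dirStr=%q dirHash=%d name=%q\n", dirStr, dirHash, name)
    // dirStr="topics/x", name="-offset": the key is stored like a file
    // "-offset" inside a synthetic directory "topics/x".
}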

0  weed/filer2/cassandra/README.txt → weed/filer/cassandra/README.txt

21  weed/filer2/cassandra/cassandra_store.go → weed/filer/cassandra/cassandra_store.go

@@ -3,17 +3,16 @@ package cassandra
 import (
     "context"
     "fmt"
     "github.com/gocql/gocql"
-    "github.com/chrislusf/seaweedfs/weed/filer2"
+    "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
 )

 func init() {
-    filer2.Stores = append(filer2.Stores, &CassandraStore{})
+    filer.Stores = append(filer.Stores, &CassandraStore{})
 }

 type CassandraStore struct {
@@ -53,7 +52,7 @@ func (store *CassandraStore) RollbackTransaction(ctx context.Context) error {
     return nil
 }

-func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {

     dir, name := entry.FullPath.DirAndName()
     meta, err := entry.EncodeAttributesAndChunks()
@@ -70,12 +69,12 @@ func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entr
     return nil
 }

-func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

     return store.InsertEntry(ctx, entry)
 }

-func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {

     dir, name := fullpath.DirAndName()

     var data []byte
@@ -91,7 +90,7 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPa
         return nil, filer_pb.ErrNotFound
     }

-    entry = &filer2.Entry{
+    entry = &filer.Entry{
         FullPath: fullpath,
     }
     err = entry.DecodeAttributesAndChunks(data)
@@ -126,8 +125,12 @@ func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath
     return nil
 }

+func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+    return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
 func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
-    limit int) (entries []*filer2.Entry, err error) {
+    limit int) (entries []*filer.Entry, err error) {

     cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
     if inclusive {
@@ -138,7 +141,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath
     var name string
     iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
     for iter.Scan(&name, &data) {
-        entry := &filer2.Entry{
+        entry := &filer.Entry{
             FullPath: util.NewFullPath(string(fullpath), name),
         }
         if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil {

18  weed/filer/cassandra/cassandra_store_kv.go

@@ -0,0 +1,18 @@
+package cassandra
+
+import (
+    "context"
+
+    "github.com/chrislusf/seaweedfs/weed/filer"
+)
+
+func (store *CassandraStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+    return filer.ErrKvNotImplemented
+}
+
+func (store *CassandraStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+    return nil, filer.ErrKvNotImplemented
+}
+
+func (store *CassandraStore) KvDelete(ctx context.Context, key []byte) (err error) {
+    return filer.ErrKvNotImplemented
+}

2  weed/filer2/configuration.go → weed/filer/configuration.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "os"

2  weed/filer2/entry.go → weed/filer/entry.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "os"

2  weed/filer2/entry_codec.go → weed/filer/entry_codec.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "bytes"

22  weed/filer2/etcd/etcd_store.go → weed/filer/etcd/etcd_store.go

@@ -8,7 +8,7 @@ import (
     "go.etcd.io/etcd/clientv3"

-    "github.com/chrislusf/seaweedfs/weed/filer2"
+    "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     weed_util "github.com/chrislusf/seaweedfs/weed/util"
@@ -19,7 +19,7 @@ const (
 )

 func init() {
-    filer2.Stores = append(filer2.Stores, &EtcdStore{})
+    filer.Stores = append(filer.Stores, &EtcdStore{})
 }

 type EtcdStore struct {
@@ -73,7 +73,7 @@ func (store *EtcdStore) RollbackTransaction(ctx context.Context) error {
     return nil
 }

-func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
     key := genKey(entry.DirAndName())

     value, err := entry.EncodeAttributesAndChunks()
@@ -88,11 +88,11 @@ func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (e
     return nil
 }

-func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
     return store.InsertEntry(ctx, entry)
 }

-func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
+func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
     key := genKey(fullpath.DirAndName())

     resp, err := store.client.Get(ctx, string(key))
@@ -104,7 +104,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPa
         return nil, filer_pb.ErrNotFound
     }

-    entry = &filer2.Entry{
+    entry = &filer.Entry{
         FullPath: fullpath,
     }
     err = entry.DecodeAttributesAndChunks(resp.Kvs[0].Value)
@@ -135,9 +135,11 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_
     return nil
 }

-func (store *EtcdStore) ListDirectoryEntries(
-    ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
-) (entries []*filer2.Entry, err error) {
+func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+    return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
+func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
     directoryPrefix := genDirectoryKeyPrefix(fullpath, "")

     resp, err := store.client.Get(ctx, string(directoryPrefix),
@@ -158,7 +160,7 @@ func (store *EtcdStore) ListDirectoryEntries(
         if limit < 0 {
             break
         }
-        entry := &filer2.Entry{
+        entry := &filer.Entry{
             FullPath: weed_util.NewFullPath(string(fullpath), fileName),
         }
         if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil {

44  weed/filer/etcd/etcd_store_kv.go

@@ -0,0 +1,44 @@
+package etcd
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/chrislusf/seaweedfs/weed/filer"
+)
+
+func (store *EtcdStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+    _, err = store.client.Put(ctx, string(key), string(value))
+
+    if err != nil {
+        return fmt.Errorf("kv put: %v", err)
+    }
+
+    return nil
+}
+
+func (store *EtcdStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+    resp, err := store.client.Get(ctx, string(key), nil)
+
+    if err != nil {
+        return nil, fmt.Errorf("kv get: %v", err)
+    }
+
+    if len(resp.Kvs) == 0 {
+        return nil, filer.ErrKvNotFound
+    }
+
+    return resp.Kvs[0].Value, nil
+}
+
+func (store *EtcdStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+    _, err = store.client.Delete(ctx, string(key))
+
+    if err != nil {
+        return fmt.Errorf("kv delete: %v", err)
+    }
+
+    return nil
+}

2  weed/filer2/filechunk_manifest.go → weed/filer/filechunk_manifest.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "bytes"

2  weed/filer2/filechunk_manifest_test.go → weed/filer/filechunk_manifest_test.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "bytes"

2  weed/filer2/filechunks.go → weed/filer/filechunks.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "fmt"

2  weed/filer2/filechunks2_test.go → weed/filer/filechunks2_test.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "sort"

2  weed/filer2/filechunks_test.go → weed/filer/filechunks_test.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "fmt"

12  weed/filer2/filer.go → weed/filer/filer.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "context"
@@ -246,15 +246,15 @@ func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, e
 }

-func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
+func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) ([]*Entry, error) {
     if strings.HasSuffix(string(p), "/") && len(p) > 1 {
         p = p[0 : len(p)-1]
     }

     var makeupEntries []*Entry
-    entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+    entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix)
     for expiredCount > 0 && err == nil {
-        makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount)
+        makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix)
         if err == nil {
             entries = append(entries, makeupEntries...)
         }
@@ -263,8 +263,8 @@ func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, start
     return entries, err
 }

-func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) {
-    listedEntries, listErr := f.Store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*Entry, expiredCount int, lastFileName string, err error) {
+    listedEntries, listErr := f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix)
     if listErr != nil {
         return listedEntries, expiredCount, "", listErr
     }
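Every caller of Filer.ListDirectoryEntries picks up a trailing prefix argument; passing an empty string keeps the old behavior, which is exactly what the mechanical call-site updates in the files below do. A hedged caller sketch (the path and limit are made up):

// Hypothetical caller: list up to 100 children of /some/dir whose names
// start with "img-"; pass "" instead of "img-" to list everything as before.
entries, err := f.ListDirectoryEntries(ctx, util.FullPath("/some/dir"), "", false, 100, "img-")
if err != nil {
    return err
}
for _, entry := range entries {
    fmt.Println(entry.FullPath)
}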

4  weed/filer2/filer_buckets.go → weed/filer/filer_buckets.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "context"
@@ -29,7 +29,7 @@ func (f *Filer) LoadBuckets() {

     limit := math.MaxInt32

-    entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit)
+    entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "")

     if err != nil {
         glog.V(1).Infof("no buckets found: %v", err)

4  weed/filer2/filer_delete_entry.go → weed/filer/filer_delete_entry.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "context"
@@ -58,7 +58,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
     lastFileName := ""
     includeLastFile := false
     for {
-        entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize)
+        entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "")
         if err != nil {
             glog.Errorf("list folder %s: %v", entry.FullPath, err)
             return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)

2  weed/filer2/filer_deletion.go → weed/filer/filer_deletion.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "strings"

6  weed/filer2/filer_notify.go → weed/filer/filer_notify.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "context"
@@ -100,13 +100,13 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(
     sizeBuf := make([]byte, 4)
     startTsNs := startTime.UnixNano()

-    dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366)
+    dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "")
     if listDayErr != nil {
         return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
     }
     for _, dayEntry := range dayEntries {
         // println("checking day", dayEntry.FullPath)
-        hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60)
+        hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "")
         if listHourMinuteErr != nil {
             return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
         }

2  weed/filer2/filer_notify_append.go → weed/filer/filer_notify_append.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "context"

2  weed/filer2/filer_notify_test.go → weed/filer/filer_notify_test.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "testing"

79  weed/filer2/filerstore.go → weed/filer/filerstore.go

@@ -1,7 +1,9 @@
-package filer2
+package filer

 import (
     "context"
+    "errors"
+    "strings"
     "time"

     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -9,6 +11,12 @@ import (
     "github.com/chrislusf/seaweedfs/weed/util"
 )

+var (
+    ErrUnsupportedListDirectoryPrefixed = errors.New("unsupported directory prefix listing")
+    ErrKvNotImplemented                 = errors.New("kv not implemented yet")
+    ErrKvNotFound                       = errors.New("kv: not found")
+)
+
 type FilerStore interface {
     // GetName gets the name to locate the configuration in filer.toml file
     GetName() string
@@ -21,11 +29,16 @@ type FilerStore interface {
     DeleteEntry(context.Context, util.FullPath) (err error)
     DeleteFolderChildren(context.Context, util.FullPath) (err error)
     ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
+    ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error)

     BeginTransaction(ctx context.Context) (context.Context, error)
     CommitTransaction(ctx context.Context) error
     RollbackTransaction(ctx context.Context) error

+    KvPut(ctx context.Context, key []byte, value []byte) (err error)
+    KvGet(ctx context.Context, key []byte) (value []byte, err error)
+    KvDelete(ctx context.Context, key []byte) (err error)
+
     Shutdown()
 }

@@ -135,6 +148,60 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath
     return entries, err
 }

+func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error) {
+    stats.FilerStoreCounter.WithLabelValues(fsw.ActualStore.GetName(), "prefixList").Inc()
+    start := time.Now()
+    defer func() {
+        stats.FilerStoreHistogram.WithLabelValues(fsw.ActualStore.GetName(), "prefixList").Observe(time.Since(start).Seconds())
+    }()
+
+    entries, err := fsw.ActualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix)
+    if err == ErrUnsupportedListDirectoryPrefixed {
+        entries, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix)
+    }
+    if err != nil {
+        return nil, err
+    }
+    for _, entry := range entries {
+        filer_pb.AfterEntryDeserialization(entry.Chunks)
+    }
+    return entries, nil
+}
+
+func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) (entries []*Entry, err error) {
+    entries, err = fsw.ActualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
+    if err != nil {
+        return nil, err
+    }
+
+    if prefix == "" {
+        return
+    }
+
+    count := 0
+    var lastFileName string
+    notPrefixed := entries
+    entries = nil
+    for count < limit && len(notPrefixed) > 0 {
+        for _, entry := range notPrefixed {
+            lastFileName = entry.Name()
+            if strings.HasPrefix(entry.Name(), prefix) {
+                count++
+                entries = append(entries, entry)
+                if count >= limit {
+                    break
+                }
+            }
+        }
+        if count < limit {
+            notPrefixed, err = fsw.ActualStore.ListDirectoryEntries(ctx, dirPath, lastFileName, false, limit)
+            if err != nil {
+                return
+            }
+        }
+    }
+    return
+}
+
 func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) {
     return fsw.ActualStore.BeginTransaction(ctx)
 }
@@ -150,3 +217,13 @@ func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error {

 func (fsw *FilerStoreWrapper) Shutdown() {
     fsw.ActualStore.Shutdown()
 }
+
+func (fsw *FilerStoreWrapper) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+    return fsw.ActualStore.KvPut(ctx, key, value)
+}
+
+func (fsw *FilerStoreWrapper) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+    return fsw.ActualStore.KvGet(ctx, key)
+}
+
+func (fsw *FilerStoreWrapper) KvDelete(ctx context.Context, key []byte) (err error) {
+    return fsw.ActualStore.KvDelete(ctx, key)
+}
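Two mechanisms land here: stores that cannot push a prefix filter down return ErrUnsupportedListDirectoryPrefixed and the wrapper transparently falls back to prefixFilterEntries, which pages through plain listings and keeps only matching names; and every store now exposes a small KV facade. A minimal usage sketch of that facade (assuming an initialized FilerStore named store, with made-up key and value):

// Minimal sketch, assuming store is an initialized FilerStore.
key, value := []byte("example-key"), []byte("example-value")

if err := store.KvPut(ctx, key, value); err != nil {
    return err
}

got, err := store.KvGet(ctx, key)
switch err {
case nil:
    fmt.Printf("got %q\n", got)
case filer.ErrKvNotFound:
    fmt.Println("key does not exist")
case filer.ErrKvNotImplemented:
    fmt.Println("backend has no KV support (e.g. Cassandra in this commit)")
default:
    return err
}

if err := store.KvDelete(ctx, key); err != nil {
    return err
}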

25  weed/filer2/leveldb/leveldb_store.go → weed/filer/leveldb/leveldb_store.go

@@ -4,13 +4,12 @@ import (
     "bytes"
     "context"
    "fmt"

     "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/errors"
+    leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
     "github.com/syndtr/goleveldb/leveldb/opt"
     leveldb_util "github.com/syndtr/goleveldb/leveldb/util"

-    "github.com/chrislusf/seaweedfs/weed/filer2"
+    "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     weed_util "github.com/chrislusf/seaweedfs/weed/util"
@@ -21,7 +20,7 @@ const (
 )

 func init() {
-    filer2.Stores = append(filer2.Stores, &LevelDBStore{})
+    filer.Stores = append(filer.Stores, &LevelDBStore{})
 }

 type LevelDBStore struct {
@@ -50,7 +49,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
     }

     if store.db, err = leveldb.OpenFile(dir, opts); err != nil {
-        if errors.IsCorrupted(err) {
+        if leveldb_errors.IsCorrupted(err) {
             store.db, err = leveldb.RecoverFile(dir, opts)
         }
         if err != nil {
@@ -71,7 +70,7 @@ func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error {
     return nil
 }

-func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
     key := genKey(entry.DirAndName())

     value, err := entry.EncodeAttributesAndChunks()
@@ -90,12 +89,12 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry)
     return nil
 }

-func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

     return store.InsertEntry(ctx, entry)
 }

-func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
+func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
     key := genKey(fullpath.DirAndName())

     data, err := store.db.Get(key, nil)
@@ -107,7 +106,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
         return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
     }

-    entry = &filer2.Entry{
+    entry = &filer.Entry{
         FullPath: fullpath,
     }
     err = entry.DecodeAttributesAndChunks(data)
@@ -159,8 +158,12 @@ func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath we
     return nil
 }

+func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+    return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
 func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
-    limit int) (entries []*filer2.Entry, err error) {
+    limit int) (entries []*filer.Entry, err error) {

     directoryPrefix := genDirectoryKeyPrefix(fullpath, "")

@@ -181,7 +184,7 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath we
         if limit < 0 {
             break
         }
-        entry := &filer2.Entry{
+        entry := &filer.Entry{
             FullPath: weed_util.NewFullPath(string(fullpath), fileName),
         }
         if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {

45  weed/filer/leveldb/leveldb_store_kv.go

@@ -0,0 +1,45 @@
+package leveldb
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/chrislusf/seaweedfs/weed/filer"
+    "github.com/syndtr/goleveldb/leveldb"
+)
+
+func (store *LevelDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+    err = store.db.Put(key, value, nil)
+
+    if err != nil {
+        return fmt.Errorf("kv put: %v", err)
+    }
+
+    return nil
+}
+
+func (store *LevelDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+    value, err = store.db.Get(key, nil)
+
+    if err == leveldb.ErrNotFound {
+        return nil, filer.ErrKvNotFound
+    }
+
+    if err != nil {
+        return nil, fmt.Errorf("kv get: %v", err)
+    }
+
+    return
+}
+
+func (store *LevelDBStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+    err = store.db.Delete(key, nil)
+
+    if err != nil {
+        return fmt.Errorf("kv delete: %v", err)
+    }
+
+    return nil
+}

24  weed/filer2/leveldb/leveldb_store_test.go → weed/filer/leveldb/leveldb_store_test.go

@@ -6,37 +6,37 @@ import (
     "os"
     "testing"

-    "github.com/chrislusf/seaweedfs/weed/filer2"
+    "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/util"
 )

 func TestCreateAndFind(t *testing.T) {
-    filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
     defer os.RemoveAll(dir)
     store := &LevelDBStore{}
     store.initialize(dir)
-    filer.SetStore(store)
+    testFiler.SetStore(store)

     fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")

     ctx := context.Background()

-    entry1 := &filer2.Entry{
+    entry1 := &filer.Entry{
         FullPath: fullpath,
-        Attr: filer2.Attr{
+        Attr: filer.Attr{
             Mode: 0440,
             Uid:  1234,
             Gid:  5678,
         },
     }

-    if err := filer.CreateEntry(ctx, entry1, false, false, nil); err != nil {
+    if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
         t.Errorf("create entry %v: %v", entry1.FullPath, err)
         return
     }

-    entry, err := filer.FindEntry(ctx, fullpath)
+    entry, err := testFiler.FindEntry(ctx, fullpath)

     if err != nil {
         t.Errorf("find entry: %v", err)
@@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
     }

     // checking one upper directory
-    entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100)
+    entries, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "")
     if len(entries) != 1 {
         t.Errorf("list entries count: %v", len(entries))
         return
     }

     // checking one upper directory
-    entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+    entries, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
     if len(entries) != 1 {
         t.Errorf("list entries count: %v", len(entries))
         return
@@ -65,17 +65,17 @@ func TestCreateAndFind(t *testing.T) {
 }

 func TestEmptyRoot(t *testing.T) {
-    filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
     defer os.RemoveAll(dir)
     store := &LevelDBStore{}
     store.initialize(dir)
-    filer.SetStore(store)
+    testFiler.SetStore(store)

     ctx := context.Background()

     // checking one upper directory
-    entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+    entries, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
     if err != nil {
         t.Errorf("list entries: %v", err)
         return
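The rename of the local test variable from filer to testFiler is forced by the package rename itself: the package is now called filer, so a local variable of that name would shadow the import. A small illustration of the hazard (sketch only):

// After the rename, this line still compiles, because the right-hand side
// is resolved before the new variable comes into scope...
filer := filer.NewFiler(nil, nil, "", 0, "", "", nil)
_ = filer
// ...but from here on, "filer" names the variable, so a later reference
// such as filer.Entry{} would no longer resolve to the package type.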

4  weed/filer2/leveldb2/leveldb2_local_store.go → weed/filer/leveldb2/leveldb2_local_store.go

@@ -3,12 +3,12 @@ package leveldb
 import (
     "fmt"

-    "github.com/chrislusf/seaweedfs/weed/filer2"
+    "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/util"
 )

 var (
-    _ = filer2.FilerLocalStore(&LevelDB2Store{})
+    _ = filer.FilerLocalStore(&LevelDB2Store{})
 )

 func (store *LevelDB2Store) UpdateOffset(filer string, lastTsNs int64) error {

29  weed/filer2/leveldb2/leveldb2_store.go → weed/filer/leveldb2/leveldb2_store.go

@@ -5,22 +5,21 @@ import (
     "context"
     "crypto/md5"
     "fmt"
+    "io"
+    "os"

     "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/errors"
+    leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
     "github.com/syndtr/goleveldb/leveldb/opt"
     leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
-    "io"
-    "os"

-    "github.com/chrislusf/seaweedfs/weed/filer2"
+    "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     weed_util "github.com/chrislusf/seaweedfs/weed/util"
 )

 func init() {
-    filer2.Stores = append(filer2.Stores, &LevelDB2Store{})
+    filer.Stores = append(filer.Stores, &LevelDB2Store{})
 }

 type LevelDB2Store struct {
@@ -53,7 +52,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
         dbFolder := fmt.Sprintf("%s/%02d", dir, d)
         os.MkdirAll(dbFolder, 0755)
         db, dbErr := leveldb.OpenFile(dbFolder, opts)
-        if errors.IsCorrupted(dbErr) {
+        if leveldb_errors.IsCorrupted(dbErr) {
             db, dbErr = leveldb.RecoverFile(dbFolder, opts)
         }
         if dbErr != nil {
@@ -77,7 +76,7 @@ func (store *LevelDB2Store) RollbackTransaction(ctx context.Context) error {
     return nil
 }

-func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
     dir, name := entry.DirAndName()
     key, partitionId := genKey(dir, name, store.dbCount)

@@ -97,12 +96,12 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry
     return nil
 }

-func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

     return store.InsertEntry(ctx, entry)
 }

-func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
+func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
     dir, name := fullpath.DirAndName()
     key, partitionId := genKey(dir, name, store.dbCount)

@@ -115,7 +114,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.Fu
         return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
     }

-    entry = &filer2.Entry{
+    entry = &filer.Entry{
         FullPath: fullpath,
     }
     err = entry.DecodeAttributesAndChunks(data)
@@ -168,8 +167,12 @@ func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath w
     return nil
 }

+func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+    return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
 func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
-    limit int) (entries []*filer2.Entry, err error) {
+    limit int) (entries []*filer.Entry, err error) {

     directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
     lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount)
@@ -191,7 +194,7 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath w
         if limit < 0 {
             break
         }
-        entry := &filer2.Entry{
+        entry := &filer.Entry{
             FullPath: weed_util.NewFullPath(string(fullpath), fileName),
         }

56  weed/filer/leveldb2/leveldb2_store_kv.go

@@ -0,0 +1,56 @@
+package leveldb
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/chrislusf/seaweedfs/weed/filer"
+    "github.com/syndtr/goleveldb/leveldb"
+)
+
+func (store *LevelDB2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+    partitionId := bucketKvKey(key, store.dbCount)
+
+    err = store.dbs[partitionId].Put(key, value, nil)
+
+    if err != nil {
+        return fmt.Errorf("kv bucket %d put: %v", partitionId, err)
+    }
+
+    return nil
+}
+
+func (store *LevelDB2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+    partitionId := bucketKvKey(key, store.dbCount)
+
+    value, err = store.dbs[partitionId].Get(key, nil)
+
+    if err == leveldb.ErrNotFound {
+        return nil, filer.ErrKvNotFound
+    }
+
+    if err != nil {
+        return nil, fmt.Errorf("kv bucket %d get: %v", partitionId, err)
+    }
+
+    return
+}
+
+func (store *LevelDB2Store) KvDelete(ctx context.Context, key []byte) (err error) {
+
+    partitionId := bucketKvKey(key, store.dbCount)
+
+    err = store.dbs[partitionId].Delete(key, nil)
+
+    if err != nil {
+        return fmt.Errorf("kv bucket %d delete: %v", partitionId, err)
+    }
+
+    return nil
+}
+
+func bucketKvKey(key []byte, dbCount int) (partitionId int) {
+    return int(key[len(key)-1]) % dbCount
+}
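Unlike entries, which are routed by a hash of their directory, KV keys are sharded by the last byte of the key modulo dbCount. A quick illustration with dbCount = 2, the value used by the tests below (keys are made up):

// bucketKvKey(key, 2) for two keys differing only in the last byte:
fmt.Println(int([]byte("offset-a")[7]) % 2) // 'a' = 97 -> partition 1
fmt.Println(int([]byte("offset-b")[7]) % 2) // 'b' = 98 -> partition 0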

24  weed/filer2/leveldb2/leveldb2_store_test.go → weed/filer/leveldb2/leveldb2_store_test.go

@@ -6,37 +6,37 @@ import (
     "os"
     "testing"

-    "github.com/chrislusf/seaweedfs/weed/filer2"
+    "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/util"
 )

 func TestCreateAndFind(t *testing.T) {
-    filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
     defer os.RemoveAll(dir)
     store := &LevelDB2Store{}
     store.initialize(dir, 2)
-    filer.SetStore(store)
+    testFiler.SetStore(store)

     fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")

     ctx := context.Background()

-    entry1 := &filer2.Entry{
+    entry1 := &filer.Entry{
         FullPath: fullpath,
-        Attr: filer2.Attr{
+        Attr: filer.Attr{
             Mode: 0440,
             Uid:  1234,
             Gid:  5678,
         },
     }

-    if err := filer.CreateEntry(ctx, entry1, false, false, nil); err != nil {
+    if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
         t.Errorf("create entry %v: %v", entry1.FullPath, err)
         return
     }

-    entry, err := filer.FindEntry(ctx, fullpath)
+    entry, err := testFiler.FindEntry(ctx, fullpath)

     if err != nil {
         t.Errorf("find entry: %v", err)
@@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
     }

     // checking one upper directory
-    entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100)
+    entries, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "")
     if len(entries) != 1 {
         t.Errorf("list entries count: %v", len(entries))
         return
     }

     // checking one upper directory
-    entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+    entries, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
     if len(entries) != 1 {
         t.Errorf("list entries count: %v", len(entries))
         return
@@ -65,17 +65,17 @@ func TestCreateAndFind(t *testing.T) {
 }

 func TestEmptyRoot(t *testing.T) {
-    filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
     defer os.RemoveAll(dir)
     store := &LevelDB2Store{}
     store.initialize(dir, 2)
-    filer.SetStore(store)
+    testFiler.SetStore(store)

     ctx := context.Background()

     // checking one upper directory
-    entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+    entries, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
     if err != nil {
         t.Errorf("list entries: %v", err)
         return

2  weed/filer2/meta_aggregator.go → weed/filer/meta_aggregator.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "context"

2  weed/filer2/meta_replay.go → weed/filer/meta_replay.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

 import (
     "context"

25  weed/filer2/mongodb/mongodb_store.go → weed/filer/mongodb/mongodb_store.go

@@ -3,7 +3,7 @@ package mongodb
 import (
     "context"
     "fmt"
-    "github.com/chrislusf/seaweedfs/weed/filer2"
+    "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
@@ -15,7 +15,7 @@ import (
 )

 func init() {
-    filer2.Stores = append(filer2.Stores, &MongodbStore{})
+    filer.Stores = append(filer.Stores, &MongodbStore{})
 }

 type MongodbStore struct {
@@ -93,7 +93,7 @@ func (store *MongodbStore) RollbackTransaction(ctx context.Context) error {
     return nil
 }

-func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {

     dir, name := entry.FullPath.DirAndName()
     meta, err := entry.EncodeAttributesAndChunks()
@@ -109,14 +109,18 @@ func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer2.Entry)
         Meta:      meta,
     })

+    if err != nil {
+        return fmt.Errorf("InsertEntry %st: %v", entry.FullPath, err)
+    }
+
     return nil
 }

-func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

     return store.InsertEntry(ctx, entry)
 }

-func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {

     dir, name := fullpath.DirAndName()

     var data Model
@@ -124,6 +128,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
     var where = bson.M{"directory": dir, "name": name}
     err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
     if err != mongo.ErrNoDocuments && err != nil {
+        glog.Error("find %s: %v", fullpath, err)
         return nil, filer_pb.ErrNotFound
     }

@@ -131,7 +136,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
         return nil, filer_pb.ErrNotFound
     }

-    entry = &filer2.Entry{
+    entry = &filer.Entry{
         FullPath: fullpath,
     }

@@ -167,7 +172,11 @@ func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath ut
     return nil
 }

-func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
+func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+    return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
+func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {

     var where = bson.M{"directory": string(fullpath), "name": bson.M{"$gt": startFileName}}
     if inclusive {
@@ -185,7 +194,7 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
             return nil, err
         }

-        entry := &filer2.Entry{
+        entry := &filer.Entry{
             FullPath: util.NewFullPath(string(fullpath), data.Name),
         }
         if decodeErr := entry.DecodeAttributesAndChunks(data.Meta); decodeErr != nil {

72
weed/filer/mongodb/mongodb_store_kv.go

@@ -0,0 +1,72 @@
package mongodb

import (
    "context"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "go.mongodb.org/mongo-driver/bson"
    "go.mongodb.org/mongo-driver/mongo"
)

func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

    dir, name := genDirAndName(key)

    c := store.connect.Database(store.database).Collection(store.collectionName)

    _, err = c.InsertOne(ctx, Model{
        Directory: dir,
        Name:      name,
        Meta:      value,
    })

    if err != nil {
        return fmt.Errorf("kv put: %v", err)
    }

    return nil
}

func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

    dir, name := genDirAndName(key)

    var data Model
    var where = bson.M{"directory": dir, "name": name}
    err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
    if err != mongo.ErrNoDocuments && err != nil {
        glog.Errorf("kv get: %v", err)
        return nil, filer.ErrKvNotFound
    }

    if len(data.Meta) == 0 {
        return nil, filer.ErrKvNotFound
    }

    return data.Meta, nil
}

func (store *MongodbStore) KvDelete(ctx context.Context, key []byte) (err error) {

    dir, name := genDirAndName(key)

    where := bson.M{"directory": dir, "name": name}
    _, err = store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where)
    if err != nil {
        return fmt.Errorf("kv delete: %v", err)
    }

    return nil
}

func genDirAndName(key []byte) (dir string, name string) {
    for len(key) < 8 {
        key = append(key, 0)
    }

    dir = string(key[:8])
    name = string(key[8:])

    return
}
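For context on the key mapping above: genDirAndName zero-pads keys shorter than eight bytes, takes the first eight bytes as a synthetic directory, and the remainder as the name, so KV pairs reuse the existing directory/name document schema. A standalone illustration (local copy of the helper, made-up keys):

```go
package main

import "fmt"

// Local copy of the key-splitting logic above, for illustration only.
func genDirAndName(key []byte) (dir string, name string) {
	for len(key) < 8 {
		key = append(key, 0) // zero-pad short keys to 8 bytes
	}
	return string(key[:8]), string(key[8:])
}

func main() {
	dir, name := genDirAndName([]byte("abcdefghij"))
	fmt.Printf("dir=%q name=%q\n", dir, name) // dir="abcdefgh" name="ij"

	dir, name = genDirAndName([]byte("abc"))
	fmt.Printf("dir=%q name=%q\n", dir, name) // dir="abc\x00\x00\x00\x00\x00" name=""
}
```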

12
weed/filer2/mysql/mysql_store.go → weed/filer/mysql/mysql_store.go

@@ -4,8 +4,8 @@ import (
    "database/sql"
    "fmt"
-   "github.com/chrislusf/seaweedfs/weed/filer2"
-   "github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql"
+   "github.com/chrislusf/seaweedfs/weed/filer"
+   "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
    "github.com/chrislusf/seaweedfs/weed/util"
    _ "github.com/go-sql-driver/mysql"
)
@@ -15,7 +15,7 @@ const (
)

func init() {
-   filer2.Stores = append(filer2.Stores, &MysqlStore{})
+   filer.Stores = append(filer.Stores, &MysqlStore{})
}

type MysqlStore struct {
@@ -41,14 +41,14 @@ func (store *MysqlStore) Initialize(configuration util.Configuration, prefix str
func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int,
    interpolateParams bool) (err error) {
    //
    store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)"
    store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?"
    store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
    store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
    store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?"
-   store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?"
-   store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?"
+   store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? AND name like CONCAT(?,'%') ORDER BY NAME ASC LIMIT ?"
+   store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? AND name like CONCAT(?,'%') ORDER BY NAME ASC LIMIT ?"

    sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
    if interpolateParams {
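The two list queries now gain a `name like CONCAT(?,'%')` clause, so prefix filtering happens inside the database and one extra placeholder precedes the limit. A minimal sketch of executing such a query with database/sql, assuming parameters are bound in placeholder order; the function, variable names, and DSN below are illustrative, not the abstract_sql API:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

// listPrefixed is a hypothetical caller of SqlListExclusive: parameters are
// bound in the order the placeholders appear (dirhash, startName, directory,
// prefix, limit).
func listPrefixed(ctx context.Context, db *sql.DB, dirHash int64, dir, startName, prefix string, limit int) ([]string, error) {
	rows, err := db.QueryContext(ctx,
		"SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? AND name like CONCAT(?,'%') ORDER BY NAME ASC LIMIT ?",
		dirHash, startName, dir, prefix, limit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		var meta []byte
		if err := rows.Scan(&name, &meta); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}

func main() {
	// Placeholder DSN; requires a reachable MySQL with the filemeta table.
	db, err := sql.Open("mysql", "user:password@tcp(localhost:3306)/seaweedfs")
	if err != nil {
		fmt.Println(err)
		return
	}
	names, err := listPrefixed(context.Background(), db, 1234, "/bucket/dir", "", "report-", 100)
	fmt.Println(names, err)
}
```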

2
weed/filer2/permission.go → weed/filer/permission.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

func hasWritePermission(dir *Entry, entry *Entry) bool {

0
weed/filer2/postgres/README.txt → weed/filer/postgres/README.txt

10
weed/filer2/postgres/postgres_store.go → weed/filer/postgres/postgres_store.go

@@ -4,8 +4,8 @@ import (
    "database/sql"
    "fmt"
-   "github.com/chrislusf/seaweedfs/weed/filer2"
-   "github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql"
+   "github.com/chrislusf/seaweedfs/weed/filer"
+   "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
    "github.com/chrislusf/seaweedfs/weed/util"
    _ "github.com/lib/pq"
)
@@ -15,7 +15,7 @@ const (
)

func init() {
-   filer2.Stores = append(filer2.Stores, &PostgresStore{})
+   filer.Stores = append(filer.Stores, &PostgresStore{})
}

type PostgresStore struct {
@@ -46,8 +46,8 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int
    store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
    store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
    store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2"
-   store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
-   store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
+   store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like CONCAT($4,'%') ORDER BY NAME ASC LIMIT $5"
+   store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like CONCAT($4,'%') ORDER BY NAME ASC LIMIT $5"

    sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, sslmode)
    if password != "" {

2
weed/filer2/reader_at.go → weed/filer/reader_at.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

import (
    "context"
2
weed/filer2/reader_at_test.go → weed/filer/reader_at_test.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

import (
    "fmt"
4
weed/filer2/redis/redis_cluster_store.go → weed/filer/redis/redis_cluster_store.go

@@ -1,13 +1,13 @@
package redis

import (
-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/util"
    "github.com/go-redis/redis"
)

func init() {
-   filer2.Stores = append(filer2.Stores, &RedisClusterStore{})
+   filer.Stores = append(filer.Stores, &RedisClusterStore{})
}

type RedisClusterStore struct {

4
weed/filer2/redis/redis_store.go → weed/filer/redis/redis_store.go

@@ -1,13 +1,13 @@
package redis

import (
-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/util"
    "github.com/go-redis/redis"
)

func init() {
-   filer2.Stores = append(filer2.Stores, &RedisStore{})
+   filer.Stores = append(filer.Stores, &RedisStore{})
}

type RedisStore struct {

16
weed/filer2/redis/universal_redis_store.go → weed/filer/redis/universal_redis_store.go

@@ -9,7 +9,7 @@ import (
    "github.com/go-redis/redis"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
@@ -33,7 +33,7 @@ func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error
    return nil
}

-func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {

    value, err := entry.EncodeAttributesAndChunks()
    if err != nil {
@@ -57,12 +57,12 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2
    return nil
}

-func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

    return store.InsertEntry(ctx, entry)
}

-func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {

    data, err := store.Client.Get(string(fullpath)).Result()
    if err == redis.Nil {
@@ -73,7 +73,7 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.F
        return nil, fmt.Errorf("get %s : %v", fullpath, err)
    }

-   entry = &filer2.Entry{
+   entry = &filer.Entry{
        FullPath: fullpath,
    }
    err = entry.DecodeAttributesAndChunks([]byte(data))
@@ -121,8 +121,12 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full
    return nil
}

+func (store *UniversalRedisStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+   return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
-   limit int) (entries []*filer2.Entry, err error) {
+   limit int) (entries []*filer.Entry, err error) {

    dirListKey := genDirectoryListKey(string(fullpath))
    members, err := store.Client.SMembers(dirListKey).Result()

42
weed/filer/redis/universal_redis_store_kv.go

@@ -0,0 +1,42 @@
package redis

import (
    "context"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/go-redis/redis"
)

func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

    _, err = store.Client.Set(string(key), value, 0).Result()

    if err != nil {
        return fmt.Errorf("kv put: %v", err)
    }

    return nil
}

func (store *UniversalRedisStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

    data, err := store.Client.Get(string(key)).Result()

    if err == redis.Nil {
        return nil, filer.ErrKvNotFound
    }

    return []byte(data), err
}

func (store *UniversalRedisStore) KvDelete(ctx context.Context, key []byte) (err error) {

    _, err = store.Client.Del(string(key)).Result()

    if err != nil {
        return fmt.Errorf("kv delete: %v", err)
    }

    return nil
}
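Two semantics worth noting in KvPut/KvGet above: an expiration of 0 stores the key without a TTL, and a missing key surfaces as redis.Nil, which KvGet maps to filer.ErrKvNotFound. A quick standalone check of those behaviors against the go-redis v6 client used here (the address and key names are made up):

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Expiration 0 stores the key without a TTL, matching KvPut above.
	if err := client.Set("k", []byte("v"), 0).Err(); err != nil {
		fmt.Println("put:", err)
		return
	}

	// A missing key comes back as redis.Nil, which KvGet maps to
	// filer.ErrKvNotFound.
	if _, err := client.Get("absent").Result(); err == redis.Nil {
		fmt.Println("not found")
	}
}
```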

4
weed/filer2/redis2/redis_cluster_store.go → weed/filer/redis2/redis_cluster_store.go

@@ -1,13 +1,13 @@
package redis2

import (
-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/util"
    "github.com/go-redis/redis"
)

func init() {
-   filer2.Stores = append(filer2.Stores, &RedisCluster2Store{})
+   filer.Stores = append(filer.Stores, &RedisCluster2Store{})
}

type RedisCluster2Store struct {

4
weed/filer2/redis2/redis_store.go → weed/filer/redis2/redis_store.go

@@ -1,13 +1,13 @@
package redis2

import (
-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/util"
    "github.com/go-redis/redis"
)

func init() {
-   filer2.Stores = append(filer2.Stores, &Redis2Store{})
+   filer.Stores = append(filer.Stores, &Redis2Store{})
}

type Redis2Store struct {

16
weed/filer2/redis2/universal_redis_store.go → weed/filer/redis2/universal_redis_store.go

@@ -7,7 +7,7 @@ import (
    "github.com/go-redis/redis"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
@@ -31,7 +31,7 @@ func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) erro
    return nil
}

-func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {

    value, err := entry.EncodeAttributesAndChunks()
    if err != nil {
@@ -52,12 +52,12 @@ func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer
    return nil
}

-func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

    return store.InsertEntry(ctx, entry)
}

-func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {

    data, err := store.Client.Get(string(fullpath)).Result()
    if err == redis.Nil {
@@ -68,7 +68,7 @@ func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.
        return nil, fmt.Errorf("get %s : %v", fullpath, err)
    }

-   entry = &filer2.Entry{
+   entry = &filer.Entry{
        FullPath: fullpath,
    }
    err = entry.DecodeAttributesAndChunks([]byte(data))
@@ -116,8 +116,12 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, ful
    return nil
}

+func (store *UniversalRedis2Store) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+   return nil, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
-   limit int) (entries []*filer2.Entry, err error) {
+   limit int) (entries []*filer.Entry, err error) {

    dirListKey := genDirectoryListKey(string(fullpath))
    start := int64(0)

42
weed/filer/redis2/universal_redis_store_kv.go

@@ -0,0 +1,42 @@
package redis2

import (
    "context"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/go-redis/redis"
)

func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

    _, err = store.Client.Set(string(key), value, 0).Result()

    if err != nil {
        return fmt.Errorf("kv put: %v", err)
    }

    return nil
}

func (store *UniversalRedis2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

    data, err := store.Client.Get(string(key)).Result()

    if err == redis.Nil {
        return nil, filer.ErrKvNotFound
    }

    return []byte(data), err
}

func (store *UniversalRedis2Store) KvDelete(ctx context.Context, key []byte) (err error) {

    _, err = store.Client.Del(string(key)).Result()

    if err != nil {
        return fmt.Errorf("kv delete: %v", err)
    }

    return nil
}

2
weed/filer2/stream.go → weed/filer/stream.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

import (
    "bytes"

2
weed/filer2/topics.go → weed/filer/topics.go

@@ -1,4 +1,4 @@
-package filer2
+package filer

const (
    TopicsDir = "/topics"

8
weed/filesys/dir.go

@@ -11,7 +11,7 @@ import (
    "github.com/seaweedfs/fuse"
    "github.com/seaweedfs/fuse/fs"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -156,7 +156,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
        return fuse.EIO
    }

-   dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+   dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

    return nil
}); err != nil {
@@ -205,7 +205,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
        return err
    }

-   dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+   dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

    return nil
})
@@ -471,7 +471,7 @@ func (dir *Dir) saveEntry() error {
        return fuse.EIO
    }

-   dir.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+   dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

    return nil
})

4
weed/filesys/dir_link.go

@@ -6,7 +6,7 @@ import (
    "syscall"
    "time"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/seaweedfs/fuse"
@@ -43,7 +43,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
        return fuse.EIO
    }

-   dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+   dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

    return nil
})

14
weed/filesys/file.go

@@ -10,7 +10,7 @@ import (
    "github.com/seaweedfs/fuse"
    "github.com/seaweedfs/fuse/fs"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
@@ -33,7 +33,7 @@ type File struct {
    dir            *Dir
    wfs            *WFS
    entry          *filer_pb.Entry
-   entryViewCache []filer2.VisibleInterval
+   entryViewCache []filer.VisibleInterval
    isOpen         int
    reader         io.ReaderAt
    dirtyMetadata  bool
@@ -56,7 +56,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
    attr.Inode = file.fullpath().AsInode()
    attr.Valid = time.Second
    attr.Mode = os.FileMode(file.entry.Attributes.FileMode)
-   attr.Size = filer2.FileSize(file.entry)
+   attr.Size = filer.FileSize(file.entry)
    if file.isOpen > 0 {
        attr.Size = file.entry.Attributes.FileSize
        glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
@@ -118,7 +118,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
    if req.Valid.Size() {
        glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
-       if req.Size < filer2.FileSize(file.entry) {
+       if req.Size < filer.FileSize(file.entry) {
            // fmt.Printf("truncate %v \n", fullPath)
            var chunks []*filer_pb.FileChunk
            var truncatedChunks []*filer_pb.FileChunk
@@ -273,7 +273,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
    })

    for _, chunk := range chunks {
-       file.entryViewCache = filer2.MergeIntoVisibles(file.entryViewCache, chunk)
+       file.entryViewCache = filer.MergeIntoVisibles(file.entryViewCache, chunk)
    }

    file.reader = nil
@@ -285,7 +285,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
func (file *File) setEntry(entry *filer_pb.Entry) {
    file.entry = entry
-   file.entryViewCache, _ = filer2.NonOverlappingVisibleIntervals(filer2.LookupFn(file.wfs), file.entry.Chunks)
+   file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), file.entry.Chunks)
    file.reader = nil
}
@@ -305,7 +305,7 @@ func (file *File) saveEntry() error {
        return fuse.EIO
    }

-   file.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+   file.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

    return nil
})

20
weed/filesys/filehandle.go

@@ -13,7 +13,7 @@ import (
    "github.com/seaweedfs/fuse"
    "github.com/seaweedfs/fuse/fs"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -41,7 +41,7 @@ func newFileHandle(file *File, uid, gid uint32) *FileHandle {
        Gid: gid,
    }
    if fh.f.entry != nil {
-       fh.f.entry.Attributes.FileSize = filer2.FileSize(fh.f.entry)
+       fh.f.entry.Attributes.FileSize = filer.FileSize(fh.f.entry)
    }

    return fh
@@ -99,7 +99,7 @@ func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (maxSto

func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {

-   fileSize := int64(filer2.FileSize(fh.f.entry))
+   fileSize := int64(filer.FileSize(fh.f.entry))

    if fileSize == 0 {
        glog.V(1).Infof("empty fh %v", fh.f.fullpath())
@@ -108,7 +108,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
    var chunkResolveErr error
    if fh.f.entryViewCache == nil {
-       fh.f.entryViewCache, chunkResolveErr = filer2.NonOverlappingVisibleIntervals(filer2.LookupFn(fh.f.wfs), fh.f.entry.Chunks)
+       fh.f.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(filer.LookupFn(fh.f.wfs), fh.f.entry.Chunks)
        if chunkResolveErr != nil {
            return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
        }
@@ -116,8 +116,8 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
    }

    if fh.f.reader == nil {
-       chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64)
-       fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache, fileSize)
+       chunkViews := filer.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64)
+       fh.f.reader = filer.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache, fileSize)
    }

    totalRead, err := fh.f.reader.ReadAt(buff, offset)
@@ -254,10 +254,10 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
        glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
    }

-   manifestChunks, nonManifestChunks := filer2.SeparateManifestChunks(fh.f.entry.Chunks)
+   manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(fh.f.entry.Chunks)

-   chunks, _ := filer2.CompactFileChunks(filer2.LookupFn(fh.f.wfs), nonManifestChunks)
-   chunks, manifestErr := filer2.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.dir.FullPath()), chunks)
+   chunks, _ := filer.CompactFileChunks(filer.LookupFn(fh.f.wfs), nonManifestChunks)
+   chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.dir.FullPath()), chunks)
    if manifestErr != nil {
        // not good, but should be ok
        glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
@@ -270,7 +270,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
        return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
    }

-   fh.f.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+   fh.f.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

    return nil
})

20
weed/filesys/meta_cache/meta_cache.go

@@ -5,8 +5,8 @@ import (
    "os"
    "sync"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
-   "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
+   "github.com/chrislusf/seaweedfs/weed/filer"
+   "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
@@ -17,7 +17,7 @@ import (
// e.g. fill fileId field for chunks

type MetaCache struct {
-   actualStore filer2.FilerStore
+   actualStore filer.FilerStore
    sync.RWMutex
    visitedBoundary *bounded_tree.BoundedTree
}
@@ -29,7 +29,7 @@ func NewMetaCache(dbFolder string) *MetaCache {
    }
}

-func openMetaStore(dbFolder string) filer2.FilerStore {
+func openMetaStore(dbFolder string) filer.FilerStore {

    os.RemoveAll(dbFolder)
    os.MkdirAll(dbFolder, 0755)
@@ -47,18 +47,18 @@ func openMetaStore(dbFolder string) filer2.FilerStore {

}

-func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer2.Entry) error {
+func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer.Entry) error {
    mc.Lock()
    defer mc.Unlock()
    return mc.doInsertEntry(ctx, entry)
}

-func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer2.Entry) error {
+func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer.Entry) error {
    filer_pb.BeforeEntrySerialization(entry.Chunks)
    return mc.actualStore.InsertEntry(ctx, entry)
}

-func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPath, newEntry *filer2.Entry) error {
+func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry) error {
    mc.Lock()
    defer mc.Unlock()
@@ -89,14 +89,14 @@ func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPat
    return nil
}

-func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer2.Entry) error {
+func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer.Entry) error {
    mc.Lock()
    defer mc.Unlock()
    filer_pb.BeforeEntrySerialization(entry.Chunks)
    return mc.actualStore.UpdateEntry(ctx, entry)
}

-func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer2.Entry, err error) {
+func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer.Entry, err error) {
    mc.RLock()
    defer mc.RUnlock()
    entry, err = mc.actualStore.FindEntry(ctx, fp)
@@ -113,7 +113,7 @@ func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err err
    return mc.actualStore.DeleteEntry(ctx, fp)
}

-func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer2.Entry, error) {
+func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer.Entry, error) {
    mc.RLock()
    defer mc.RUnlock()
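MetaCache is a thin concurrency wrapper: mutations take the exclusive lock, lookups take the shared one, and everything else delegates to the embedded store. A minimal sketch of the same pattern, with a plain map standing in for the leveldb-backed FilerStore:

```go
package main

import (
	"fmt"
	"sync"
)

// cache mirrors MetaCache's locking discipline: writers take the exclusive
// lock, readers the shared one, and the embedded store does the real work.
type cache struct {
	sync.RWMutex
	store map[string]string // stand-in for the leveldb-backed FilerStore
}

func (c *cache) insert(path, meta string) {
	c.Lock()
	defer c.Unlock()
	c.store[path] = meta
}

func (c *cache) find(path string) (string, bool) {
	c.RLock()
	defer c.RUnlock()
	meta, ok := c.store[path]
	return meta, ok
}

func main() {
	c := &cache{store: map[string]string{}}
	c.insert("/dir/file", "meta")
	fmt.Println(c.find("/dir/file")) // meta true
}
```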

4
weed/filesys/meta_cache/meta_cache_init.go

@@ -4,7 +4,7 @@ import (
    "context"
    "fmt"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
@@ -17,7 +17,7 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
    glog.V(4).Infof("ReadDirAllEntries %s ...", path)

    err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
-       entry := filer2.FromPbEntry(string(dirPath), pbEntry)
+       entry := filer.FromPbEntry(string(dirPath), pbEntry)
        if err := mc.doInsertEntry(context.Background(), entry); err != nil {
            glog.V(0).Infof("read %s: %v", entry.FullPath, err)
            return err

6
weed/filesys/meta_cache/meta_cache_subscribe.go

@@ -6,7 +6,7 @@ import (
    "io"
    "time"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
@@ -24,7 +24,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
        }

        var oldPath util.FullPath
-       var newEntry *filer2.Entry
+       var newEntry *filer.Entry
        if message.OldEntry != nil {
            oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
            glog.V(4).Infof("deleting %v", oldPath)
@@ -37,7 +37,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
            }
            key := util.NewFullPath(dir, message.NewEntry.Name)
            glog.V(4).Infof("creating %v", key)
-           newEntry = filer2.FromPbEntry(dir, message.NewEntry)
+           newEntry = filer.FromPbEntry(dir, message.NewEntry)
        }

        return mc.AtomicUpdateEntry(context.Background(), oldPath, newEntry)
    }

6
weed/filesys/wfs_deletion.go

@@ -5,7 +5,7 @@ import (

    "google.golang.org/grpc"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -22,7 +22,7 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
            fileIds = append(fileIds, chunk.GetFileIdString())
            continue
        }
-       dataChunks, manifestResolveErr := filer2.ResolveOneChunkManifest(filer2.LookupFn(wfs), chunk)
+       dataChunks, manifestResolveErr := filer.ResolveOneChunkManifest(filer.LookupFn(wfs), chunk)
        if manifestResolveErr != nil {
            glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
        }
@@ -42,7 +42,7 @@ func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.Se

    var vids []string
    for _, fileId := range fileIds {
-       vids = append(vids, filer2.VolumeId(fileId))
+       vids = append(vids, filer.VolumeId(fileId))
    }

    lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {

4
weed/filesys/wfs_write.go

@@ -5,14 +5,14 @@ import (
    "fmt"
    "io"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/security"
)

-func (wfs *WFS) saveDataAsChunk(dir string) filer2.SaveDataAsChunkFunctionType {
+func (wfs *WFS) saveDataAsChunk(dir string) filer.SaveDataAsChunkFunctionType {

    return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) {
        var fileId, host string

6
weed/messaging/broker/broker_grpc_server.go

@@ -4,7 +4,7 @@ import (
    "context"
    "fmt"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)
@@ -29,9 +29,9 @@ func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *m
}

func genTopicDir(namespace, topic string) string {
-   return fmt.Sprintf("%s/%s/%s", filer2.TopicsDir, namespace, topic)
+   return fmt.Sprintf("%s/%s/%s", filer.TopicsDir, namespace, topic)
}

func genTopicDirEntry(namespace, topic string) (dir, entry string) {
-   return fmt.Sprintf("%s/%s", filer2.TopicsDir, namespace), topic
+   return fmt.Sprintf("%s/%s", filer.TopicsDir, namespace), topic
}

4
weed/messaging/broker/broker_grpc_server_publish.go

@@ -7,7 +7,7 @@ import (

    "github.com/golang/protobuf/proto"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
@@ -49,7 +49,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
        Partition: in.Init.Partition,
    }

-   tpDir := fmt.Sprintf("%s/%s/%s", filer2.TopicsDir, tp.Namespace, tp.Topic)
+   tpDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, tp.Namespace, tp.Topic)
    md5File := fmt.Sprintf("p%02d.md5", tp.Partition)
    // println("chan data stored under", tpDir, "as", md5File)

6
weed/messaging/broker/broker_grpc_server_subscribe.go

@@ -8,7 +8,7 @@ import (

    "github.com/golang/protobuf/proto"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
@@ -147,9 +147,9 @@ func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTim
        return nil
    }
    // println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name)
-   chunkedFileReader := filer2.NewChunkStreamReader(broker, hourMinuteEntry.Chunks)
+   chunkedFileReader := filer.NewChunkStreamReader(broker, hourMinuteEntry.Chunks)
    defer chunkedFileReader.Close()
-   if _, err := filer2.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+   if _, err := filer.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
        chunkedFileReader.Close()
        if err == io.EOF {
            return err

4
weed/messaging/broker/topic_manager.go

@@ -5,7 +5,7 @@ import (
    "sync"
    "time"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
    "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
@@ -59,7 +59,7 @@ func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topi
        startTime, stopTime = startTime.UTC(), stopTime.UTC()
        targetFile := fmt.Sprintf(
            "%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d",
-           filer2.TopicsDir, tp.Namespace, tp.Topic,
+           filer.TopicsDir, tp.Namespace, tp.Topic,
            startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
            tp.Partition,
        )
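For readers unfamiliar with the on-filer log layout: the Sprintf above yields one file per topic partition per minute under TopicsDir ("/topics", per weed/filer/topics.go above). A tiny illustration with made-up namespace, topic, and timestamp values:

```go
package main

import "fmt"

func main() {
	// Same format string as buildLogBuffer; values here are illustrative.
	targetFile := fmt.Sprintf(
		"%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d",
		"/topics", "chat", "events",
		2020, 8, 30, 15, 4,
		1,
	)
	fmt.Println(targetFile) // /topics/chat/events/2020-08-30/15-04.part01
}
```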

6
weed/replication/sink/azuresink/azure_sink.go

@@ -8,7 +8,7 @@ import (
    "strings"

    "github.com/Azure/azure-storage-blob-go/azblob"
-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -95,8 +95,8 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
        return nil
    }

-   totalSize := filer2.FileSize(entry)
-   chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+   totalSize := filer.FileSize(entry)
+   chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))

    // Create a URL that references a to-be-created blob in your
    // Azure Storage account's container.

6
weed/replication/sink/b2sink/b2_sink.go

@@ -4,7 +4,7 @@ import (
    "context"
    "strings"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/replication/sink"
    "github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -84,8 +84,8 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
        return nil
    }

-   totalSize := filer2.FileSize(entry)
-   chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+   totalSize := filer.FileSize(entry)
+   chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))

    bucket, err := g.client.Bucket(context.Background(), g.bucket)
    if err != nil {

24
weed/replication/sink/filersink/filer_sink.go

@@ -8,7 +8,7 @@ import (

    "github.com/chrislusf/seaweedfs/weed/security"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -92,7 +92,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
        }
        glog.V(1).Infof("lookup: %v", lookupRequest)
        if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
-           if filer2.ETag(resp.Entry) == filer2.ETag(entry) {
+           if filer.ETag(resp.Entry) == filer.ETag(entry) {
                glog.V(0).Infof("already replicated %s", key)
                return nil
            }
@@ -164,13 +164,13 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
            // skip if already changed
            // this usually happens when the messages are not ordered
            glog.V(0).Infof("late updates %s", key)
-       } else if filer2.ETag(newEntry) == filer2.ETag(existingEntry) {
+       } else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
            // skip if no change
            // this usually happens when retrying the replication
            glog.V(0).Infof("already replicated %s", key)
        } else {
            // find out what changed
-           deletedChunks, newChunks, err := compareChunks(filer2.LookupFn(fs), oldEntry, newEntry)
+           deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
            if err != nil {
                return true, fmt.Errorf("replicate %s compare chunks error: %v", key, err)
            }
@@ -178,7 +178,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
            // delete the chunks that are deleted from the source
            if deleteIncludeChunks {
                // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
-               existingEntry.Chunks = filer2.DoMinusChunks(existingEntry.Chunks, deletedChunks)
+               existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks)
            }

            // replicate the chunks that are new in the source
@@ -207,21 +207,21 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
    })
}

-func compareChunks(lookupFileIdFn filer2.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
-   aData, aMeta, aErr := filer2.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
+func compareChunks(lookupFileIdFn filer.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
+   aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
    if aErr != nil {
        return nil, nil, aErr
    }
-   bData, bMeta, bErr := filer2.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
+   bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
    if bErr != nil {
        return nil, nil, bErr
    }

-   deletedChunks = append(deletedChunks, filer2.DoMinusChunks(aData, bData)...)
-   deletedChunks = append(deletedChunks, filer2.DoMinusChunks(aMeta, bMeta)...)
+   deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
+   deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)

-   newChunks = append(newChunks, filer2.DoMinusChunks(bData, aData)...)
-   newChunks = append(newChunks, filer2.DoMinusChunks(bMeta, aMeta)...)
+   newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
+   newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)

    return
}
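compareChunks boils down to two set differences over the resolved chunk lists, applied to both data and manifest chunks: old minus new gives the deleted chunks, new minus old the added ones. A toy illustration of that set arithmetic, with strings standing in for chunk file ids:

```go
package main

import "fmt"

// doMinus returns the members of a that are not in b, mirroring the
// DoMinusChunks set difference used above.
func doMinus(a, b []string) (out []string) {
	seen := make(map[string]bool, len(b))
	for _, x := range b {
		seen[x] = true
	}
	for _, x := range a {
		if !seen[x] {
			out = append(out, x)
		}
	}
	return
}

func main() {
	oldChunks := []string{"c1", "c2", "c3"}
	newChunks := []string{"c2", "c3", "c4"}
	fmt.Println(doMinus(oldChunks, newChunks)) // deleted: [c1]
	fmt.Println(doMinus(newChunks, oldChunks)) // new:     [c4]
}
```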

6
weed/replication/sink/gcssink/gcs_sink.go

@@ -8,7 +8,7 @@ import (
    "cloud.google.com/go/storage"
    "google.golang.org/api/option"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -89,8 +89,8 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
        return nil
    }

-   totalSize := filer2.FileSize(entry)
-   chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+   totalSize := filer.FileSize(entry)
+   chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))

    wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())

8
weed/replication/sink/s3sink/s3_sink.go

@@ -12,7 +12,7 @@ import (
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3iface"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -107,8 +107,8 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
        return err
    }

-   totalSize := filer2.FileSize(entry)
-   chunkViews := filer2.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+   totalSize := filer.FileSize(entry)
+   chunkViews := filer.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))

    parts := make([]*s3.CompletedPart, len(chunkViews))

@@ -116,7 +116,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
    for chunkIndex, chunk := range chunkViews {
        partId := chunkIndex + 1
        wg.Add(1)
-       go func(chunk *filer2.ChunkView, index int) {
+       go func(chunk *filer.ChunkView, index int) {
            defer wg.Done()
            if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
                err = uploadErr

6
weed/replication/sink/s3sink/s3_write.go

@@ -9,7 +9,7 @@ import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/service/s3"
-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
@@ -103,7 +103,7 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId
}

// To upload a part
-func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
+func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.ChunkView) (*s3.CompletedPart, error) {
    var readSeeker io.ReadSeeker

    readSeeker, err := s3sink.buildReadSeeker(chunk)
@@ -156,7 +156,7 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
    return err
}

-func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) {
+func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, error) {
    fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId)
    if err != nil {
        return nil, err

8
weed/s3api/filer_multipart.go

@@ -12,7 +12,7 @@ import (
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/google/uuid"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -108,7 +108,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
        CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
            Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)),
            Bucket:   input.Bucket,
-           ETag:     aws.String("\"" + filer2.ETagChunks(finalParts) + "\""),
+           ETag:     aws.String("\"" + filer.ETagChunks(finalParts) + "\""),
            Key:      objectKey(input.Key),
        },
    }
@@ -208,8 +208,8 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
                output.Parts = append(output.Parts, &s3.Part{
                    PartNumber:   aws.Int64(int64(partNumber)),
                    LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0).UTC()),
-                   Size:         aws.Int64(int64(filer2.FileSize(entry))),
-                   ETag:         aws.String("\"" + filer2.ETag(entry) + "\""),
+                   Size:         aws.Int64(int64(filer.FileSize(entry))),
+                   ETag:         aws.String("\"" + filer.ETag(entry) + "\""),
                })
            }
        }
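ETagChunks computes the ETag for the assembled parts. The usual S3 multipart convention, which this appears to follow, is the md5 of the concatenated per-part md5 digests with a part-count suffix; a self-contained sketch of that convention (sample data, not SeaweedFS code):

```go
package main

import (
	"crypto/md5"
	"fmt"
)

// multipartETag illustrates the common S3 multipart ETag scheme:
// md5 over the concatenated per-part md5 digests, plus "-<partCount>".
// Whether ETagChunks matches it byte-for-byte is an assumption here.
func multipartETag(parts [][]byte) string {
	var all []byte
	for _, p := range parts {
		sum := md5.Sum(p)
		all = append(all, sum[:]...)
	}
	return fmt.Sprintf("%x-%d", md5.Sum(all), len(parts))
}

func main() {
	fmt.Println(multipartETag([][]byte{[]byte("part1"), []byte("part2")}))
}
```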

6
weed/s3api/s3api_objects_list_handlers.go

@@ -12,7 +12,7 @@ import (
    "strings"
    "time"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -139,8 +139,8 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
            contents = append(contents, ListEntry{
                Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
                LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
-               ETag:         "\"" + filer2.ETag(entry) + "\"",
-               Size:         int64(filer2.FileSize(entry)),
+               ETag:         "\"" + filer.ETag(entry) + "\"",
+               Size:         int64(filer.FileSize(entry)),
                Owner: CanonicalUser{
                    ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
                    DisplayName: entry.Attributes.UserName,

43
weed/server/filer_grpc_server.go

@@ -6,10 +6,9 @@ import (
    "os"
    "path/filepath"
    "strconv"
-   "strings"
    "time"

-   "github.com/chrislusf/seaweedfs/weed/filer2"
+   "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -35,7 +34,7 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
        Entry: &filer_pb.Entry{
            Name:        req.Name,
            IsDirectory: entry.IsDirectory(),
-           Attributes:  filer2.EntryAttributeToPb(entry),
+           Attributes:  filer.EntryAttributeToPb(entry),
            Chunks:      entry.Chunks,
            Extended:    entry.Extended,
        },
@@ -51,7 +50,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
        limit = fs.option.DirListingLimit
    }

-   paginationLimit := filer2.PaginationSize
+   paginationLimit := filer.PaginationSize
    if limit < paginationLimit {
        paginationLimit = limit
    }
@@ -59,7 +58,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
    lastFileName := req.StartFromFileName
    includeLastFile := req.InclusiveStartFrom
    for limit > 0 {
-       entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit)
+       entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit, req.Prefix)
        if err != nil {
            return err
@@ -74,18 +73,12 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
            lastFileName = entry.Name()

-           if req.Prefix != "" {
-               if !strings.HasPrefix(entry.Name(), req.Prefix) {
-                   continue
-               }
-           }
-
            if err := stream.Send(&filer_pb.ListEntriesResponse{
                Entry: &filer_pb.Entry{
                    Name:        entry.Name(),
                    IsDirectory: entry.IsDirectory(),
                    Chunks:      entry.Chunks,
-                   Attributes:  filer2.EntryAttributeToPb(entry),
+                   Attributes:  filer.EntryAttributeToPb(entry),
                    Extended:    entry.Extended,
                },
            }); err != nil {
@ -167,9 +160,9 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr
return return
} }
createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{
createErr := fs.filer.CreateEntry(ctx, &filer.Entry{
FullPath: util.JoinPath(req.Directory, req.Entry.Name), FullPath: util.JoinPath(req.Directory, req.Entry.Name),
Attr: filer2.PbToEntryAttribute(req.Entry.Attributes),
Attr: filer.PbToEntryAttribute(req.Entry.Attributes),
Chunks: chunks, Chunks: chunks,
}, req.OExcl, req.IsFromOtherCluster, req.Signatures) }, req.OExcl, req.IsFromOtherCluster, req.Signatures)
@ -198,7 +191,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("UpdateEntry cleanupChunks %s: %v", fullpath, err2) return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("UpdateEntry cleanupChunks %s: %v", fullpath, err2)
} }
newEntry := &filer2.Entry{
newEntry := &filer.Entry{
FullPath: util.JoinPath(req.Directory, req.Entry.Name), FullPath: util.JoinPath(req.Directory, req.Entry.Name),
Attr: entry.Attr, Attr: entry.Attr,
Extended: req.Entry.Extended, Extended: req.Entry.Extended,
@ -225,7 +218,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
} }
if filer2.EqualEntry(entry, newEntry) {
if filer.EqualEntry(entry, newEntry) {
return &filer_pb.UpdateEntryResponse{}, err return &filer_pb.UpdateEntryResponse{}, err
} }
@ -240,23 +233,23 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
return &filer_pb.UpdateEntryResponse{}, err return &filer_pb.UpdateEntryResponse{}, err
} }
func (fs *FilerServer) cleanupChunks(existingEntry *filer2.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) {
func (fs *FilerServer) cleanupChunks(existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) {
// remove old chunks if not included in the new ones // remove old chunks if not included in the new ones
if existingEntry != nil { if existingEntry != nil {
garbage, err = filer2.MinusChunks(fs.lookupFileId, existingEntry.Chunks, newEntry.Chunks)
garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.Chunks, newEntry.Chunks)
if err != nil { if err != nil {
return newEntry.Chunks, nil, fmt.Errorf("MinusChunks: %v", err) return newEntry.Chunks, nil, fmt.Errorf("MinusChunks: %v", err)
} }
} }
// files with manifest chunks are usually large and append only, skip calculating covered chunks // files with manifest chunks are usually large and append only, skip calculating covered chunks
manifestChunks, nonManifestChunks := filer2.SeparateManifestChunks(newEntry.Chunks)
manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.Chunks)
chunks, coveredChunks := filer2.CompactFileChunks(fs.lookupFileId, nonManifestChunks)
chunks, coveredChunks := filer.CompactFileChunks(fs.lookupFileId, nonManifestChunks)
garbage = append(garbage, coveredChunks...) garbage = append(garbage, coveredChunks...)
chunks, err = filer2.MaybeManifestize(fs.saveAsChunk(
chunks, err = filer.MaybeManifestize(fs.saveAsChunk(
newEntry.Attributes.Replication, newEntry.Attributes.Replication,
newEntry.Attributes.Collection, newEntry.Attributes.Collection,
"", "",
@ -280,9 +273,9 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
var offset int64 = 0 var offset int64 = 0
entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath)) entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath))
if err == filer_pb.ErrNotFound { if err == filer_pb.ErrNotFound {
entry = &filer2.Entry{
entry = &filer.Entry{
FullPath: fullpath, FullPath: fullpath,
Attr: filer2.Attr{
Attr: filer.Attr{
Crtime: time.Now(), Crtime: time.Now(),
Mtime: time.Now(), Mtime: time.Now(),
Mode: os.FileMode(0644), Mode: os.FileMode(0644),
@ -291,7 +284,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
}, },
} }
} else { } else {
offset = int64(filer2.TotalSize(entry.Chunks))
offset = int64(filer.TotalSize(entry.Chunks))
} }
for _, chunk := range req.Chunks { for _, chunk := range req.Chunks {
@ -301,7 +294,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
entry.Chunks = append(entry.Chunks, req.Chunks...) entry.Chunks = append(entry.Chunks, req.Chunks...)
entry.Chunks, err = filer2.MaybeManifestize(fs.saveAsChunk(
entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(
entry.Replication, entry.Replication,
entry.Collection, entry.Collection,
"", "",

16  weed/server/filer_grpc_server_rename.go

@@ -5,7 +5,7 @@ import (
"fmt"
"path/filepath"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -43,7 +43,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom
return &filer_pb.AtomicRenameEntryResponse{}, nil
}
- func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
+ func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events, func() error {
if entry.IsDirectory() {
@@ -59,7 +59,7 @@ func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, e
return nil
}
- func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
+ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
currentDirPath := oldParent.Child(entry.Name())
newDirPath := newParent.Child(newName)
@@ -70,7 +70,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
includeLastFile := false
for {
- entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024)
+ entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "")
if err != nil {
return err
}
@@ -92,7 +92,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
return nil
}
- func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents,
+ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, events *MoveEvents,
moveFolderSubEntries func() error) error {
oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName)
@@ -105,7 +105,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
}
// add to new directory
- newEntry := &filer2.Entry{
+ newEntry := &filer.Entry{
FullPath: newPath,
Attr: entry.Attr,
Chunks: entry.Chunks,
@@ -136,6 +136,6 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
}
type MoveEvents struct {
- oldEntries []*filer2.Entry
- newEntries []*filer2.Entry
+ oldEntries []*filer.Entry
+ newEntries []*filer.Entry
}

6  weed/server/filer_grpc_server_sub_meta.go

@@ -7,7 +7,7 @@ import (
"github.com/golang/protobuf/proto"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -63,7 +63,7 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)
- if _, ok := fs.filer.Store.ActualStore.(filer2.FilerLocalStore); ok {
+ if _, ok := fs.filer.Store.ActualStore.(filer.FilerLocalStore); ok {
// println("reading from persisted logs ...")
processedTsNs, err := fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
if err != nil {
@@ -124,7 +124,7 @@ func eachEventNotificationFn(req *filer_pb.SubscribeMetadataRequest, stream file
fullpath := util.Join(dirPath, entryName)
// skip on filer internal meta logs
- if strings.HasPrefix(fullpath, filer2.SystemLogDir) {
+ if strings.HasPrefix(fullpath, filer.SystemLogDir) {
return nil
}

24  weed/server/filer_server.go

@@ -18,16 +18,16 @@ import (
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/etcd"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb2"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/mongodb"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/redis2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/etcd"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/mongodb"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/redis"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"
@@ -58,7 +58,7 @@ type FilerOption struct {
type FilerServer struct {
option *FilerOption
secret security.SigningKey
- filer *filer2.Filer
+ filer *filer.Filer
grpcDialOption grpc.DialOption
// notifying clients
@@ -82,7 +82,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
glog.Fatal("master list is required!")
}
- fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, func() {
+ fs.filer = filer.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, func() {
fs.listenersCond.Broadcast()
})
fs.filer.Cipher = option.Cipher

8  weed/server/filer_server_handlers_read.go

@@ -11,7 +11,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/images"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -94,7 +94,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
// set etag
- etag := filer2.ETagEntry(entry)
+ etag := filer.ETagEntry(entry)
if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
w.WriteHeader(http.StatusNotModified)
return
@@ -115,7 +115,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
ext := filepath.Ext(filename)
width, height, mode, shouldResize := shouldResizeImages(ext, r)
if shouldResize {
- data, err := filer2.ReadAll(fs.filer.MasterClient, entry.Chunks)
+ data, err := filer.ReadAll(fs.filer.MasterClient, entry.Chunks)
if err != nil {
glog.Errorf("failed to read %s: %v", path, err)
w.WriteHeader(http.StatusNotModified)
@@ -128,7 +128,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
- return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
+ return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
})
}

2  weed/server/filer_server_handlers_read_dir.go

@@ -32,7 +32,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
lastFileName := r.FormValue("lastFileName")
- entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit)
+ entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit, "")
if err != nil {
glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err)

12  weed/server/filer_server_handlers_write_autochunk.go

@@ -13,7 +13,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -86,7 +86,7 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
return nil, nil, err
}
- fileChunks, replyerr = filer2.MaybeManifestize(fs.saveAsChunk(replication, collection, dataCenter, ttlString, fsync), fileChunks)
+ fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(replication, collection, dataCenter, ttlString, fsync), fileChunks)
if replyerr != nil {
glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
return
@@ -108,7 +108,7 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter
return nil, nil, err
}
- fileChunks, replyerr = filer2.MaybeManifestize(fs.saveAsChunk(replication, collection, dataCenter, ttlString, fsync), fileChunks)
+ fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(replication, collection, dataCenter, ttlString, fsync), fileChunks)
if replyerr != nil {
glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
return
@@ -149,9 +149,9 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
}
glog.V(4).Infoln("saving", path)
- entry := &filer2.Entry{
+ entry := &filer.Entry{
FullPath: util.FullPath(path),
- Attr: filer2.Attr{
+ Attr: filer.Attr{
Mtime: time.Now(),
Crtime: crTime,
Mode: os.FileMode(mode),
@@ -236,7 +236,7 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht
return uploadResult, err
}
- func (fs *FilerServer) saveAsChunk(replication string, collection string, dataCenter string, ttlString string, fsync bool) filer2.SaveDataAsChunkFunctionType {
+ func (fs *FilerServer) saveAsChunk(replication string, collection string, dataCenter string, ttlString string, fsync bool) filer.SaveDataAsChunkFunctionType {
return func(reader io.Reader, name string, offset int64) (*filer_pb.FileChunk, string, string, error) {
// assign one file id for one chunk

6  weed/server/filer_server_handlers_write_cipher.go

@@ -7,7 +7,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -58,9 +58,9 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
}
}
- entry := &filer2.Entry{
+ entry := &filer.Entry{
FullPath: util.FullPath(path),
- Attr: filer2.Attr{
+ Attr: filer.Attr{
Mtime: time.Now(),
Crtime: time.Now(),
Mode: 0660,

2  weed/server/master_grpc_server.go

@@ -136,7 +136,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
}
if len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards {
- glog.V(1).Infof("master recieved ec shards from %s: %+v", dn.Url(), heartbeat.EcShards)
+ glog.V(1).Infof("master received ec shards from %s: %+v", dn.Url(), heartbeat.EcShards)
newShards, deletedShards := ms.Topo.SyncDataNodeEcShards(heartbeat.EcShards, dn)
// broadcast the ec vid changes to master clients

2  weed/server/master_server.go

@@ -209,7 +209,7 @@ func (ms *MasterServer) startAdminScripts() {
scriptLines = append(scriptLines, "unlock")
}
- masterAddress := fmt.Sprintf("%s:%d",ms.option.Host, ms.option.Port)
+ masterAddress := fmt.Sprintf("%s:%d", ms.option.Host, ms.option.Port)
var shellOptions shell.ShellOptions
shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master")

9  weed/server/volume_grpc_copy.go

@@ -27,17 +27,12 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
glog.V(0).Infof("volume %d already exists. deleted before copying...", req.VolumeId)
- err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId))
- if err != nil {
- return nil, fmt.Errorf("failed to mount existing volume %d: %v", req.VolumeId, err)
- }
- err = vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
+ err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
if err != nil {
return nil, fmt.Errorf("failed to delete existing volume %d: %v", req.VolumeId, err)
}
- glog.V(0).Infof("deleted exisitng volume %d before copying.", req.VolumeId)
+ glog.V(0).Infof("deleted existing volume %d before copying.", req.VolumeId)
}
location := vs.store.FindFreeLocation()

20  weed/server/webdav_server.go

@@ -19,7 +19,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/security"
)
@@ -41,7 +41,7 @@ type WebDavOption struct {
type WebDavServer struct {
option *WebDavOption
secret security.SigningKey
- filer *filer2.Filer
+ filer *filer.Filer
grpcDialOption grpc.DialOption
Handler *webdav.Handler
}
@@ -67,7 +67,7 @@ func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) {
type WebDavFileSystem struct {
option *WebDavOption
secret security.SigningKey
- filer *filer2.Filer
+ filer *filer.Filer
grpcDialOption grpc.DialOption
chunkCache *chunk_cache.TieredChunkCache
signature int32
@@ -94,7 +94,7 @@ type WebDavFile struct {
isDirectory bool
off int64
entry *filer_pb.Entry
- entryViewCache []filer2.VisibleInterval
+ entryViewCache []filer.VisibleInterval
reader io.ReaderAt
}
@@ -338,7 +338,7 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F
if err != nil {
return nil, err
}
- fi.size = int64(filer2.FileSize(entry))
+ fi.size = int64(filer.FileSize(entry))
fi.name = string(fullpath)
fi.mode = os.FileMode(entry.Attributes.FileMode)
fi.modifiledTime = time.Unix(entry.Attributes.Mtime, 0)
@@ -471,17 +471,17 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
if err != nil {
return 0, err
}
- fileSize := int64(filer2.FileSize(f.entry))
+ fileSize := int64(filer.FileSize(f.entry))
if fileSize == 0 {
return 0, io.EOF
}
if f.entryViewCache == nil {
- f.entryViewCache, _ = filer2.NonOverlappingVisibleIntervals(filer2.LookupFn(f.fs), f.entry.Chunks)
+ f.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(f.fs), f.entry.Chunks)
f.reader = nil
}
if f.reader == nil {
- chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
- f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache, fileSize)
+ chunkViews := filer.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
+ f.reader = filer.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache, fileSize)
}
readSize, err = f.reader.ReadAt(p, f.off)
@@ -509,7 +509,7 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) {
err = filer_pb.ReadDirAllEntries(f.fs, util.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) error {
fi := FileInfo{
- size: int64(filer2.FileSize(entry)),
+ size: int64(filer.FileSize(entry)),
name: entry.Name,
mode: os.FileMode(entry.Attributes.FileMode),
modifiledTime: time.Unix(entry.Attributes.Mtime, 0),

4  weed/shell/command_fs_cat.go

@@ -5,7 +5,7 @@ import (
"io"
"math"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -52,7 +52,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write
return err
}
- return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
+ return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
})

4  weed/shell/command_fs_du.go

@@ -4,7 +4,7 @@ import (
"fmt"
"io"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -70,7 +70,7 @@ func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir
}
} else {
fileBlockCount = uint64(len(entry.Chunks))
- fileByteCount = filer2.FileSize(entry)
+ fileByteCount = filer.FileSize(entry)
blockCount += fileBlockCount
byteCount += fileByteCount
}

4  weed/shell/command_fs_ls.go

@@ -8,7 +8,7 @@ import (
"strconv"
"strings"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -95,7 +95,7 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer
fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n",
fileMode, len(entry.Chunks),
userName, groupName,
- filer2.FileSize(entry), dir, entry.Name)
+ filer.FileSize(entry), dir, entry.Name)
} else {
fmt.Fprintf(writer, "%s\n", entry.Name)
}

4  weed/shell/command_volume_fsck.go

@@ -11,7 +11,7 @@ import (
"path/filepath"
"sync"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -197,7 +197,7 @@ func (c *commandVolumeFsck) collectFilerFileIds(tempFolder string, volumeIdToSer
files[i.vid].Write(buffer)
}
}, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
- dChunks, mChunks, resolveErr := filer2.ResolveChunkManifest(filer2.LookupFn(c.env), entry.Entry.Chunks)
+ dChunks, mChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.Chunks)
if resolveErr != nil {
return nil
}

3  weed/storage/disk_location.go

@@ -174,6 +174,9 @@ func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e er
}
func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (found bool, e error) {
+ l.volumesLock.Lock()
+ defer l.volumesLock.Unlock()
v, ok := l.volumes[vid]
if !ok {
return
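Note: the two added lines close a race in deleteVolumeById, which reads and then mutates the shared l.volumes map. For context, this is the usual mutex-guarded-map pattern; a self-contained sketch (the types and names here are illustrative, not from the PR):

package main

import (
	"fmt"
	"sync"
)

// registry mirrors the shape of DiskLocation's volumes map: state
// shared between readers and writers, guarded by a single mutex.
type registry struct {
	mu    sync.RWMutex
	items map[int]string
}

func (r *registry) get(id int) (string, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	v, ok := r.items[id]
	return v, ok
}

// delete holds the write lock for the whole read-then-delete, just as
// deleteVolumeById now does in the hunk above.
func (r *registry) delete(id int) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if _, ok := r.items[id]; !ok {
		return false
	}
	delete(r.items, id)
	return true
}

func main() {
	r := &registry{items: map[int]string{1: "volume-1"}}
	r.delete(1)
	_, ok := r.get(1)
	fmt.Println("still present:", ok) // prints: still present: false
}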

5  weed/storage/needle/volume_ttl_test.go

@@ -30,6 +30,11 @@ func TestTTLReadWrite(t *testing.T) {
t.Errorf("5d ttl:%v", ttl)
}
+ ttl, _ = ReadTTL("50d")
+ if ttl.Minutes() != 50*24*60 {
+ t.Errorf("50d ttl:%v", ttl)
+ }
ttl, _ = ReadTTL("5w")
if ttl.Minutes() != 5*7*24*60 {
t.Errorf("5w ttl:%v", ttl)

2  weed/storage/store.go

@@ -380,7 +380,7 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error {
Ttl: v.Ttl.ToUint32(),
}
for _, location := range s.Locations {
- if found, error := location.deleteVolumeById(i); found && error == nil {
+ if found, err := location.deleteVolumeById(i); found && err == nil {
glog.V(0).Infof("DeleteVolume %d", i)
s.DeletedVolumesChan <- message
return nil

5  weed/storage/store_vacuum.go

@@ -2,6 +2,7 @@ package storage
import (
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -16,6 +17,10 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
}
func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error {
if v := s.findVolume(vid); v != nil {
+ s := stats.NewDiskStatus(v.dir)
+ if int64(s.Free) < preallocate {
+ return fmt.Errorf("free space: %d bytes, not enough for %d bytes", s.Free, preallocate)
+ }
return v.Compact2(preallocate, compactionBytePerSecond)
}
return fmt.Errorf("volume id %d is not found during compact", vid)
