Chris Lu
7 years ago
88 changed files with 4512 additions and 3337 deletions
48   weed/command/filer.go
216  weed/command/filer_copy.go
7    weed/command/mount.go
9    weed/command/mount_std.go
21   weed/command/server.go
9    weed/command/volume.go
96   weed/filer/cassandra_store/cassandra_store.go
22   weed/filer/cassandra_store/schema.cql
26   weed/filer/embedded_filer/design.txt
15   weed/filer/embedded_filer/directory.go
312  weed/filer/embedded_filer/directory_in_map.go
86   weed/filer/embedded_filer/directory_test.go
156  weed/filer/embedded_filer/filer_embedded.go
87   weed/filer/embedded_filer/files_in_leveldb.go
29   weed/filer/filer.go
66   weed/filer/flat_namespace/flat_namespace_filer.go
9    weed/filer/flat_namespace/flat_namespace_store.go
67   weed/filer/mysql_store/README.md
274  weed/filer/mysql_store/mysql_store.go
30   weed/filer/mysql_store/mysql_store_test.go
456  weed/filer/postgres_store/postgres_native.go
149  weed/filer/postgres_store/postgres_store.go
50   weed/filer/redis_store/redis_store.go
45   weed/filer/vasto_store/design.txt
130  weed/filer2/abstract_sql/abstract_sql_store.go
32   weed/filer2/abstract_sql/hashing.go
14   weed/filer2/cassandra/README.txt
131  weed/filer2/cassandra/cassandra_store.go
126  weed/filer2/configuration.go
42   weed/filer2/embedded/embedded_store.go
42   weed/filer2/entry.go
45   weed/filer2/entry_codec.go
245  weed/filer2/filechunks.go
316  weed/filer2/filechunks_test.go
91   weed/filer2/filer.go
60   weed/filer2/filer_master.go
66   weed/filer2/filer_structure.go
18   weed/filer2/filerstore.go
31   weed/filer2/fullpath.go
169  weed/filer2/leveldb/leveldb_store.go
61   weed/filer2/leveldb/leveldb_store_test.go
90   weed/filer2/memdb/memdb_store.go
46   weed/filer2/memdb/memdb_store_test.go
67   weed/filer2/mysql/mysql_store.go
17   weed/filer2/postgres/README.txt
68   weed/filer2/postgres/postgres_store.go
167  weed/filer2/redis/redis_store.go
153  weed/filesys/dir.go
165  weed/filesys/dirty_page.go
125  weed/filesys/file.go
219  weed/filesys/filehandle.go
13   weed/filesys/wfs.go
2    weed/images/favicon.go
2    weed/operation/assign_file_id.go
31   weed/operation/filer/register.go
73   weed/pb/filer.proto
588  weed/pb/filer_pb/filer.pb.go
143  weed/pb/master_pb/seaweed.pb.go
9    weed/pb/seaweed.proto
13   weed/server/common.go
204  weed/server/filer_grpc_server.go
164  weed/server/filer_server.go
41   weed/server/filer_server_handlers_admin.go
247  weed/server/filer_server_handlers_read.go
70   weed/server/filer_server_handlers_read_dir.go
412  weed/server/filer_server_handlers_write.go
189  weed/server/filer_server_handlers_write_autochunk.go
139  weed/server/filer_server_handlers_write_monopart.go
39   weed/server/filer_server_handlers_write_multipart.go
24   weed/server/filer_ui/breadcrumb.go
52   weed/server/filer_ui/templates.go
14   weed/server/master_grpc_server.go
2    weed/server/raft_server.go
46   weed/server/volume_grpc_client.go
25   weed/server/volume_server.go
4    weed/server/volume_server_handlers.go
6    weed/server/volume_server_handlers_admin.go
16   weed/server/volume_server_handlers_read.go
6    weed/server/volume_server_handlers_ui.go
2    weed/server/volume_server_handlers_vacuum.go
6    weed/server/volume_server_handlers_write.go
4    weed/server/volume_server_ui/templates.go
4    weed/storage/needle_map_memory.go
51   weed/storage/store.go
2    weed/storage/volume_checking.go
1    weed/topology/store_replicate.go
2    weed/util/constants.go
68   weed/util/http_util.go
weed/filer/cassandra_store/cassandra_store.go  @@ -1,96 +0,0 @@
package cassandra_store

import (
	"fmt"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"

	"github.com/gocql/gocql"
)

/*

Basically you need a table just like this:

CREATE TABLE seaweed_files (
	path varchar,
	fids list<varchar>,
	PRIMARY KEY (path)
);

Need to match flat_namespace.FlatNamespaceStore interface
	Put(fullFileName string, fid string) (err error)
	Get(fullFileName string) (fid string, err error)
	Delete(fullFileName string) (fid string, err error)

*/
type CassandraStore struct {
	cluster *gocql.ClusterConfig
	session *gocql.Session
}

func NewCassandraStore(keyspace string, hosts string) (c *CassandraStore, err error) {
	c = &CassandraStore{}
	s := strings.Split(hosts, ",")
	if len(s) == 1 {
		glog.V(2).Info("Only one cassandra node to connect! A cluster is recommended! Now using:", string(hosts))
		c.cluster = gocql.NewCluster(hosts)
	} else if len(s) > 1 {
		c.cluster = gocql.NewCluster(s...)
	}
	c.cluster.Keyspace = keyspace
	c.cluster.Consistency = gocql.LocalQuorum
	c.session, err = c.cluster.CreateSession()
	if err != nil {
		glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
	}
	return
}

func (c *CassandraStore) Put(fullFileName string, fid string) (err error) {
	var input []string
	input = append(input, fid)
	if err := c.session.Query(
		`INSERT INTO seaweed_files (path, fids) VALUES (?, ?)`,
		fullFileName, input).Exec(); err != nil {
		glog.V(0).Infof("Failed to save file %s with id %s: %v", fullFileName, fid, err)
		return err
	}
	return nil
}

func (c *CassandraStore) Get(fullFileName string) (fid string, err error) {
	var output []string
	if err := c.session.Query(
		`SELECT fids FROM seaweed_files WHERE path = ? LIMIT 1`,
		fullFileName).Consistency(gocql.One).Scan(&output); err != nil {
		if err != gocql.ErrNotFound {
			glog.V(0).Infof("Failed to find file %s: %v", fullFileName, err)
			return "", filer.ErrNotFound
		}
	}
	if len(output) == 0 {
		return "", fmt.Errorf("No file id found for %s", fullFileName)
	}
	return output[0], nil
}

// Currently the fid is not returned
func (c *CassandraStore) Delete(fullFileName string) (err error) {
	if err := c.session.Query(
		`DELETE FROM seaweed_files WHERE path = ?`,
		fullFileName).Exec(); err != nil {
		if err != gocql.ErrNotFound {
			glog.V(0).Infof("Failed to delete file %s: %v", fullFileName, err)
		}
		return err
	}
	return nil
}

func (c *CassandraStore) Close() {
	if c.session != nil {
		c.session.Close()
	}
}
weed/filer/cassandra_store/schema.cql  @@ -1,22 +0,0 @@
/*

Here is the CQL to create the table used by CassandraStore.

Optionally you can adjust the keyspace name and replication settings.

For a production server, very likely you want to set replication_factor to 3.

*/

create keyspace seaweed WITH replication = {
	'class':'SimpleStrategy',
	'replication_factor':1
};

use seaweed;

CREATE TABLE seaweed_files (
	path varchar,
	fids list<varchar>,
	PRIMARY KEY (path)
);
weed/filer/embedded_filer/design.txt  @@ -1,26 +0,0 @@
Design Assumptions:
1. the number of directories is magnitudes smaller than the number of files
2. unlimited number of files under any directory
Philosophy:
	metadata for directories and files should be separated
Design:
	Store directories in a normal map
		hopefully all directories fit in memory
		efficient to move/rename/list_directories
	Log directory changes to an append-only log file
	Store files in a sorted string table keyed by <dir_id/filename>
		efficient to list_files: just a simple iterator
		efficient to locate files: binary search

Testing:
1. starting server, "weed server -filer=true"
2. posting files to different folders
curl -F "filename=@design.txt" "http://localhost:8888/sources/"
curl -F "filename=@design.txt" "http://localhost:8888/design/"
curl -F "filename=@directory.go" "http://localhost:8888/sources/weed/go/"
curl -F "filename=@directory.go" "http://localhost:8888/sources/testing/go/"
curl -F "filename=@filer.go" "http://localhost:8888/sources/weed/go/"
curl -F "filename=@filer_in_leveldb.go" "http://localhost:8888/sources/weed/go/"
curl "http://localhost:8888/?pretty=y"
curl "http://localhost:8888/sources/weed/go/?pretty=y"
curl "http://localhost:8888/sources/weed/go/?pretty=y"
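The <dir_id/filename> key layout described above is what genKey in weed/filer/embedded_filer/files_in_leveldb.go (further down in this diff) implements. The following is only a minimal standalone sketch of the same encoding, with names chosen here for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

// fileKey sketches the <dir_id/filename> layout: the first 4 bytes hold the
// directory id (big-endian), the rest hold the file name, so files in the
// same directory sort together and can be range-scanned.
func fileKey(dirId int32, fileName string) []byte {
	key := make([]byte, 4, 4+len(fileName))
	binary.BigEndian.PutUint32(key, uint32(dirId))
	return append(key, fileName...)
}

func main() {
	fmt.Printf("% x\n", fileKey(7, "design.txt")) // 00 00 00 07 followed by the name bytes
}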
weed/filer/embedded_filer/directory.go  @@ -1,15 +0,0 @@
package embedded_filer

import (
	"github.com/chrislusf/seaweedfs/weed/filer"
)

type DirectoryManager interface {
	FindDirectory(dirPath string) (DirectoryId, error)
	ListDirectories(dirPath string) (dirs []filer.DirectoryName, err error)
	MakeDirectory(currentDirPath string, dirName string) (DirectoryId, error)
	MoveUnderDirectory(oldDirPath string, newParentDirPath string) error
	DeleteDirectory(dirPath string) error

	//functions used by FUSE
	FindDirectoryById(DirectoryId, error)
}
weed/filer/embedded_filer/directory_in_map.go  @@ -1,312 +0,0 @@
package embedded_filer

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/util"
)

var writeLock sync.Mutex //serialize changes to dir.log

type DirectoryId int32

type DirectoryEntryInMap struct {
	sync.Mutex
	Name           string
	Parent         *DirectoryEntryInMap
	subDirectories map[string]*DirectoryEntryInMap
	Id             DirectoryId
}

func (de *DirectoryEntryInMap) getChild(dirName string) (*DirectoryEntryInMap, bool) {
	de.Lock()
	defer de.Unlock()
	child, ok := de.subDirectories[dirName]
	return child, ok
}
func (de *DirectoryEntryInMap) addChild(dirName string, child *DirectoryEntryInMap) {
	de.Lock()
	defer de.Unlock()
	de.subDirectories[dirName] = child
}
func (de *DirectoryEntryInMap) removeChild(dirName string) {
	de.Lock()
	defer de.Unlock()
	delete(de.subDirectories, dirName)
}
func (de *DirectoryEntryInMap) hasChildren() bool {
	de.Lock()
	defer de.Unlock()
	return len(de.subDirectories) > 0
}
func (de *DirectoryEntryInMap) children() (dirNames []filer.DirectoryName) {
	de.Lock()
	defer de.Unlock()
	for k := range de.subDirectories {
		dirNames = append(dirNames, filer.DirectoryName(k))
	}
	return dirNames
}

type DirectoryManagerInMap struct {
	Root      *DirectoryEntryInMap
	max       DirectoryId
	logFile   *os.File
	isLoading bool
}

func (dm *DirectoryManagerInMap) newDirectoryEntryInMap(parent *DirectoryEntryInMap, name string) (d *DirectoryEntryInMap, err error) {
	d = &DirectoryEntryInMap{Name: name, Parent: parent, subDirectories: make(map[string]*DirectoryEntryInMap)}
	var parts []string
	for p := d; p != nil && p.Name != ""; p = p.Parent {
		parts = append(parts, p.Name)
	}
	n := len(parts)
	if n <= 0 {
		return nil, fmt.Errorf("Failed to create folder %s/%s", parent.Name, name)
	}
	for i := 0; i < n/2; i++ {
		parts[i], parts[n-1-i] = parts[n-1-i], parts[i]
	}
	dm.max++
	d.Id = dm.max
	dm.log("add", "/"+strings.Join(parts, "/"), strconv.Itoa(int(d.Id)))
	return d, nil
}

func (dm *DirectoryManagerInMap) log(words ...string) {
	if !dm.isLoading {
		dm.logFile.WriteString(strings.Join(words, "\t") + "\n")
	}
}

func NewDirectoryManagerInMap(dirLogFile string) (dm *DirectoryManagerInMap, err error) {
	dm = &DirectoryManagerInMap{}
	//dm.Root does not use newDirectoryEntryInMap, since dm.max would be changed
	dm.Root = &DirectoryEntryInMap{subDirectories: make(map[string]*DirectoryEntryInMap)}
	if dm.logFile, err = os.OpenFile(dirLogFile, os.O_RDWR|os.O_CREATE, 0644); err != nil {
		return nil, fmt.Errorf("cannot write directory log file %s: %v", dirLogFile, err)
	}
	return dm, dm.load()
}

func (dm *DirectoryManagerInMap) processEachLine(line string) error {
	if strings.HasPrefix(line, "#") {
		return nil
	}
	if line == "" {
		return nil
	}
	parts := strings.Split(line, "\t")
	if len(parts) == 0 {
		return nil
	}
	switch parts[0] {
	case "add":
		v, pe := strconv.Atoi(parts[2])
		if pe != nil {
			return pe
		}
		if e := dm.loadDirectory(parts[1], DirectoryId(v)); e != nil {
			return e
		}
	case "mov":
		newName := ""
		if len(parts) >= 4 {
			newName = parts[3]
		}
		if e := dm.MoveUnderDirectory(parts[1], parts[2], newName); e != nil {
			return e
		}
	case "del":
		if e := dm.DeleteDirectory(parts[1]); e != nil {
			return e
		}
	default:
		fmt.Printf("line %s has %s!\n", line, parts[0])
		return nil
	}
	return nil
}
func (dm *DirectoryManagerInMap) load() error {
	dm.max = 0
	lines := bufio.NewReader(dm.logFile)
	dm.isLoading = true
	defer func() { dm.isLoading = false }()
	for {
		line, err := util.Readln(lines)
		if err != nil && err != io.EOF {
			return err
		}
		if pe := dm.processEachLine(string(line)); pe != nil {
			return pe
		}
		if err == io.EOF {
			return nil
		}
	}
}

func (dm *DirectoryManagerInMap) findDirectory(dirPath string) (*DirectoryEntryInMap, error) {
	if dirPath == "" {
		return dm.Root, nil
	}
	dirPath = CleanFilePath(dirPath)
	if dirPath == "/" {
		return dm.Root, nil
	}
	parts := strings.Split(dirPath, "/")
	dir := dm.Root
	for i := 1; i < len(parts); i++ {
		if sub, ok := dir.getChild(parts[i]); ok {
			dir = sub
		} else {
			return dm.Root, filer.ErrNotFound
		}
	}
	return dir, nil
}
func (dm *DirectoryManagerInMap) findDirectoryId(dirPath string) (DirectoryId, error) {
	d, e := dm.findDirectory(dirPath)
	if e == nil {
		return d.Id, nil
	}
	return dm.Root.Id, e
}

func (dm *DirectoryManagerInMap) loadDirectory(dirPath string, dirId DirectoryId) error {
	dirPath = CleanFilePath(dirPath)
	if dirPath == "/" {
		return nil
	}
	parts := strings.Split(dirPath, "/")
	dir := dm.Root
	for i := 1; i < len(parts); i++ {
		sub, ok := dir.getChild(parts[i])
		if !ok {
			writeLock.Lock()
			if sub2, createdByOtherThread := dir.getChild(parts[i]); createdByOtherThread {
				sub = sub2
			} else {
				if i != len(parts)-1 {
					writeLock.Unlock()
					return fmt.Errorf("%s should be created after parent %s", dirPath, parts[i])
				}
				var err error
				sub, err = dm.newDirectoryEntryInMap(dir, parts[i])
				if err != nil {
					writeLock.Unlock()
					return err
				}
				if sub.Id != dirId {
					writeLock.Unlock()
					// the dir.log should be in the same order as the in-memory directory ids
					return fmt.Errorf("%s should have id %v instead of %v", dirPath, sub.Id, dirId)
				}
				dir.addChild(parts[i], sub)
			}
			writeLock.Unlock()
		}
		dir = sub
	}
	return nil
}

func (dm *DirectoryManagerInMap) makeDirectory(dirPath string) (dir *DirectoryEntryInMap, created bool) {
	dirPath = CleanFilePath(dirPath)
	if dirPath == "/" {
		return dm.Root, false
	}
	parts := strings.Split(dirPath, "/")
	dir = dm.Root
	for i := 1; i < len(parts); i++ {
		sub, ok := dir.getChild(parts[i])
		if !ok {
			writeLock.Lock()
			if sub2, createdByOtherThread := dir.getChild(parts[i]); createdByOtherThread {
				sub = sub2
			} else {
				var err error
				sub, err = dm.newDirectoryEntryInMap(dir, parts[i])
				if err != nil {
					writeLock.Unlock()
					return nil, false
				}
				dir.addChild(parts[i], sub)
				created = true
			}
			writeLock.Unlock()
		}
		dir = sub
	}
	return dir, created
}

func (dm *DirectoryManagerInMap) MakeDirectory(dirPath string) (DirectoryId, error) {
	dir, _ := dm.makeDirectory(dirPath)
	return dir.Id, nil
}

func (dm *DirectoryManagerInMap) MoveUnderDirectory(oldDirPath string, newParentDirPath string, newName string) error {
	writeLock.Lock()
	defer writeLock.Unlock()
	oldDir, oe := dm.findDirectory(oldDirPath)
	if oe != nil {
		return oe
	}
	parentDir, pe := dm.findDirectory(newParentDirPath)
	if pe != nil {
		return pe
	}
	dm.log("mov", oldDirPath, newParentDirPath, newName)
	oldDir.Parent.removeChild(oldDir.Name)
	if newName == "" {
		newName = oldDir.Name
	}
	parentDir.addChild(newName, oldDir)
	oldDir.Name = newName
	oldDir.Parent = parentDir
	return nil
}

func (dm *DirectoryManagerInMap) ListDirectories(dirPath string) (dirNames []filer.DirectoryName, err error) {
	d, e := dm.findDirectory(dirPath)
	if e != nil {
		return dirNames, e
	}
	return d.children(), nil
}
func (dm *DirectoryManagerInMap) DeleteDirectory(dirPath string) error {
	writeLock.Lock()
	defer writeLock.Unlock()
	if dirPath == "/" {
		return fmt.Errorf("Can not delete %s", dirPath)
	}
	d, e := dm.findDirectory(dirPath)
	if e != nil {
		return e
	}
	if d.hasChildren() {
		return fmt.Errorf("dir %s still has sub directories", dirPath)
	}
	d.Parent.removeChild(d.Name)
	d.Parent = nil
	dm.log("del", dirPath)
	return nil
}

func CleanFilePath(fp string) string {
	ret := filepath.Clean(fp)
	if os.PathSeparator == '\\' {
		return strings.Replace(ret, "\\", "/", -1)
	}
	return ret
}
weed/filer/embedded_filer/directory_test.go  @@ -1,86 +0,0 @@
package embedded_filer

import (
	"os"
	"strings"
	"testing"
)

func TestDirectory(t *testing.T) {
	dm, _ := NewDirectoryManagerInMap("/tmp/dir.log")
	defer func() {
		if true {
			os.Remove("/tmp/dir.log")
		}
	}()
	dm.MakeDirectory("/a/b/c")
	dm.MakeDirectory("/a/b/d")
	dm.MakeDirectory("/a/b/e")
	dm.MakeDirectory("/a/b/e/f")
	dm.MakeDirectory("/a/b/e/f/g")
	dm.MoveUnderDirectory("/a/b/e/f/g", "/a/b", "t")
	if _, err := dm.findDirectoryId("/a/b/e/f/g"); err == nil {
		t.Fatal("/a/b/e/f/g should not exist any more after moving")
	}
	if _, err := dm.findDirectoryId("/a/b/t"); err != nil {
		t.Fatal("/a/b/t should exist after moving")
	}
	if _, err := dm.findDirectoryId("/a/b/g"); err == nil {
		t.Fatal("/a/b/g should not exist after moving")
	}
	dm.MoveUnderDirectory("/a/b/e/f", "/a/b", "")
	if _, err := dm.findDirectoryId("/a/b/f"); err != nil {
		t.Fatal("/a/b/f should exist after moving")
	}
	dm.MakeDirectory("/a/b/g/h/i")
	dm.DeleteDirectory("/a/b/e/f")
	dm.DeleteDirectory("/a/b/e")
	dirNames, _ := dm.ListDirectories("/a/b/e")
	for _, v := range dirNames {
		println("sub1 dir:", v)
	}
	dm.logFile.Close()

	var path []string
	printTree(dm.Root, path)

	dm2, e := NewDirectoryManagerInMap("/tmp/dir.log")
	if e != nil {
		println("load error", e.Error())
	}
	if !compare(dm.Root, dm2.Root) {
		t.Fatal("restored dir not the same!")
	}
	printTree(dm2.Root, path)
}

func printTree(node *DirectoryEntryInMap, path []string) {
	println(strings.Join(path, "/") + "/" + node.Name)
	path = append(path, node.Name)
	for _, v := range node.subDirectories {
		printTree(v, path)
	}
}

func compare(root1 *DirectoryEntryInMap, root2 *DirectoryEntryInMap) bool {
	if len(root1.subDirectories) != len(root2.subDirectories) {
		return false
	}
	if root1.Name != root2.Name {
		return false
	}
	if root1.Id != root2.Id {
		return false
	}
	if !(root1.Parent == nil && root2.Parent == nil) {
		if root1.Parent.Id != root2.Parent.Id {
			return false
		}
	}
	for k, v := range root1.subDirectories {
		if !compare(v, root2.subDirectories[k]) {
			return false
		}
	}
	return true
}
weed/filer/embedded_filer/filer_embedded.go  @@ -1,156 +0,0 @@
package embedded_filer

import (
	"errors"
	"fmt"
	"path/filepath"
	"strings"
	"sync"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/operation"
)

type FilerEmbedded struct {
	master      string
	directories *DirectoryManagerInMap
	files       *FileListInLevelDb
	mvMutex     sync.Mutex
}

func NewFilerEmbedded(master string, dir string) (filer *FilerEmbedded, err error) {
	dm, de := NewDirectoryManagerInMap(filepath.Join(dir, "dir.log"))
	if de != nil {
		return nil, de
	}
	fl, fe := NewFileListInLevelDb(dir)
	if fe != nil {
		return nil, fe
	}
	filer = &FilerEmbedded{
		master:      master,
		directories: dm,
		files:       fl,
	}
	return
}

func (filer *FilerEmbedded) CreateFile(filePath string, fid string) (err error) {
	dir, file := filepath.Split(filePath)
	dirId, e := filer.directories.MakeDirectory(dir)
	if e != nil {
		return e
	}
	return filer.files.CreateFile(dirId, file, fid)
}
func (filer *FilerEmbedded) FindFile(filePath string) (fid string, err error) {
	dir, file := filepath.Split(filePath)
	return filer.findFileEntry(dir, file)
}
func (filer *FilerEmbedded) findFileEntry(parentPath string, fileName string) (fid string, err error) {
	dirId, e := filer.directories.findDirectoryId(parentPath)
	if e != nil {
		return "", e
	}
	return filer.files.FindFile(dirId, fileName)
}

func (filer *FilerEmbedded) LookupDirectoryEntry(dirPath string, name string) (found bool, fileId string, err error) {
	if _, err = filer.directories.findDirectory(filepath.Join(dirPath, name)); err == nil {
		return true, "", nil
	}
	if fileId, err = filer.findFileEntry(dirPath, name); err == nil {
		return true, fileId, nil
	}
	return false, "", err
}
func (filer *FilerEmbedded) ListDirectories(dirPath string) (dirs []filer.DirectoryName, err error) {
	return filer.directories.ListDirectories(dirPath)
}
func (filer *FilerEmbedded) ListFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) {
	dirId, e := filer.directories.findDirectoryId(dirPath)
	if e != nil {
		return nil, e
	}
	return filer.files.ListFiles(dirId, lastFileName, limit), nil
}
func (filer *FilerEmbedded) DeleteDirectory(dirPath string, recursive bool) (err error) {
	dirId, e := filer.directories.findDirectoryId(dirPath)
	if e != nil {
		return e
	}
	if sub_dirs, sub_err := filer.directories.ListDirectories(dirPath); sub_err == nil {
		if len(sub_dirs) > 0 && !recursive {
			return fmt.Errorf("Fail to delete directory %s: %d sub directories found!", dirPath, len(sub_dirs))
		}
		for _, sub := range sub_dirs {
			if delete_sub_err := filer.DeleteDirectory(filepath.Join(dirPath, string(sub)), recursive); delete_sub_err != nil {
				return delete_sub_err
			}
		}
	}
	list := filer.files.ListFiles(dirId, "", 100)
	if len(list) != 0 && !recursive {
		return fmt.Errorf("Fail to delete non-empty directory %s!", dirPath)
	}
	for {
		if len(list) == 0 {
			return filer.directories.DeleteDirectory(dirPath)
		}
		var fids []string
		for _, fileEntry := range list {
			fids = append(fids, string(fileEntry.Id))
		}
		if result_list, delete_file_err := operation.DeleteFiles(filer.master, fids); delete_file_err != nil {
			return delete_file_err
		} else {
			if len(result_list.Errors) > 0 {
				return errors.New(strings.Join(result_list.Errors, "\n"))
			}
		}
		lastFile := list[len(list)-1]
		list = filer.files.ListFiles(dirId, lastFile.Name, 100)
	}

}

func (filer *FilerEmbedded) DeleteFile(filePath string) (fid string, err error) {
	dir, file := filepath.Split(filePath)
	dirId, e := filer.directories.findDirectoryId(dir)
	if e != nil {
		return "", e
	}
	return filer.files.DeleteFile(dirId, file)
}

/*
Move a folder or a file, with 4 use cases:
	mv fromDir toNewDir
	mv fromDir toOldDir
	mv fromFile toDir
	mv fromFile toFile
*/
func (filer *FilerEmbedded) Move(fromPath string, toPath string) error {
	filer.mvMutex.Lock()
	defer filer.mvMutex.Unlock()

	if _, dir_err := filer.directories.findDirectoryId(fromPath); dir_err == nil {
		if _, err := filer.directories.findDirectoryId(toPath); err == nil {
			// move folder under an existing folder
			return filer.directories.MoveUnderDirectory(fromPath, toPath, "")
		}
		// move folder to a new folder
		return filer.directories.MoveUnderDirectory(fromPath, filepath.Dir(toPath), filepath.Base(toPath))
	}
	if fid, file_err := filer.DeleteFile(fromPath); file_err == nil {
		if _, err := filer.directories.findDirectoryId(toPath); err == nil {
			// move file under an existing folder
			return filer.CreateFile(filepath.Join(toPath, filepath.Base(fromPath)), fid)
		}
		// move to a folder with new name
		return filer.CreateFile(toPath, fid)
	}
	return fmt.Errorf("File %s is not found!", fromPath)
}
weed/filer/embedded_filer/files_in_leveldb.go  @@ -1,87 +0,0 @@
package embedded_filer

import (
	"bytes"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

/*
The entry in level db has this format:
	key: genKey(dirId, fileName)
	value: []byte(fid)
And genKey(dirId, fileName) uses the first 4 bytes to store dirId, and the rest for fileName
*/

type FileListInLevelDb struct {
	db *leveldb.DB
}

func NewFileListInLevelDb(dir string) (fl *FileListInLevelDb, err error) {
	fl = &FileListInLevelDb{}
	if fl.db, err = leveldb.OpenFile(dir, nil); err != nil {
		return
	}
	return
}

func genKey(dirId DirectoryId, fileName string) []byte {
	ret := make([]byte, 0, 4+len(fileName))
	for i := 3; i >= 0; i-- {
		ret = append(ret, byte(dirId>>(uint(i)*8)))
	}
	ret = append(ret, []byte(fileName)...)
	return ret
}

func (fl *FileListInLevelDb) CreateFile(dirId DirectoryId, fileName string, fid string) (err error) {
	glog.V(4).Infoln("directory", dirId, "fileName", fileName, "fid", fid)
	return fl.db.Put(genKey(dirId, fileName), []byte(fid), nil)
}
func (fl *FileListInLevelDb) DeleteFile(dirId DirectoryId, fileName string) (fid string, err error) {
	if fid, err = fl.FindFile(dirId, fileName); err != nil {
		if err == leveldb.ErrNotFound {
			return "", nil
		}
		return
	}
	err = fl.db.Delete(genKey(dirId, fileName), nil)
	return fid, err
}
func (fl *FileListInLevelDb) FindFile(dirId DirectoryId, fileName string) (fid string, err error) {
	data, e := fl.db.Get(genKey(dirId, fileName), nil)
	if e == leveldb.ErrNotFound {
		return "", filer.ErrNotFound
	} else if e != nil {
		return "", e
	}
	return string(data), nil
}
func (fl *FileListInLevelDb) ListFiles(dirId DirectoryId, lastFileName string, limit int) (files []filer.FileEntry) {
	glog.V(4).Infoln("directory", dirId, "lastFileName", lastFileName, "limit", limit)
	dirKey := genKey(dirId, "")
	iter := fl.db.NewIterator(&util.Range{Start: genKey(dirId, lastFileName)}, nil)
	limitCounter := 0
	for iter.Next() {
		key := iter.Key()
		if !bytes.HasPrefix(key, dirKey) {
			break
		}
		fileName := string(key[len(dirKey):])
		if fileName == lastFileName {
			continue
		}
		limitCounter++
		if limit > 0 {
			if limitCounter > limit {
				break
			}
		}
		files = append(files, filer.FileEntry{Name: fileName, Id: filer.FileId(string(iter.Value()))})
	}
	iter.Release()
	return
}
weed/filer/filer.go  @@ -1,29 +0,0 @@
package filer

import (
	"errors"
)

type FileId string //file id in SeaweedFS

type FileEntry struct {
	Name string `json:"name,omitempty"` //file name without path
	Id   FileId `json:"fid,omitempty"`
}

type DirectoryName string

type Filer interface {
	CreateFile(fullFileName string, fid string) (err error)
	FindFile(fullFileName string) (fid string, err error)
	DeleteFile(fullFileName string) (fid string, err error)

	//Optional functions. embedded filer supports these
	ListDirectories(dirPath string) (dirs []DirectoryName, err error)
	ListFiles(dirPath string, lastFileName string, limit int) (files []FileEntry, err error)
	DeleteDirectory(dirPath string, recursive bool) (err error)
	Move(fromPath string, toPath string) (err error)
	LookupDirectoryEntry(dirPath string, name string) (found bool, fileId string, err error)
}

var ErrNotFound = errors.New("filer: no entry is found in filer store")
weed/filer/flat_namespace/flat_namespace_filer.go  @@ -1,66 +0,0 @@
package flat_namespace

import (
	"errors"
	"path/filepath"

	"github.com/chrislusf/seaweedfs/weed/filer"
)

type FlatNamespaceFiler struct {
	master string
	store  FlatNamespaceStore
}

var (
	ErrNotImplemented = errors.New("Not Implemented for flat namespace meta data store")
)

func NewFlatNamespaceFiler(master string, store FlatNamespaceStore) *FlatNamespaceFiler {
	return &FlatNamespaceFiler{
		master: master,
		store:  store,
	}
}

func (filer *FlatNamespaceFiler) CreateFile(fullFileName string, fid string) (err error) {
	return filer.store.Put(fullFileName, fid)
}
func (filer *FlatNamespaceFiler) FindFile(fullFileName string) (fid string, err error) {
	return filer.store.Get(fullFileName)
}
func (filer *FlatNamespaceFiler) LookupDirectoryEntry(dirPath string, name string) (found bool, fileId string, err error) {
	if fileId, err = filer.FindFile(filepath.Join(dirPath, name)); err == nil {
		return true, fileId, nil
	}
	return false, "", err
}
func (filer *FlatNamespaceFiler) ListDirectories(dirPath string) (dirs []filer.DirectoryName, err error) {
	return nil, ErrNotImplemented
}
func (filer *FlatNamespaceFiler) ListFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) {
	return nil, ErrNotImplemented
}
func (filer *FlatNamespaceFiler) DeleteDirectory(dirPath string, recursive bool) (err error) {
	return ErrNotImplemented
}

func (filer *FlatNamespaceFiler) DeleteFile(fullFileName string) (fid string, err error) {
	fid, err = filer.FindFile(fullFileName)
	if err != nil {
		return "", err
	}

	err = filer.store.Delete(fullFileName)
	if err != nil {
		return "", err
	}

	return fid, nil
	//return filer.store.Delete(fullFileName)
	//are you kidding me!!!!
}

func (filer *FlatNamespaceFiler) Move(fromPath string, toPath string) error {
	return ErrNotImplemented
}
weed/filer/flat_namespace/flat_namespace_store.go  @@ -1,9 +0,0 @@
package flat_namespace

type FlatNamespaceStore interface {
	Put(fullFileName string, fid string) (err error)
	Get(fullFileName string) (fid string, err error)
	Delete(fullFileName string) (err error)
}
weed/filer/mysql_store/README.md  @@ -1,67 +0,0 @@
# MySQL filer mapping store

## Schema format

Basically, uriPath and fid are the key elements stored in MySQL. For optimization and typical usage,
adding an integer primary key plus createTime, updateTime, and status fields is usually worthwhile.
Of course, you can freely customize the schema for your concrete circumstances.

<pre><code>
CREATE TABLE IF NOT EXISTS `filer_mapping` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT,
  `uriPath` char(256) NOT NULL DEFAULT "" COMMENT 'http uriPath',
  `fid` char(36) NOT NULL DEFAULT "" COMMENT 'seaweedfs fid',
  `createTime` int(10) NOT NULL DEFAULT 0 COMMENT 'createdTime in unix timestamp',
  `updateTime` int(10) NOT NULL DEFAULT 0 COMMENT 'updatedTime in unix timestamp',
  `remark` varchar(20) NOT NULL DEFAULT "" COMMENT 'reserved field',
  `status` tinyint(2) DEFAULT '1' COMMENT 'resource status',
  PRIMARY KEY (`id`),
  UNIQUE KEY `index_uriPath` (`uriPath`)
) DEFAULT CHARSET=utf8;
</code></pre>

The MySQL config params are not added to the weed command options like the other stores (redis, cassandra). Instead,
we created a config file (JSON format) for them. TOML, YAML or XML would also be OK, but TOML and YAML need a third-party package,
while XML is a little more complex.

The sample config file's content is below:

<pre><code>
{
    "mysql": [
        {
            "User": "root",
            "Password": "root",
            "HostName": "127.0.0.1",
            "Port": 3306,
            "DataBase": "seaweedfs"
        },
        {
            "User": "root",
            "Password": "root",
            "HostName": "127.0.0.2",
            "Port": 3306,
            "DataBase": "seaweedfs"
        }
    ],
    "IsSharding": true,
    "ShardCount": 1024
}
</code></pre>

The "mysql" field in the above conf file is an array which includes all the mysql instances you prepared to store sharded data.

1. If one mysql instance is enough, just keep one instance in the "mysql" field.

2. If table sharding on a specific mysql instance is needed, set the "IsSharding" field to true and specify the total number of table shards with the "ShardCount" field.

3. If the mysql service can be auto-scaled transparently in your environment, just configure one mysql instance (usually a frontend proxy or VIP) and set "IsSharding" to false.

4. If you prepare more than one mysql instance and have no plan to use table sharding on any instance (IsSharding is false), instance sharding will still be done implicitly.
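As a rough illustration only, the sample JSON above maps onto the `MySqlConf` struct defined in mysql_store.go below. The wrapper struct, file name, and program here are assumptions made for this sketch, not part of the store itself:

<pre><code>
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
)

// mysqlFilerConf mirrors the sample JSON; only the field layout follows the README.
type mysqlFilerConf struct {
	Mysql []struct {
		User, Password, HostName, DataBase string
		Port                               int
	} `json:"mysql"`
	IsSharding bool `json:"IsSharding"`
	ShardCount int  `json:"ShardCount"`
}

func main() {
	data, err := ioutil.ReadFile("mysql_filer.json") // assumed config path
	if err != nil {
		panic(err)
	}
	var conf mysqlFilerConf
	if err := json.Unmarshal(data, &conf); err != nil {
		panic(err)
	}
	fmt.Printf("%d instance(s), sharding=%v, shards=%d\n",
		len(conf.Mysql), conf.IsSharding, conf.ShardCount)
}
</code></pre>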
weed/filer/mysql_store/mysql_store.go  @@ -1,274 +0,0 @@
package mysql_store

import (
	"database/sql"
	"fmt"
	"hash/crc32"
	"sync"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer"

	_ "github.com/go-sql-driver/mysql"
)

const (
	sqlUrl                     = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
	default_maxIdleConnections = 100
	default_maxOpenConnections = 50
	default_maxTableNums       = 1024
	tableName                  = "filer_mapping"
)

var (
	_init_db        sync.Once
	_db_connections []*sql.DB
)

type MySqlConf struct {
	User               string
	Password           string
	HostName           string
	Port               int
	DataBase           string
	MaxIdleConnections int
	MaxOpenConnections int
}

type ShardingConf struct {
	IsSharding bool `json:"isSharding"`
	ShardCount int  `json:"shardCount"`
}

type MySqlStore struct {
	dbs        []*sql.DB
	isSharding bool
	shardCount int
}

func getDbConnection(confs []MySqlConf) []*sql.DB {
	_init_db.Do(func() {
		for _, conf := range confs {

			sqlUrl := fmt.Sprintf(sqlUrl, conf.User, conf.Password, conf.HostName, conf.Port, conf.DataBase)
			var dbErr error
			_db_connection, dbErr := sql.Open("mysql", sqlUrl)
			if dbErr != nil {
				_db_connection.Close()
				_db_connection = nil
				panic(dbErr)
			}
			var maxIdleConnections, maxOpenConnections int

			if conf.MaxIdleConnections != 0 {
				maxIdleConnections = conf.MaxIdleConnections
			} else {
				maxIdleConnections = default_maxIdleConnections
			}
			if conf.MaxOpenConnections != 0 {
				maxOpenConnections = conf.MaxOpenConnections
			} else {
				maxOpenConnections = default_maxOpenConnections
			}

			_db_connection.SetMaxIdleConns(maxIdleConnections)
			_db_connection.SetMaxOpenConns(maxOpenConnections)
			_db_connections = append(_db_connections, _db_connection)
		}
	})
	return _db_connections
}

func NewMysqlStore(confs []MySqlConf, isSharding bool, shardCount int) *MySqlStore {
	ms := &MySqlStore{
		dbs:        getDbConnection(confs),
		isSharding: isSharding,
		shardCount: shardCount,
	}

	for _, db := range ms.dbs {
		if !isSharding {
			ms.shardCount = 1
		} else {
			if ms.shardCount == 0 {
				ms.shardCount = default_maxTableNums
			}
		}
		for i := 0; i < ms.shardCount; i++ {
			if err := ms.createTables(db, tableName, i); err != nil {
				fmt.Printf("create table failed %v", err)
			}
		}
	}

	return ms
}

func (s *MySqlStore) hash(fullFileName string) (instance_offset, table_postfix int) {
	hash_value := crc32.ChecksumIEEE([]byte(fullFileName))
	instance_offset = int(hash_value) % len(s.dbs)
	table_postfix = int(hash_value) % s.shardCount
	return
}

func (s *MySqlStore) parseFilerMappingInfo(path string) (instanceId int, tableFullName string, err error) {
	instance_offset, table_postfix := s.hash(path)
	instanceId = instance_offset
	if s.isSharding {
		tableFullName = fmt.Sprintf("%s_%04d", tableName, table_postfix)
	} else {
		tableFullName = tableName
	}
	return
}

func (s *MySqlStore) Get(fullFilePath string) (fid string, err error) {
	instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath)
	if err != nil {
		return "", fmt.Errorf("MySqlStore Get operation can not parse file path %s: err is %v", fullFilePath, err)
	}
	fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName)
	if err == sql.ErrNoRows {
		// not found
		err = filer.ErrNotFound
	}
	return fid, err
}

func (s *MySqlStore) Put(fullFilePath string, fid string) (err error) {
	var tableFullName string

	instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath)
	if err != nil {
		return fmt.Errorf("MySqlStore Put operation can not parse file path %s: err is %v", fullFilePath, err)
	}
	var old_fid string
	if old_fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil && err != sql.ErrNoRows {
		return fmt.Errorf("MySqlStore Put operation failed when querying path %s: err is %v", fullFilePath, err)
	} else {
		if len(old_fid) == 0 {
			err = s.insert(fullFilePath, fid, s.dbs[instance_offset], tableFullName)
			if err != nil {
				err = fmt.Errorf("MySqlStore Put operation failed when inserting path %s with fid %s : err is %v", fullFilePath, fid, err)
			}
		} else {
			err = s.update(fullFilePath, fid, s.dbs[instance_offset], tableFullName)
			if err != nil {
				err = fmt.Errorf("MySqlStore Put operation failed when updating path %s with fid %s : err is %v", fullFilePath, fid, err)
			}
		}
	}
	return
}

func (s *MySqlStore) Delete(fullFilePath string) (err error) {
	var fid string
	instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath)
	if err != nil {
		return fmt.Errorf("MySqlStore Delete operation can not parse file path %s: err is %v", fullFilePath, err)
	}
	if fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil {
		return fmt.Errorf("MySqlStore Delete operation failed when querying path %s: err is %v", fullFilePath, err)
	} else if fid == "" {
		return nil
	}
	if err = s.delete(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil {
		return fmt.Errorf("MySqlStore Delete operation failed when deleting path %s: err is %v", fullFilePath, err)
	} else {
		return nil
	}
}

func (s *MySqlStore) Close() {
	for _, db := range s.dbs {
		db.Close()
	}
}

var createTable = `
CREATE TABLE IF NOT EXISTS %s (
  id bigint(20) NOT NULL AUTO_INCREMENT,
  uriPath char(255) NOT NULL DEFAULT "" COMMENT 'http uriPath',
  fid char(36) NOT NULL DEFAULT "" COMMENT 'seaweedfs fid',
  createTime int(10) NOT NULL DEFAULT 0 COMMENT 'createdTime in unix timestamp',
  updateTime int(10) NOT NULL DEFAULT 0 COMMENT 'updatedTime in unix timestamp',
  remark varchar(20) NOT NULL DEFAULT "" COMMENT 'reserved field',
  status tinyint(2) DEFAULT '1' COMMENT 'resource status',
  PRIMARY KEY (id),
  UNIQUE KEY index_uriPath (uriPath)
) DEFAULT CHARSET=utf8;
`

func (s *MySqlStore) createTables(db *sql.DB, tableName string, postfix int) error {
	var realTableName string
	if s.isSharding {
		realTableName = fmt.Sprintf("%s_%04d", tableName, postfix)
	} else {
		realTableName = tableName
	}

	stmt, err := db.Prepare(fmt.Sprintf(createTable, realTableName))
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec()
	if err != nil {
		return err
	}
	return nil
}

func (s *MySqlStore) query(uriPath string, db *sql.DB, tableName string) (string, error) {
	sqlStatement := "SELECT fid FROM %s WHERE uriPath=?"
	row := db.QueryRow(fmt.Sprintf(sqlStatement, tableName), uriPath)
	var fid string
	err := row.Scan(&fid)
	if err != nil {
		return "", err
	}
	return fid, nil
}

func (s *MySqlStore) update(uriPath string, fid string, db *sql.DB, tableName string) error {
	sqlStatement := "UPDATE %s SET fid=?, updateTime=? WHERE uriPath=?"
	res, err := db.Exec(fmt.Sprintf(sqlStatement, tableName), fid, time.Now().Unix(), uriPath)
	if err != nil {
		return err
	}

	_, err = res.RowsAffected()
	if err != nil {
		return err
	}
	return nil
}

func (s *MySqlStore) insert(uriPath string, fid string, db *sql.DB, tableName string) error {
	sqlStatement := "INSERT INTO %s (uriPath,fid,createTime) VALUES(?,?,?)"
	res, err := db.Exec(fmt.Sprintf(sqlStatement, tableName), uriPath, fid, time.Now().Unix())
	if err != nil {
		return err
	}

	_, err = res.RowsAffected()
	if err != nil {
		return err
	}
	return nil
}

func (s *MySqlStore) delete(uriPath string, db *sql.DB, tableName string) error {
	sqlStatement := "DELETE FROM %s WHERE uriPath=?"
	res, err := db.Exec(fmt.Sprintf(sqlStatement, tableName), uriPath)
	if err != nil {
		return err
	}

	_, err = res.RowsAffected()
	if err != nil {
		return err
	}
	return nil
}
weed/filer/mysql_store/mysql_store_test.go  @@ -1,30 +0,0 @@
package mysql_store

import (
	"encoding/json"
	"hash/crc32"
	"testing"
)

func TestGenerateMysqlConf(t *testing.T) {
	var conf []MySqlConf
	conf = append(conf, MySqlConf{
		User:     "root",
		Password: "root",
		HostName: "localhost",
		Port:     3306,
		DataBase: "seaweedfs",
	})
	body, err := json.Marshal(conf)
	if err != nil {
		t.Errorf("json encoding err %s", err.Error())
	}
	t.Logf("json output is %s", string(body))
}

func TestCRC32FullPathName(t *testing.T) {
	fullPathName := "/prod-bucket/law632191483895612493300-signed.pdf"
	hash_value := crc32.ChecksumIEEE([]byte(fullPathName))
	table_postfix := int(hash_value) % 1024
	t.Logf("table postfix %d", table_postfix)
}
weed/filer/postgres_store/postgres_native.go  @@ -1,456 +0,0 @@
package postgres_store

import (
	"database/sql"
	"fmt"
	"path/filepath"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"

	_ "github.com/lib/pq"
	_ "path/filepath"
	"strings"
)

type DirectoryId int32

func databaseExists(db *sql.DB, databaseName string) (bool, error) {
	sqlStatement := "SELECT datname from pg_database WHERE datname='%s'"
	row := db.QueryRow(fmt.Sprintf(sqlStatement, databaseName))

	var dbName string
	err := row.Scan(&dbName)
	if err != nil {
		if err == sql.ErrNoRows {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

func createDatabase(db *sql.DB, databaseName string) error {
	sqlStatement := "CREATE DATABASE %s ENCODING='UTF8'"
	_, err := db.Exec(fmt.Sprintf(sqlStatement, databaseName))
	return err
}

func getDbConnection(conf PostgresConf) *sql.DB {
	_init_db.Do(func() {

		sqlUrl := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, "postgres", conf.SslMode)
		glog.V(3).Infoln("Opening postgres master database")

		var dbErr error
		_db_connection, dbErr := sql.Open("postgres", sqlUrl)
		if dbErr != nil {
			_db_connection.Close()
			_db_connection = nil
			panic(dbErr)
		}

		pingErr := _db_connection.Ping()
		if pingErr != nil {
			_db_connection.Close()
			_db_connection = nil
			panic(pingErr)
		}

		glog.V(3).Infoln("Checking to see if DB exists: ", conf.DataBase)
		var existsErr error
		dbExists, existsErr := databaseExists(_db_connection, conf.DataBase)
		if existsErr != nil {
			_db_connection.Close()
			_db_connection = nil
			panic(existsErr)
		}

		if !dbExists {
			glog.V(3).Infoln("Database doesn't exist. Attempting to create one: ", conf.DataBase)
			createErr := createDatabase(_db_connection, conf.DataBase)
			if createErr != nil {
				_db_connection.Close()
				_db_connection = nil
				panic(createErr)
			}
		}

		glog.V(3).Infoln("Closing master postgres database and opening configured database: ", conf.DataBase)
		_db_connection.Close()
		_db_connection = nil

		sqlUrl = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, conf.DataBase, conf.SslMode)
		_db_connection, dbErr = sql.Open("postgres", sqlUrl)
		if dbErr != nil {
			_db_connection.Close()
			_db_connection = nil
			panic(dbErr)
		}

		pingErr = _db_connection.Ping()
		if pingErr != nil {
			_db_connection.Close()
			_db_connection = nil
			panic(pingErr)
		}

		maxIdleConnections, maxOpenConnections := default_maxIdleConnections, default_maxOpenConnections
		if conf.MaxIdleConnections != 0 {
			maxIdleConnections = conf.MaxIdleConnections
		}
		if conf.MaxOpenConnections != 0 {
			maxOpenConnections = conf.MaxOpenConnections
		}

		_db_connection.SetMaxIdleConns(maxIdleConnections)
		_db_connection.SetMaxOpenConns(maxOpenConnections)
	})
	return _db_connection
}

var createDirectoryTable = `

CREATE TABLE IF NOT EXISTS %s (
  id BIGSERIAL NOT NULL,
  directoryRoot VARCHAR(1024) NOT NULL DEFAULT '',
  directoryName VARCHAR(1024) NOT NULL DEFAULT '',
  CONSTRAINT unique_directory UNIQUE (directoryRoot, directoryName)
);
`

var createFileTable = `

CREATE TABLE IF NOT EXISTS %s (
  id BIGSERIAL NOT NULL,
  directoryPart VARCHAR(1024) NOT NULL DEFAULT '',
  filePart VARCHAR(1024) NOT NULL DEFAULT '',
  fid VARCHAR(36) NOT NULL DEFAULT '',
  createTime BIGINT NOT NULL DEFAULT 0,
  updateTime BIGINT NOT NULL DEFAULT 0,
  remark VARCHAR(20) NOT NULL DEFAULT '',
  status SMALLINT NOT NULL DEFAULT '1',
  PRIMARY KEY (id),
  CONSTRAINT %s_unique_file UNIQUE (directoryPart, filePart)
);
`

func (s *PostgresStore) createDirectoriesTable() error {
	glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", directoriesTableName)

	sqlCreate := fmt.Sprintf(createDirectoryTable, directoriesTableName)

	stmt, err := s.db.Prepare(sqlCreate)
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec()
	if err != nil {
		return err
	}
	return nil
}

func (s *PostgresStore) createFilesTable() error {

	glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", filesTableName)

	sqlCreate := fmt.Sprintf(createFileTable, filesTableName, filesTableName)

	stmt, err := s.db.Prepare(sqlCreate)
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec()
	if err != nil {
		return err
	}
	return nil
}

func (s *PostgresStore) query(uriPath string) (string, error) {
	directoryPart, filePart := filepath.Split(uriPath)
	sqlStatement := fmt.Sprintf("SELECT fid FROM %s WHERE directoryPart=$1 AND filePart=$2", filesTableName)

	row := s.db.QueryRow(sqlStatement, directoryPart, filePart)
	var fid string
	err := row.Scan(&fid)

	glog.V(3).Infof("Postgres query -- looking up path '%s' and found id '%s' ", uriPath, fid)

	if err != nil {
		return "", err
	}
	return fid, nil
}

func (s *PostgresStore) update(uriPath string, fid string) error {
	directoryPart, filePart := filepath.Split(uriPath)
	sqlStatement := fmt.Sprintf("UPDATE %s SET fid=$1, updateTime=$2 WHERE directoryPart=$3 AND filePart=$4", filesTableName)

	glog.V(3).Infof("Postgres query -- updating path '%s' with id '%s'", uriPath, fid)

	res, err := s.db.Exec(sqlStatement, fid, time.Now().Unix(), directoryPart, filePart)
	if err != nil {
		return err
	}

	_, err = res.RowsAffected()
	if err != nil {
		return err
	}
	return nil
}

func (s *PostgresStore) insert(uriPath string, fid string) error {
	directoryPart, filePart := filepath.Split(uriPath)

	existingId, _, _ := s.lookupDirectory(directoryPart)
	if existingId == 0 {
		s.recursiveInsertDirectory(directoryPart)
	}

	sqlStatement := fmt.Sprintf("INSERT INTO %s (directoryPart,filePart,fid,createTime) VALUES($1, $2, $3, $4)", filesTableName)
	glog.V(3).Infof("Postgres query -- inserting path '%s' with id '%s'", uriPath, fid)

	res, err := s.db.Exec(sqlStatement, directoryPart, filePart, fid, time.Now().Unix())

	if err != nil {
		return err
	}

	rows, err := res.RowsAffected()
	if rows != 1 {
		return fmt.Errorf("Postgres insert -- rows affected = %d. Expecting 1", rows)
	}
	if err != nil {
		return err
	}

	return nil
}

func (s *PostgresStore) recursiveInsertDirectory(dirPath string) {
	pathParts := strings.Split(dirPath, "/")

	var workingPath string = "/"
	for _, part := range pathParts {
		if part == "" {
			continue
		}
		workingPath += (part + "/")
		existingId, _, _ := s.lookupDirectory(workingPath)
		if existingId == 0 {
			s.insertDirectory(workingPath)
		}
	}
}

func (s *PostgresStore) insertDirectory(dirPath string) {
	pathParts := strings.Split(dirPath, "/")

	directoryRoot := "/"
	directoryName := ""
	if len(pathParts) > 1 {
		directoryRoot = strings.Join(pathParts[0:len(pathParts)-2], "/") + "/"
		directoryName = strings.Join(pathParts[len(pathParts)-2:], "/")
	} else if len(pathParts) == 1 {
		directoryRoot = "/"
		directoryName = pathParts[0] + "/"
	}
	sqlInsertDirectoryStatement := fmt.Sprintf("INSERT INTO %s (directoryroot, directoryname) "+
		"SELECT $1, $2 WHERE NOT EXISTS ( SELECT id FROM %s WHERE directoryroot=$3 AND directoryname=$4 )",
		directoriesTableName, directoriesTableName)

	glog.V(4).Infof("Postgres query -- Inserting directory (if it doesn't exist) - root = %s, name = %s",
		directoryRoot, directoryName)

	_, err := s.db.Exec(sqlInsertDirectoryStatement, directoryRoot, directoryName, directoryRoot, directoryName)
	if err != nil {
		glog.V(0).Infof("Postgres query -- Error inserting directory - root = %s, name = %s: %s",
			directoryRoot, directoryName, err)
	}
}

func (s *PostgresStore) delete(uriPath string) error {
	directoryPart, filePart := filepath.Split(uriPath)
	sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE directoryPart=$1 AND filePart=$2", filesTableName)

	glog.V(3).Infof("Postgres query -- deleting path '%s'", uriPath)

	res, err := s.db.Exec(sqlStatement, directoryPart, filePart)
	if err != nil {
		return err
	}

	_, err = res.RowsAffected()
	if err != nil {
		return err
	}
	return nil
}

func (s *PostgresStore) lookupDirectory(dirPath string) (DirectoryId, string, error) {
	directoryRoot, directoryName := s.mySplitPath(dirPath)

	sqlStatement := fmt.Sprintf("SELECT id, directoryroot, directoryname FROM %s WHERE directoryRoot=$1 AND directoryName=$2", directoriesTableName)

	row := s.db.QueryRow(sqlStatement, directoryRoot, directoryName)
	var id DirectoryId
	var dirRoot string
	var dirName string
	err := row.Scan(&id, &dirRoot, &dirName)

	glog.V(3).Infof("Postgres lookupDirectory -- looking up directory '%s' and found id '%d', root '%s', name '%s' ", dirPath, id, dirRoot, dirName)

	if err != nil {
		return 0, "", err
	}
	return id, filepath.Join(dirRoot, dirName), err
}

func (s *PostgresStore) findDirectories(dirPath string, limit int) (dirs []filer.DirectoryName, err error) {
	sqlStatement := fmt.Sprintf("SELECT id, directoryroot, directoryname FROM %s WHERE directoryRoot=$1 AND directoryName != '' ORDER BY id LIMIT $2", directoriesTableName)
	rows, err := s.db.Query(sqlStatement, dirPath, limit)

	if err != nil {
		glog.V(0).Infof("Postgres findDirectories error: %s", err)
	}

	if rows != nil {
		defer rows.Close()
		for rows.Next() {
			var id DirectoryId
			var directoryRoot string
|
||||
var directoryName string |
|
||||
|
|
||||
scanErr := rows.Scan(&id, &directoryRoot, &directoryName) |
|
||||
if scanErr != nil { |
|
||||
err = scanErr |
|
||||
} |
|
||||
dirs = append(dirs, filer.DirectoryName(directoryName)) |
|
||||
} |
|
||||
} |
|
||||
return |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) safeToDeleteDirectory(dirPath string, recursive bool) bool { |
|
||||
if recursive { |
|
||||
return true |
|
||||
} |
|
||||
sqlStatement := fmt.Sprintf("SELECT id FROM %s WHERE directoryRoot LIKE $1 LIMIT 1", directoriesTableName) |
|
||||
row := s.db.QueryRow(sqlStatement, dirPath+"%") |
|
||||
|
|
||||
var id DirectoryId |
|
||||
err := row.Scan(&id) |
|
||||
if err != nil { |
|
||||
if err == sql.ErrNoRows { |
|
||||
return true |
|
||||
} |
|
||||
} |
|
||||
return false |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) mySplitPath(dirPath string) (directoryRoot string, directoryName string) { |
|
||||
pathParts := strings.Split(dirPath, "/") |
|
||||
directoryRoot = "/" |
|
||||
directoryName = "" |
|
||||
if len(pathParts) > 1 { |
|
||||
directoryRoot = strings.Join(pathParts[0:len(pathParts)-2], "/") + "/" |
|
||||
directoryName = strings.Join(pathParts[len(pathParts)-2:], "/") |
|
||||
} else if len(pathParts) == 1 { |
|
||||
directoryRoot = "/" |
|
||||
directoryName = pathParts[0] + "/" |
|
||||
} |
|
||||
return directoryRoot, directoryName |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) deleteDirectory(dirPath string, recursive bool) (err error) { |
|
||||
directoryRoot, directoryName := s.mySplitPath(dirPath) |
|
||||
|
|
||||
// delete files
|
|
||||
sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE directorypart=$1", filesTableName) |
|
||||
_, err = s.db.Exec(sqlStatement, dirPath) |
|
||||
if err != nil { |
|
||||
return err |
|
||||
} |
|
||||
|
|
||||
// delete specific directory if it is empty or recursive delete was requested
|
|
||||
safeToDelete := s.safeToDeleteDirectory(dirPath, recursive) |
|
||||
if safeToDelete { |
|
||||
sqlStatement = fmt.Sprintf("DELETE FROM %s WHERE directoryRoot=$1 AND directoryName=$2", directoriesTableName) |
|
||||
_, err = s.db.Exec(sqlStatement, directoryRoot, directoryName) |
|
||||
if err != nil { |
|
||||
return err |
|
||||
} |
|
||||
} |
|
||||
|
|
||||
if recursive { |
|
||||
// delete descendant files
|
|
||||
sqlStatement = fmt.Sprintf("DELETE FROM %s WHERE directorypart LIKE $1", filesTableName) |
|
||||
_, err = s.db.Exec(sqlStatement, dirPath+"%") |
|
||||
if err != nil { |
|
||||
return err |
|
||||
} |
|
||||
|
|
||||
// delete descendant directories
|
|
||||
sqlStatement = fmt.Sprintf("DELETE FROM %s WHERE directoryRoot LIKE $1", directoriesTableName) |
|
||||
_, err = s.db.Exec(sqlStatement, dirPath+"%") |
|
||||
if err != nil { |
|
||||
return err |
|
||||
} |
|
||||
} |
|
||||
|
|
||||
return err |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) findFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) { |
|
||||
var rows *sql.Rows = nil |
|
||||
|
|
||||
if lastFileName == "" { |
|
||||
sqlStatement := |
|
||||
fmt.Sprintf("SELECT fid, directorypart, filepart FROM %s WHERE directorypart=$1 ORDER BY id LIMIT $2", filesTableName) |
|
||||
rows, err = s.db.Query(sqlStatement, dirPath, limit) |
|
||||
} else { |
|
||||
sqlStatement := |
|
||||
fmt.Sprintf("SELECT fid, directorypart, filepart FROM %s WHERE directorypart=$1 "+ |
|
||||
"AND id > (SELECT id FROM %s WHERE directoryPart=$2 AND filepart=$3) ORDER BY id LIMIT $4", |
|
||||
filesTableName, filesTableName) |
|
||||
_, lastFileNameName := filepath.Split(lastFileName) |
|
||||
rows, err = s.db.Query(sqlStatement, dirPath, dirPath, lastFileNameName, limit) |
|
||||
} |
|
||||
|
|
||||
if err != nil { |
|
||||
glog.V(0).Infof("Postgres find files error: %s", err) |
|
||||
} |
|
||||
|
|
||||
if rows != nil { |
|
||||
defer rows.Close() |
|
||||
|
|
||||
for rows.Next() { |
|
||||
var fid filer.FileId |
|
||||
var directoryPart string |
|
||||
var filePart string |
|
||||
|
|
||||
scanErr := rows.Scan(&fid, &directoryPart, &filePart) |
|
||||
if scanErr != nil { |
|
||||
err = scanErr |
|
||||
} |
|
||||
|
|
||||
files = append(files, filer.FileEntry{Name: filepath.Join(directoryPart, filePart), Id: fid}) |
|
||||
if len(files) >= limit { |
|
||||
break |
|
||||
} |
|
||||
} |
|
||||
} |
|
||||
|
|
||||
glog.V(3).Infof("Postgres findFiles -- looking up files under '%s' and found %d files. Limit=%d, lastFileName=%s", |
|
||||
dirPath, len(files), limit, lastFileName) |
|
||||
|
|
||||
return files, err |
|
||||
} |
|
@ -1,149 +0,0 @@ |
|||||
package postgres_store |
|
||||
|
|
||||
import ( |
|
||||
"database/sql" |
|
||||
"errors" |
|
||||
"fmt" |
|
||||
"sync" |
|
||||
"github.com/chrislusf/seaweedfs/weed/filer" |
|
||||
"github.com/chrislusf/seaweedfs/weed/glog" |
|
||||
|
|
||||
_ "github.com/lib/pq" |
|
||||
_ "path/filepath" |
|
||||
"path/filepath" |
|
||||
) |
|
||||
|
|
||||
const ( |
|
||||
default_maxIdleConnections = 100 |
|
||||
default_maxOpenConnections = 50 |
|
||||
filesTableName = "files" |
|
||||
directoriesTableName = "directories" |
|
||||
) |
|
||||
|
|
||||
var ( |
|
||||
_init_db sync.Once |
|
||||
_db_connection *sql.DB |
|
||||
) |
|
||||
|
|
||||
type PostgresConf struct { |
|
||||
User string |
|
||||
Password string |
|
||||
HostName string |
|
||||
Port int |
|
||||
DataBase string |
|
||||
SslMode string |
|
||||
MaxIdleConnections int |
|
||||
MaxOpenConnections int |
|
||||
} |
|
||||
|
|
||||
type PostgresStore struct { |
|
||||
db *sql.DB |
|
||||
server string |
|
||||
user string |
|
||||
password string |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) CreateFile(fullFilePath string, fid string) (err error) { |
|
||||
|
|
||||
var old_fid string |
|
||||
if old_fid, err = s.query(fullFilePath); err != nil && err != sql.ErrNoRows { |
|
||||
return fmt.Errorf("PostgresStore Put operation failed when querying path %s: err is %v", fullFilePath, err) |
|
||||
} else { |
|
||||
if len(old_fid) == 0 { |
|
||||
err = s.insert(fullFilePath, fid) |
|
||||
if err != nil { |
|
||||
return fmt.Errorf("PostgresStore Put operation failed when inserting path %s with fid %s : err is %v", fullFilePath, fid, err) |
|
||||
} |
|
||||
} else { |
|
||||
err = s.update(fullFilePath, fid) |
|
||||
if err != nil { |
|
||||
return fmt.Errorf("PostgresStore Put operation failed when updating path %s with fid %s : err is %v", fullFilePath, fid, err) |
|
||||
} |
|
||||
} |
|
||||
} |
|
||||
return |
|
||||
|
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) FindFile(fullFilePath string) (fid string, err error) { |
|
||||
|
|
||||
if err != nil { |
|
||||
return "", fmt.Errorf("PostgresStore Get operation can not parse file path %s: err is %v", fullFilePath, err) |
|
||||
} |
|
||||
fid, err = s.query(fullFilePath) |
|
||||
|
|
||||
return fid, err |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) LookupDirectoryEntry(dirPath string, name string) (found bool, fileId string, err error) { |
|
||||
fullPath := filepath.Join(dirPath, name) |
|
||||
if fileId, err = s.FindFile(fullPath); err == nil { |
|
||||
return true, fileId, nil |
|
||||
} |
|
||||
if _, _, err := s.lookupDirectory(fullPath); err == nil { |
|
||||
return true, "", err |
|
||||
} |
|
||||
return false, "", err |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) DeleteFile(fullFilePath string) (fid string, err error) { |
|
||||
if err != nil { |
|
||||
return "", fmt.Errorf("PostgresStore Delete operation can not parse file path %s: err is %v", fullFilePath, err) |
|
||||
} |
|
||||
if fid, err = s.query(fullFilePath); err != nil { |
|
||||
return "", fmt.Errorf("PostgresStore Delete operation failed when querying path %s: err is %v", fullFilePath, err) |
|
||||
} else if fid == "" { |
|
||||
return "", nil |
|
||||
} |
|
||||
if err = s.delete(fullFilePath); err != nil { |
|
||||
return "", fmt.Errorf("PostgresStore Delete operation failed when deleting path %s: err is %v", fullFilePath, err) |
|
||||
} else { |
|
||||
return "", nil |
|
||||
} |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) ListDirectories(dirPath string) (dirs []filer.DirectoryName, err error) { |
|
||||
|
|
||||
dirs, err = s.findDirectories(dirPath, 1000) |
|
||||
|
|
||||
glog.V(3).Infof("Postgres ListDirs = found %d directories under %s", len(dirs), dirPath) |
|
||||
|
|
||||
return dirs, err |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) ListFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) { |
|
||||
files, err = s.findFiles(dirPath, lastFileName, limit) |
|
||||
return files, err |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) DeleteDirectory(dirPath string, recursive bool) (err error) { |
|
||||
err = s.deleteDirectory(dirPath, recursive) |
|
||||
if err != nil { |
|
||||
glog.V(0).Infof("Error in Postgres DeleteDir '%s' (recursive = '%t'): %s", err) |
|
||||
} |
|
||||
return err |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) Move(fromPath string, toPath string) (err error) { |
|
||||
glog.V(3).Infoln("Calling posgres_store Move") |
|
||||
return errors.New("Move is not yet implemented for the PostgreSQL store.") |
|
||||
} |
|
||||
|
|
||||
//func NewPostgresStore(master string, confs []PostgresConf, isSharding bool, shardCount int) *PostgresStore {
|
|
||||
func NewPostgresStore(master string, conf PostgresConf) *PostgresStore { |
|
||||
pg := &PostgresStore{ |
|
||||
db: getDbConnection(conf), |
|
||||
} |
|
||||
|
|
||||
pg.createDirectoriesTable() |
|
||||
|
|
||||
if err := pg.createFilesTable(); err != nil { |
|
||||
fmt.Printf("create table failed %v", err) |
|
||||
} |
|
||||
|
|
||||
return pg |
|
||||
} |
|
||||
|
|
||||
func (s *PostgresStore) Close() { |
|
||||
s.db.Close() |
|
||||
} |
|
@ -1,50 +0,0 @@ |
|||||
package redis_store |
|
||||
|
|
||||
import ( |
|
||||
"github.com/chrislusf/seaweedfs/weed/filer" |
|
||||
|
|
||||
"github.com/go-redis/redis" |
|
||||
) |
|
||||
|
|
||||
type RedisStore struct { |
|
||||
Client *redis.Client |
|
||||
} |
|
||||
|
|
||||
func NewRedisStore(hostPort string, password string, database int) *RedisStore { |
|
||||
client := redis.NewClient(&redis.Options{ |
|
||||
Addr: hostPort, |
|
||||
Password: password, |
|
||||
DB: database, |
|
||||
}) |
|
||||
return &RedisStore{Client: client} |
|
||||
} |
|
||||
|
|
||||
func (s *RedisStore) Get(fullFileName string) (fid string, err error) { |
|
||||
fid, err = s.Client.Get(fullFileName).Result() |
|
||||
if err == redis.Nil { |
|
||||
err = filer.ErrNotFound |
|
||||
} |
|
||||
return fid, err |
|
||||
} |
|
||||
func (s *RedisStore) Put(fullFileName string, fid string) (err error) { |
|
||||
_, err = s.Client.Set(fullFileName, fid, 0).Result() |
|
||||
if err == redis.Nil { |
|
||||
err = nil |
|
||||
} |
|
||||
return err |
|
||||
} |
|
||||
|
|
||||
// Currently the fid is not returned
|
|
||||
func (s *RedisStore) Delete(fullFileName string) (err error) { |
|
||||
_, err = s.Client.Del(fullFileName).Result() |
|
||||
if err == redis.Nil { |
|
||||
err = nil |
|
||||
} |
|
||||
return err |
|
||||
} |
|
||||
|
|
||||
func (s *RedisStore) Close() { |
|
||||
if s.Client != nil { |
|
||||
s.Client.Close() |
|
||||
} |
|
||||
} |
|
@ -1,45 +0,0 @@ |
|||||
There are two main components of a filer: directories and files. |
|
||||
|
|
||||
My previous approach was to use a sequence number to generate directoryId. |
|
||||
However, this is not scalable. The id generation itself is a bottleneck. |
|
||||
It needs careful locking and deduplication checking to get a directoryId. |
|
||||
|
|
||||
In a second design, each directory is deterministically mapped to UUID version 3, |
|
||||
which uses MD5 to map a tuple of <uuid, name> to a version 3 UUID. |
|
||||
However, this UUID3 approach is logically the same as storing the full path. |
|
||||
|
|
||||
Storing the full path is the simplest design. |
|
||||
|
|
||||
The separator is a special byte, 0x00. |
|
||||
|
|
||||
When writing a file: |
|
||||
<file parent full path, separator, file name> => fileId, file properties |
|
||||
For folders: |
|
||||
The filer breaks the directory path into folders. |
|
||||
for each folder: |
|
||||
if it is not in cache: |
|
||||
check whether the folder is created in the KVS, if not: |
|
||||
set <folder parent full path, separator, folder name> => directory properties |
|
||||
if no permission for the folder: |
|
||||
break |
|
||||
|
|
||||
|
|
||||
The filer caches the most recently used folder permissions with a TTL. |
|
||||
So any folder permission change may take up to one TTL interval to take effect. |
|
||||
|
|
||||
|
|
||||
|
|
||||
When listing the directory: |
|
||||
prefix scan using (the folder full path + separator) as the prefix (see the sketch below) |
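A minimal Go sketch of this key scheme (the helper names fileKey and listPrefix are illustrative only; the new weed/filer2/leveldb store added in this change builds the same keys in its genKey and genDirectoryKeyPrefix helpers):

package main

import "fmt"

const separator = byte(0x00)

// fileKey builds <parent full path> 0x00 <file name>, the key written for a file entry.
func fileKey(parentFullPath, fileName string) []byte {
	key := append([]byte(parentFullPath), separator)
	return append(key, []byte(fileName)...)
}

// listPrefix builds <folder full path> 0x00, the prefix scanned when listing a folder.
func listPrefix(folderFullPath string) []byte {
	return append([]byte(folderFullPath), separator)
}

func main() {
	fmt.Printf("%q\n", fileKey("/home/chris", "file1.jpg")) // "/home/chris\x00file1.jpg"
	fmt.Printf("%q\n", listPrefix("/home/chris"))           // "/home/chris\x00"
}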
|
||||
|
|
||||
The downside: |
|
||||
1. Renaming a folder requires recursively processing all sub folders and files. |
|
||||
2. Moving a folder requires recursively processing all sub folders and files. |
|
||||
So these operations are not allowed if the folder is not empty. |
|
||||
|
|
||||
Allowing: |
|
||||
1. Rename a file |
|
||||
2. Move a file to a different folder |
|
||||
3. Delete an empty folder |
|
||||
|
|
||||
|
|
@ -0,0 +1,130 @@ |
|||||
|
package abstract_sql |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"fmt" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
) |
||||
|
|
||||
|
type AbstractSqlStore struct { |
||||
|
DB *sql.DB |
||||
|
SqlInsert string |
||||
|
SqlUpdate string |
||||
|
SqlFind string |
||||
|
SqlDelete string |
||||
|
SqlListExclusive string |
||||
|
SqlListInclusive string |
||||
|
} |
||||
|
|
||||
|
func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) { |
||||
|
|
||||
|
dir, name := entry.FullPath.DirAndName() |
||||
|
meta, err := entry.EncodeAttributesAndChunks() |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("encode %s: %s", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
res, err := store.DB.Exec(store.SqlInsert, hashToLong(dir), name, dir, meta) |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("insert %s: %s", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
_, err = res.RowsAffected() |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("insert %s but no rows affected: %s", entry.FullPath, err) |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) { |
||||
|
|
||||
|
dir, name := entry.FullPath.DirAndName() |
||||
|
meta, err := entry.EncodeAttributesAndChunks() |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("encode %s: %s", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
res, err := store.DB.Exec(store.SqlUpdate, meta, hashToLong(dir), name, dir) |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("update %s: %s", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
_, err = res.RowsAffected() |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("update %s but no rows affected: %s", entry.FullPath, err) |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entry, error) { |
||||
|
|
||||
|
dir, name := fullpath.DirAndName() |
||||
|
row := store.DB.QueryRow(store.SqlFind, hashToLong(dir), name, dir) |
||||
|
var data []byte |
||||
|
if err := row.Scan(&data); err != nil { |
||||
|
return nil, fmt.Errorf("read entry %s: %v", fullpath, err) |
||||
|
} |
||||
|
|
||||
|
entry := &filer2.Entry{ |
||||
|
FullPath: fullpath, |
||||
|
} |
||||
|
if err := entry.DecodeAttributesAndChunks(data); err != nil { |
||||
|
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
return entry, nil |
||||
|
} |
||||
|
|
||||
|
func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) (error) { |
||||
|
|
||||
|
dir, name := fullpath.DirAndName() |
||||
|
|
||||
|
res, err := store.DB.Exec(store.SqlDelete, hashToLong(dir), name, dir) |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("delete %s: %s", fullpath, err) |
||||
|
} |
||||
|
|
||||
|
_, err = res.RowsAffected() |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("delete %s but no rows affected: %s", fullpath, err) |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { |
||||
|
|
||||
|
sqlText := store.SqlListExclusive |
||||
|
if inclusive { |
||||
|
sqlText = store.SqlListInclusive |
||||
|
} |
||||
|
|
||||
|
rows, err := store.DB.Query(sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) |
||||
|
if err != nil { |
||||
|
return nil, fmt.Errorf("list %s : %v", fullpath, err) |
||||
|
} |
||||
|
defer rows.Close() |
||||
|
|
||||
|
for rows.Next() { |
||||
|
var name string |
||||
|
var data []byte |
||||
|
if err = rows.Scan(&name, &data); err != nil { |
||||
|
glog.V(0).Infof("scan %s : %v", fullpath, err) |
||||
|
return nil, fmt.Errorf("scan %s: %v", fullpath, err) |
||||
|
} |
||||
|
|
||||
|
entry := &filer2.Entry{ |
||||
|
FullPath: filer2.NewFullPath(string(fullpath), name), |
||||
|
} |
||||
|
if err = entry.DecodeAttributesAndChunks(data); err != nil { |
||||
|
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err) |
||||
|
return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
entries = append(entries, entry) |
||||
|
} |
||||
|
|
||||
|
return entries, nil |
||||
|
} |
@ -0,0 +1,32 @@ |
|||||
|
package abstract_sql |
||||
|
|
||||
|
import ( |
||||
|
"crypto/md5" |
||||
|
"io" |
||||
|
) |
||||
|
|
||||
|
// hashToLong returns the first 64 bits of the MD5 hash of dir as an int64
|
||||
|
func hashToLong(dir string) (v int64) { |
||||
|
h := md5.New() |
||||
|
io.WriteString(h, dir) |
||||
|
|
||||
|
b := h.Sum(nil) |
||||
|
|
||||
|
v += int64(b[0]) |
||||
|
v <<= 8 |
||||
|
v += int64(b[1]) |
||||
|
v <<= 8 |
||||
|
v += int64(b[2]) |
||||
|
v <<= 8 |
||||
|
v += int64(b[3]) |
||||
|
v <<= 8 |
||||
|
v += int64(b[4]) |
||||
|
v <<= 8 |
||||
|
v += int64(b[5]) |
||||
|
v <<= 8 |
||||
|
v += int64(b[6]) |
||||
|
v <<= 8 |
||||
|
v += int64(b[7]) |
||||
|
|
||||
|
return |
||||
|
} |
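For reference, the chain of shifts above amounts to reading the first 8 bytes of the digest big-endian; a sketch of an equivalent helper using encoding/binary (hashToLongAlt is a hypothetical name, not part of this change):

package abstract_sql

import (
	"crypto/md5"
	"encoding/binary"
)

// hashToLongAlt produces the same value as hashToLong above: the first 8 bytes
// of the MD5 digest of dir, interpreted big-endian and reinterpreted as an int64.
func hashToLongAlt(dir string) int64 {
	sum := md5.Sum([]byte(dir))
	return int64(binary.BigEndian.Uint64(sum[:8]))
}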
@ -0,0 +1,14 @@ |
|||||
|
1. create a keyspace |
||||
|
|
||||
|
CREATE KEYSPACE seaweedfs WITH replication = {'class':'SimpleStrategy', 'replication_factor' : 1}; |
||||
|
|
||||
|
2. create filemeta table |
||||
|
|
||||
|
USE seaweedfs; |
||||
|
|
||||
|
CREATE TABLE filemeta ( |
||||
|
directory varchar, |
||||
|
name varchar, |
||||
|
meta blob, |
||||
|
PRIMARY KEY (directory, name) |
||||
|
) WITH CLUSTERING ORDER BY (name ASC); |
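With the keyspace and table created, the setup can be sanity-checked from Go using the same gocql calls the new cassandra store makes; a sketch, assuming a local node at localhost:9042 as in the sample configuration:

package main

import (
	"fmt"
	"log"

	"github.com/gocql/gocql"
)

func main() {
	// Connect to the keyspace created above, the same way the new cassandra store does.
	cluster := gocql.NewCluster("localhost:9042")
	cluster.Keyspace = "seaweedfs"
	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatalf("open cassandra: %v", err)
	}
	defer session.Close()

	// Insert one row into filemeta and read it back.
	if err := session.Query("INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?)",
		"/some/dir", "file1", []byte("test")).Exec(); err != nil {
		log.Fatalf("insert: %v", err)
	}
	var meta []byte
	if err := session.Query("SELECT meta FROM filemeta WHERE directory=? AND name=?",
		"/some/dir", "file1").Scan(&meta); err != nil {
		log.Fatalf("select: %v", err)
	}
	fmt.Printf("read back %d bytes\n", len(meta))
}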
@ -0,0 +1,131 @@ |
|||||
|
package cassandra |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
"github.com/gocql/gocql" |
||||
|
"github.com/spf13/viper" |
||||
|
) |
||||
|
|
||||
|
func init() { |
||||
|
filer2.Stores = append(filer2.Stores, &CassandraStore{}) |
||||
|
} |
||||
|
|
||||
|
type CassandraStore struct { |
||||
|
cluster *gocql.ClusterConfig |
||||
|
session *gocql.Session |
||||
|
} |
||||
|
|
||||
|
func (store *CassandraStore) GetName() string { |
||||
|
return "cassandra" |
||||
|
} |
||||
|
|
||||
|
func (store *CassandraStore) Initialize(viper *viper.Viper) (err error) { |
||||
|
return store.initialize( |
||||
|
viper.GetString("keyspace"), |
||||
|
viper.GetStringSlice("hosts"), |
||||
|
) |
||||
|
} |
||||
|
|
||||
|
func (store *CassandraStore) initialize(keyspace string, hosts []string) (err error) { |
||||
|
store.cluster = gocql.NewCluster(hosts...) |
||||
|
store.cluster.Keyspace = keyspace |
||||
|
store.cluster.Consistency = gocql.LocalQuorum |
||||
|
store.session, err = store.cluster.CreateSession() |
||||
|
if err != nil { |
||||
|
glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace) |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) { |
||||
|
|
||||
|
dir, name := entry.FullPath.DirAndName() |
||||
|
meta, err := entry.EncodeAttributesAndChunks() |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("encode %s: %s", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
if err := store.session.Query( |
||||
|
"INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?)", |
||||
|
dir, name, meta).Exec(); err != nil { |
||||
|
return fmt.Errorf("insert %s: %s", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (store *CassandraStore) UpdateEntry(entry *filer2.Entry) (err error) { |
||||
|
|
||||
|
return store.InsertEntry(entry) |
||||
|
} |
||||
|
|
||||
|
func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { |
||||
|
|
||||
|
dir, name := fullpath.DirAndName() |
||||
|
var data []byte |
||||
|
if err := store.session.Query( |
||||
|
"SELECT meta FROM filemeta WHERE directory=? AND name=?", |
||||
|
dir, name).Consistency(gocql.One).Scan(&data); err != nil { |
||||
|
if err != gocql.ErrNotFound { |
||||
|
return nil, fmt.Errorf("read entry %s: %v", fullpath, err) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if len(data) == 0 { |
||||
|
return nil, fmt.Errorf("not found: %s", fullpath) |
||||
|
} |
||||
|
|
||||
|
entry = &filer2.Entry{ |
||||
|
FullPath: fullpath, |
||||
|
} |
||||
|
err = entry.DecodeAttributesAndChunks(data) |
||||
|
if err != nil { |
||||
|
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
return entry, nil |
||||
|
} |
||||
|
|
||||
|
func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error { |
||||
|
|
||||
|
dir, name := fullpath.DirAndName() |
||||
|
|
||||
|
if err := store.session.Query( |
||||
|
"DELETE FROM filemeta WHERE directory=? AND name=?", |
||||
|
dir, name).Exec(); err != nil { |
||||
|
return fmt.Errorf("delete %s : %v", fullpath, err) |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, |
||||
|
limit int) (entries []*filer2.Entry, err error) { |
||||
|
|
||||
|
cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?" |
||||
|
if inclusive { |
||||
|
cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?" |
||||
|
} |
||||
|
|
||||
|
var data []byte |
||||
|
var name string |
||||
|
iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter() |
||||
|
for iter.Scan(&name, &data) { |
||||
|
entry := &filer2.Entry{ |
||||
|
FullPath: filer2.NewFullPath(string(fullpath), name), |
||||
|
} |
||||
|
if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil { |
||||
|
err = decodeErr |
||||
|
glog.V(0).Infof("list %s : %v", entry.FullPath, err) |
||||
|
break |
||||
|
} |
||||
|
entries = append(entries, entry) |
||||
|
} |
||||
|
if err := iter.Close(); err != nil { |
||||
|
glog.V(0).Infof("list iterator close: %v", err) |
||||
|
} |
||||
|
|
||||
|
return entries, err |
||||
|
} |
@ -0,0 +1,126 @@ |
|||||
|
package filer2 |
||||
|
|
||||
|
import ( |
||||
|
"os" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
"github.com/spf13/viper" |
||||
|
) |
||||
|
|
||||
|
const ( |
||||
|
FILER_TOML_EXAMPLE = ` |
||||
|
# A sample TOML config file for SeaweedFS filer store |
||||
|
|
||||
|
[memory] |
||||
|
# local in memory, mostly for testing purposes |
||||
|
enabled = false |
||||
|
|
||||
|
[leveldb] |
||||
|
# local on disk, mostly for simple single-machine setup, fairly scalable |
||||
|
enabled = false |
||||
|
dir = "." # directory to store level db files |
||||
|
|
||||
|
#################################################### |
||||
|
# multiple filers on shared storage, fairly scalable |
||||
|
#################################################### |
||||
|
|
||||
|
[mysql] |
||||
|
# CREATE TABLE IF NOT EXISTS filemeta ( |
||||
|
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', |
||||
|
# name VARCHAR(1000) COMMENT 'directory or file name', |
||||
|
# directory VARCHAR(4096) COMMENT 'full path to parent directory', |
||||
|
# meta BLOB, |
||||
|
# PRIMARY KEY (dirhash, name) |
||||
|
# ) DEFAULT CHARSET=utf8; |
||||
|
enabled = true |
||||
|
hostname = "localhost" |
||||
|
port = 3306 |
||||
|
username = "root" |
||||
|
password = "" |
||||
|
database = "" # create or use an existing database |
||||
|
connection_max_idle = 2 |
||||
|
connection_max_open = 100 |
||||
|
|
||||
|
[postgres] |
||||
|
# CREATE TABLE IF NOT EXISTS filemeta ( |
||||
|
# dirhash BIGINT, |
||||
|
# name VARCHAR(1000), |
||||
|
# directory VARCHAR(4096), |
||||
|
# meta bytea, |
||||
|
# PRIMARY KEY (dirhash, name) |
||||
|
# ); |
||||
|
enabled = false |
||||
|
hostname = "localhost" |
||||
|
port = 5432 |
||||
|
username = "postgres" |
||||
|
password = "" |
||||
|
database = "" # create or use an existing database |
||||
|
sslmode = "disable" |
||||
|
connection_max_idle = 100 |
||||
|
connection_max_open = 100 |
||||
|
|
||||
|
[cassandra] |
||||
|
# CREATE TABLE filemeta ( |
||||
|
# directory varchar, |
||||
|
# name varchar, |
||||
|
# meta blob, |
||||
|
# PRIMARY KEY (directory, name) |
||||
|
# ) WITH CLUSTERING ORDER BY (name ASC); |
||||
|
enabled = false |
||||
|
keyspace="seaweedfs" |
||||
|
hosts=[ |
||||
|
"localhost:9042", |
||||
|
] |
||||
|
|
||||
|
[redis] |
||||
|
enabled = true |
||||
|
address = "localhost:6379" |
||||
|
password = "" |
||||
|
db = 0 |
||||
|
|
||||
|
` |
||||
|
) |
||||
|
|
||||
|
var ( |
||||
|
Stores []FilerStore |
||||
|
) |
||||
|
|
||||
|
func (f *Filer) LoadConfiguration() { |
||||
|
|
||||
|
// find a filer store
|
||||
|
viper.SetConfigName("filer") // name of config file (without extension)
|
||||
|
viper.AddConfigPath(".") // optionally look for config in the working directory
|
||||
|
viper.AddConfigPath("$HOME/.seaweedfs") // call multiple times to add many search paths
|
||||
|
viper.AddConfigPath("/etc/seaweedfs/") // path to look for the config file in
|
||||
|
if err := viper.ReadInConfig(); err != nil { // Handle errors reading the config file
|
||||
|
glog.Fatalf("Failed to load filer.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/" + |
||||
|
"\n\nPlease follow this example and add a filer.toml file to " + |
||||
|
"current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/:\n" + FILER_TOML_EXAMPLE) |
||||
|
} |
||||
|
|
||||
|
glog.V(0).Infof("Reading filer configuration from %s", viper.ConfigFileUsed()) |
||||
|
for _, store := range Stores { |
||||
|
if viper.GetBool(store.GetName() + ".enabled") { |
||||
|
viperSub := viper.Sub(store.GetName()) |
||||
|
if err := store.Initialize(viperSub); err != nil { |
||||
|
glog.Fatalf("Failed to initialize store for %s: %+v", |
||||
|
store.GetName(), err) |
||||
|
} |
||||
|
f.SetStore(store) |
||||
|
glog.V(0).Infof("Configure filer for %s from %s", store.GetName(), viper.ConfigFileUsed()) |
||||
|
return |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
println() |
||||
|
println("Supported filer stores are:") |
||||
|
for _, store := range Stores { |
||||
|
println(" " + store.GetName()) |
||||
|
} |
||||
|
|
||||
|
println() |
||||
|
println("Please configure a supported filer store in", viper.ConfigFileUsed()) |
||||
|
println() |
||||
|
|
||||
|
os.Exit(-1) |
||||
|
} |
@ -1,42 +0,0 @@ |
|||||
package embedded |
|
||||
|
|
||||
import ( |
|
||||
"github.com/syndtr/goleveldb/leveldb" |
|
||||
"github.com/chrislusf/seaweedfs/weed/filer2" |
|
||||
) |
|
||||
|
|
||||
type EmbeddedStore struct { |
|
||||
db *leveldb.DB |
|
||||
} |
|
||||
|
|
||||
func NewEmbeddedStore(dir string) (filer *EmbeddedStore, err error) { |
|
||||
filer = &EmbeddedStore{} |
|
||||
if filer.db, err = leveldb.OpenFile(dir, nil); err != nil { |
|
||||
return |
|
||||
} |
|
||||
return |
|
||||
} |
|
||||
|
|
||||
func (filer *EmbeddedStore) InsertEntry(entry *filer2.Entry) (err error) { |
|
||||
return nil |
|
||||
} |
|
||||
|
|
||||
func (filer *EmbeddedStore) AddDirectoryLink(directory *filer2.Entry, delta int32) (err error) { |
|
||||
return nil |
|
||||
} |
|
||||
|
|
||||
func (filer *EmbeddedStore) AppendFileChunk(fullpath filer2.FullPath, fileChunk filer2.FileChunk) (err error) { |
|
||||
return nil |
|
||||
} |
|
||||
|
|
||||
func (filer *EmbeddedStore) FindEntry(fullpath filer2.FullPath) (found bool, entry *filer2.Entry, err error) { |
|
||||
return false, nil, nil |
|
||||
} |
|
||||
|
|
||||
func (filer *EmbeddedStore) DeleteEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { |
|
||||
return nil, nil |
|
||||
} |
|
||||
|
|
||||
func (filer *EmbeddedStore) ListDirectoryEntries(fullpath filer2.FullPath) (entries []*filer2.Entry, err error) { |
|
||||
return nil, nil |
|
||||
} |
|
@ -0,0 +1,42 @@ |
|||||
|
package filer2 |
||||
|
|
||||
|
import ( |
||||
|
"os" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
||||
|
) |
||||
|
|
||||
|
type Attr struct { |
||||
|
Mtime time.Time // time of last modification
|
||||
|
Crtime time.Time // time of creation (OS X only)
|
||||
|
Mode os.FileMode // file mode
|
||||
|
Uid uint32 // owner uid
|
||||
|
Gid uint32 // group gid
|
||||
|
Mime string |
||||
|
} |
||||
|
|
||||
|
func (attr Attr) IsDirectory() bool { |
||||
|
return attr.Mode&os.ModeDir > 0 |
||||
|
} |
||||
|
|
||||
|
type Entry struct { |
||||
|
FullPath |
||||
|
|
||||
|
Attr |
||||
|
|
||||
|
// the following is for files
|
||||
|
Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"` |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Size() uint64 { |
||||
|
return TotalSize(entry.Chunks) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Timestamp() time.Time { |
||||
|
if entry.IsDirectory() { |
||||
|
return entry.Crtime |
||||
|
} else { |
||||
|
return entry.Mtime |
||||
|
} |
||||
|
} |
@ -0,0 +1,45 @@ |
|||||
|
package filer2 |
||||
|
|
||||
|
import ( |
||||
|
"os" |
||||
|
"time" |
||||
|
|
||||
|
"fmt" |
||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
||||
|
"github.com/gogo/protobuf/proto" |
||||
|
) |
||||
|
|
||||
|
func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) { |
||||
|
message := &filer_pb.Entry{ |
||||
|
Attributes: &filer_pb.FuseAttributes{ |
||||
|
Crtime: entry.Attr.Crtime.Unix(), |
||||
|
Mtime: entry.Attr.Mtime.Unix(), |
||||
|
FileMode: uint32(entry.Attr.Mode), |
||||
|
Uid: entry.Uid, |
||||
|
Gid: entry.Gid, |
||||
|
Mime: entry.Mime, |
||||
|
}, |
||||
|
Chunks: entry.Chunks, |
||||
|
} |
||||
|
return proto.Marshal(message) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error { |
||||
|
|
||||
|
message := &filer_pb.Entry{} |
||||
|
|
||||
|
if err := proto.UnmarshalMerge(blob, message); err != nil { |
||||
|
return fmt.Errorf("decoding value blob for %s: %v", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
entry.Attr.Crtime = time.Unix(message.Attributes.Crtime, 0) |
||||
|
entry.Attr.Mtime = time.Unix(message.Attributes.Mtime, 0) |
||||
|
entry.Attr.Mode = os.FileMode(message.Attributes.FileMode) |
||||
|
entry.Attr.Uid = message.Attributes.Uid |
||||
|
entry.Attr.Gid = message.Attributes.Gid |
||||
|
entry.Attr.Mime = message.Attributes.Mime |
||||
|
|
||||
|
entry.Chunks = message.Chunks |
||||
|
|
||||
|
return nil |
||||
|
} |
@ -0,0 +1,316 @@ |
|||||
|
package filer2 |
||||
|
|
||||
|
import ( |
||||
|
"log" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
||||
|
) |
||||
|
|
||||
|
func TestCompactFileChunks(t *testing.T) { |
||||
|
chunks := []*filer_pb.FileChunk{ |
||||
|
{Offset: 10, Size: 100, FileId: "abc", Mtime: 50}, |
||||
|
{Offset: 100, Size: 100, FileId: "def", Mtime: 100}, |
||||
|
{Offset: 200, Size: 100, FileId: "ghi", Mtime: 200}, |
||||
|
{Offset: 110, Size: 200, FileId: "jkl", Mtime: 300}, |
||||
|
} |
||||
|
|
||||
|
compacted, garbage := CompactFileChunks(chunks) |
||||
|
|
||||
|
log.Printf("Compacted: %+v", compacted) |
||||
|
log.Printf("Garbage : %+v", garbarge) |
||||
|
|
||||
|
if len(compacted) != 3 { |
||||
|
t.Fatalf("unexpected compacted: %d", len(compacted)) |
||||
|
} |
||||
|
if len(garbage) != 1 { |
||||
|
t.Fatalf("unexpected garbarge: %d", len(garbarge)) |
||||
|
} |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func TestIntervalMerging(t *testing.T) { |
||||
|
|
||||
|
testcases := []struct { |
||||
|
Chunks []*filer_pb.FileChunk |
||||
|
Expected []*visibleInterval |
||||
|
}{ |
||||
|
// case 0: normal
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134}, |
||||
|
{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353}, |
||||
|
}, |
||||
|
Expected: []*visibleInterval{ |
||||
|
{start: 0, stop: 100, fileId: "abc"}, |
||||
|
{start: 100, stop: 200, fileId: "asdf"}, |
||||
|
{start: 200, stop: 300, fileId: "fsad"}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 1: updates overwrite full chunks
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, |
||||
|
}, |
||||
|
Expected: []*visibleInterval{ |
||||
|
{start: 0, stop: 200, fileId: "asdf"}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 2: updates overwrite part of previous chunks
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134}, |
||||
|
}, |
||||
|
Expected: []*visibleInterval{ |
||||
|
{start: 0, stop: 50, fileId: "asdf"}, |
||||
|
{start: 50, stop: 100, fileId: "abc"}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 3: updates overwrite full chunks
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, |
||||
|
{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154}, |
||||
|
}, |
||||
|
Expected: []*visibleInterval{ |
||||
|
{start: 0, stop: 50, fileId: "asdf"}, |
||||
|
{start: 50, stop: 300, fileId: "xxxx"}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 4: updates far away from prev chunks
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, |
||||
|
{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154}, |
||||
|
}, |
||||
|
Expected: []*visibleInterval{ |
||||
|
{start: 0, stop: 200, fileId: "asdf"}, |
||||
|
{start: 250, stop: 500, fileId: "xxxx"}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 5: updates overwrite full chunks
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184}, |
||||
|
{Offset: 70, Size: 150, FileId: "abc", Mtime: 143}, |
||||
|
{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134}, |
||||
|
}, |
||||
|
Expected: []*visibleInterval{ |
||||
|
{start: 0, stop: 200, fileId: "asdf"}, |
||||
|
{start: 200, stop: 220, fileId: "abc"}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 6: same updates
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
}, |
||||
|
Expected: []*visibleInterval{ |
||||
|
{start: 0, stop: 100, fileId: "abc"}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 7: real updates
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", Mtime: 123}, |
||||
|
{Offset: 0, Size: 3145728, FileId: "3,029565bf3092", Mtime: 130}, |
||||
|
{Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", Mtime: 140}, |
||||
|
{Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", Mtime: 150}, |
||||
|
{Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", Mtime: 160}, |
||||
|
{Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", Mtime: 170}, |
||||
|
}, |
||||
|
Expected: []*visibleInterval{ |
||||
|
{start: 0, stop: 2097152, fileId: "3,029565bf3092"}, |
||||
|
{start: 2097152, stop: 5242880, fileId: "6,029632f47ae2"}, |
||||
|
{start: 5242880, stop: 8388608, fileId: "2,029734c5aa10"}, |
||||
|
{start: 8388608, stop: 11534336, fileId: "5,02982f80de50"}, |
||||
|
{start: 11534336, stop: 14376529, fileId: "7,0299ad723803"}, |
||||
|
}, |
||||
|
}, |
||||
|
} |
||||
|
|
||||
|
for i, testcase := range testcases { |
||||
|
log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i) |
||||
|
intervals := nonOverlappingVisibleIntervals(testcase.Chunks) |
||||
|
for x, interval := range intervals { |
||||
|
log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s", |
||||
|
i, x, interval.start, interval.stop, interval.fileId) |
||||
|
} |
||||
|
for x, interval := range intervals { |
||||
|
if interval.start != testcase.Expected[x].start { |
||||
|
t.Fatalf("failed on test case %d, interval %d, start %d, expect %d", |
||||
|
i, x, interval.start, testcase.Expected[x].start) |
||||
|
} |
||||
|
if interval.stop != testcase.Expected[x].stop { |
||||
|
t.Fatalf("failed on test case %d, interval %d, stop %d, expect %d", |
||||
|
i, x, interval.stop, testcase.Expected[x].stop) |
||||
|
} |
||||
|
if interval.fileId != testcase.Expected[x].fileId { |
||||
|
t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s", |
||||
|
i, x, interval.fileId, testcase.Expected[x].fileId) |
||||
|
} |
||||
|
} |
||||
|
if len(intervals) != len(testcase.Expected) { |
||||
|
t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func TestChunksReading(t *testing.T) { |
||||
|
|
||||
|
testcases := []struct { |
||||
|
Chunks []*filer_pb.FileChunk |
||||
|
Offset int64 |
||||
|
Size int |
||||
|
Expected []*ChunkView |
||||
|
}{ |
||||
|
// case 0: normal
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134}, |
||||
|
{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353}, |
||||
|
}, |
||||
|
Offset: 0, |
||||
|
Size: 250, |
||||
|
Expected: []*ChunkView{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0}, |
||||
|
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, |
||||
|
{Offset: 0, Size: 50, FileId: "fsad", LogicOffset: 200}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 1: updates overwrite full chunks
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, |
||||
|
}, |
||||
|
Offset: 50, |
||||
|
Size: 100, |
||||
|
Expected: []*ChunkView{ |
||||
|
{Offset: 50, Size: 100, FileId: "asdf", LogicOffset: 50}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 2: updates overwrite part of previous chunks
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134}, |
||||
|
}, |
||||
|
Offset: 25, |
||||
|
Size: 50, |
||||
|
Expected: []*ChunkView{ |
||||
|
{Offset: 25, Size: 25, FileId: "asdf", LogicOffset: 25}, |
||||
|
{Offset: 0, Size: 25, FileId: "abc", LogicOffset: 50}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 3: updates overwrite full chunks
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, |
||||
|
{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154}, |
||||
|
}, |
||||
|
Offset: 0, |
||||
|
Size: 200, |
||||
|
Expected: []*ChunkView{ |
||||
|
{Offset: 0, Size: 50, FileId: "asdf", LogicOffset: 0}, |
||||
|
{Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 50}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 4: updates far away from prev chunks
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, |
||||
|
{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154}, |
||||
|
}, |
||||
|
Offset: 0, |
||||
|
Size: 400, |
||||
|
Expected: []*ChunkView{ |
||||
|
{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0}, |
||||
|
// {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen
|
||||
|
}, |
||||
|
}, |
||||
|
// case 5: updates overwrite full chunks
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184}, |
||||
|
{Offset: 70, Size: 150, FileId: "abc", Mtime: 143}, |
||||
|
{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134}, |
||||
|
}, |
||||
|
Offset: 0, |
||||
|
Size: 220, |
||||
|
Expected: []*ChunkView{ |
||||
|
{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0}, |
||||
|
{Offset: 0, Size: 20, FileId: "abc", LogicOffset: 200}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 6: same updates
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
}, |
||||
|
Offset: 0, |
||||
|
Size: 100, |
||||
|
Expected: []*ChunkView{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0}, |
||||
|
}, |
||||
|
}, |
||||
|
// case 7: edge cases
|
||||
|
{ |
||||
|
Chunks: []*filer_pb.FileChunk{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, |
||||
|
{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134}, |
||||
|
{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353}, |
||||
|
}, |
||||
|
Offset: 0, |
||||
|
Size: 200, |
||||
|
Expected: []*ChunkView{ |
||||
|
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0}, |
||||
|
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, |
||||
|
}, |
||||
|
}, |
||||
|
} |
||||
|
|
||||
|
for i, testcase := range testcases { |
||||
|
log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i) |
||||
|
chunks := ViewFromChunks(testcase.Chunks, testcase.Offset, testcase.Size) |
||||
|
for x, chunk := range chunks { |
||||
|
log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s", |
||||
|
i, x, chunk.Offset, chunk.Size, chunk.FileId) |
||||
|
if chunk.Offset != testcase.Expected[x].Offset { |
||||
|
t.Fatalf("failed on read case %d, chunk %d, Offset %d, expect %d", |
||||
|
i, x, chunk.Offset, testcase.Expected[x].Offset) |
||||
|
} |
||||
|
if chunk.Size != testcase.Expected[x].Size { |
||||
|
t.Fatalf("failed on read case %d, chunk %d, Size %d, expect %d", |
||||
|
i, x, chunk.Size, testcase.Expected[x].Size) |
||||
|
} |
||||
|
if chunk.FileId != testcase.Expected[x].FileId { |
||||
|
t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s", |
||||
|
i, x, chunk.FileId, testcase.Expected[x].FileId) |
||||
|
} |
||||
|
if chunk.LogicOffset != testcase.Expected[x].LogicOffset { |
||||
|
t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d", |
||||
|
i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset) |
||||
|
} |
||||
|
} |
||||
|
if len(chunks) != len(testcase.Expected) { |
||||
|
t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
} |
@ -0,0 +1,60 @@ |
|||||
|
package filer2 |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"context" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/pb/master_pb" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
"google.golang.org/grpc" |
||||
|
) |
||||
|
|
||||
|
func (fs *Filer) GetMaster() string { |
||||
|
return fs.currentMaster |
||||
|
} |
||||
|
|
||||
|
func (fs *Filer) KeepConnectedToMaster() { |
||||
|
glog.V(0).Infof("Filer bootstraps with masters %v", fs.masters) |
||||
|
for _, master := range fs.masters { |
||||
|
glog.V(0).Infof("Connecting to %v", master) |
||||
|
withMasterClient(master, func(client master_pb.SeaweedClient) error { |
||||
|
stream, err := client.KeepConnected(context.Background()) |
||||
|
if err != nil { |
||||
|
glog.V(0).Infof("failed to keep connected to %s: %v", master, err) |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
glog.V(0).Infof("Connected to %v", master) |
||||
|
fs.currentMaster = master |
||||
|
|
||||
|
for { |
||||
|
time.Sleep(time.Duration(float32(10*1e3)*0.25) * time.Millisecond) |
||||
|
|
||||
|
if err = stream.Send(&master_pb.Empty{}); err != nil { |
||||
|
glog.V(0).Infof("failed to send to %s: %v", master, err) |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
if _, err = stream.Recv(); err != nil { |
||||
|
glog.V(0).Infof("failed to receive from %s: %v", master, err) |
||||
|
return err |
||||
|
} |
||||
|
} |
||||
|
}) |
||||
|
fs.currentMaster = "" |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error { |
||||
|
|
||||
|
grpcConnection, err := grpc.Dial(master, grpc.WithInsecure()) |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("fail to dial %s: %v", master, err) |
||||
|
} |
||||
|
defer grpcConnection.Close() |
||||
|
|
||||
|
client := master_pb.NewSeaweedClient(grpcConnection) |
||||
|
|
||||
|
return fn(client) |
||||
|
} |
@ -1,66 +0,0 @@ |
|||||
package filer2 |
|
||||
|
|
||||
import ( |
|
||||
"errors" |
|
||||
"os" |
|
||||
"time" |
|
||||
"path/filepath" |
|
||||
) |
|
||||
|
|
||||
type FileId string //file id in SeaweedFS
|
|
||||
type FullPath string |
|
||||
|
|
||||
func (fp FullPath) DirAndName() (string, string) { |
|
||||
dir, name := filepath.Split(string(fp)) |
|
||||
if dir == "/" { |
|
||||
return dir, name |
|
||||
} |
|
||||
if len(dir) < 1 { |
|
||||
return "/", "" |
|
||||
} |
|
||||
return dir[:len(dir)-1], name |
|
||||
} |
|
||||
|
|
||||
type Attr struct { |
|
||||
Mtime time.Time // time of last modification
|
|
||||
Crtime time.Time // time of creation (OS X only)
|
|
||||
Mode os.FileMode // file mode
|
|
||||
Uid uint32 // owner uid
|
|
||||
Gid uint32 // group gid
|
|
||||
} |
|
||||
|
|
||||
type Entry struct { |
|
||||
FullPath |
|
||||
|
|
||||
Attr |
|
||||
|
|
||||
// the following is for files
|
|
||||
Chunks []FileChunk `json:"chunks,omitempty"` |
|
||||
} |
|
||||
|
|
||||
type FileChunk struct { |
|
||||
Fid FileId `json:"fid,omitempty"` |
|
||||
Offset int64 `json:"offset,omitempty"` |
|
||||
Size uint64 `json:"size,omitempty"` // size in bytes
|
|
||||
} |
|
||||
|
|
||||
type AbstractFiler interface { |
|
||||
CreateEntry(*Entry) (error) |
|
||||
AppendFileChunk(FullPath, FileChunk) (err error) |
|
||||
FindEntry(FullPath) (found bool, fileEntry *Entry, err error) |
|
||||
DeleteEntry(FullPath) (fileEntry *Entry, err error) |
|
||||
|
|
||||
ListDirectoryEntries(dirPath FullPath) ([]*Entry, error) |
|
||||
UpdateEntry(*Entry) (error) |
|
||||
} |
|
||||
|
|
||||
var ErrNotFound = errors.New("filer: no entry is found in filer store") |
|
||||
|
|
||||
type FilerStore interface { |
|
||||
InsertEntry(*Entry) (error) |
|
||||
AppendFileChunk(FullPath, FileChunk) (err error) |
|
||||
FindEntry(FullPath) (found bool, entry *Entry, err error) |
|
||||
DeleteEntry(FullPath) (fileEntry *Entry, err error) |
|
||||
|
|
||||
ListDirectoryEntries(dirPath FullPath) ([]*Entry, error) |
|
||||
} |
|
@ -0,0 +1,18 @@ |
|||||
|
package filer2 |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"github.com/spf13/viper" |
||||
|
) |
||||
|
|
||||
|
type FilerStore interface { |
||||
|
GetName() string |
||||
|
Initialize(viper *viper.Viper) error |
||||
|
InsertEntry(*Entry) error |
||||
|
UpdateEntry(*Entry) (err error) |
||||
|
FindEntry(FullPath) (entry *Entry, err error) |
||||
|
DeleteEntry(FullPath) (err error) |
||||
|
ListDirectoryEntries(dirPath FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) |
||||
|
} |
||||
|
|
||||
|
var ErrNotFound = errors.New("filer: no entry is found in filer store") |
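Each backend in this change registers an implementation of this interface from an init() function (the cassandra, leveldb, and mysql stores in this diff all follow that pattern). A bare-bones in-memory sketch of such a store, purely illustrative and not part of the commit:

package example

import (
	"errors"
	"sync"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/spf13/viper"
)

func init() {
	// Same registration pattern as the real stores.
	filer2.Stores = append(filer2.Stores, &MapStore{entries: make(map[filer2.FullPath]*filer2.Entry)})
}

// MapStore keeps entries in a process-local map; just enough to satisfy FilerStore.
type MapStore struct {
	mu      sync.Mutex
	entries map[filer2.FullPath]*filer2.Entry
}

func (s *MapStore) GetName() string                     { return "example_map" }
func (s *MapStore) Initialize(viper *viper.Viper) error { return nil }

func (s *MapStore) InsertEntry(entry *filer2.Entry) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.entries[entry.FullPath] = entry
	return nil
}

func (s *MapStore) UpdateEntry(entry *filer2.Entry) (err error) { return s.InsertEntry(entry) }

func (s *MapStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entry, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if entry, found := s.entries[fullpath]; found {
		return entry, nil
	}
	return nil, filer2.ErrNotFound
}

func (s *MapStore) DeleteEntry(fullpath filer2.FullPath) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.entries, fullpath)
	return nil
}

func (s *MapStore) ListDirectoryEntries(dirPath filer2.FullPath, startFileName string, inclusive bool, limit int) ([]*filer2.Entry, error) {
	// Listing is omitted in this sketch; a real store must return children of dirPath in name order.
	return nil, errors.New("not implemented in this sketch")
}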
@ -0,0 +1,31 @@ |
|||||
|
package filer2 |
||||
|
|
||||
|
import ( |
||||
|
"path/filepath" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
type FullPath string |
||||
|
|
||||
|
func NewFullPath(dir, name string) FullPath { |
||||
|
if strings.HasSuffix(dir, "/") { |
||||
|
return FullPath(dir + name) |
||||
|
} |
||||
|
return FullPath(dir + "/" + name) |
||||
|
} |
||||
|
|
||||
|
func (fp FullPath) DirAndName() (string, string) { |
||||
|
dir, name := filepath.Split(string(fp)) |
||||
|
if dir == "/" { |
||||
|
return dir, name |
||||
|
} |
||||
|
if len(dir) < 1 { |
||||
|
return "/", "" |
||||
|
} |
||||
|
return dir[:len(dir)-1], name |
||||
|
} |
||||
|
|
||||
|
func (fp FullPath) Name() string { |
||||
|
_, name := filepath.Split(string(fp)) |
||||
|
return name |
||||
|
} |
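A quick illustration of how these path helpers split values (the paths are arbitrary examples):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer2"
)

func main() {
	p := filer2.NewFullPath("/home/chris", "file1.jpg")
	dir, name := p.DirAndName()
	fmt.Println(dir, name) // /home/chris file1.jpg
	fmt.Println(p.Name())  // file1.jpg

	// A file directly under the root keeps "/" as its directory.
	root := filer2.FullPath("/file1.jpg")
	dir, name = root.DirAndName()
	fmt.Println(dir, name) // / file1.jpg
}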
@ -0,0 +1,169 @@ |
|||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"fmt" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
weed_util "github.com/chrislusf/seaweedfs/weed/util" |
||||
|
"github.com/spf13/viper" |
||||
|
"github.com/syndtr/goleveldb/leveldb" |
||||
|
leveldb_util "github.com/syndtr/goleveldb/leveldb/util" |
||||
|
) |
||||
|
|
||||
|
const ( |
||||
|
DIR_FILE_SEPARATOR = byte(0x00) |
||||
|
) |
||||
|
|
||||
|
func init() { |
||||
|
filer2.Stores = append(filer2.Stores, &LevelDBStore{}) |
||||
|
} |
||||
|
|
||||
|
type LevelDBStore struct { |
||||
|
db *leveldb.DB |
||||
|
} |
||||
|
|
||||
|
func (store *LevelDBStore) GetName() string { |
||||
|
return "leveldb" |
||||
|
} |
||||
|
|
||||
|
func (store *LevelDBStore) Initialize(viper *viper.Viper) (err error) { |
||||
|
dir := viper.GetString("dir") |
||||
|
return store.initialize(dir) |
||||
|
} |
||||
|
|
||||
|
func (store *LevelDBStore) initialize(dir string) (err error) { |
||||
|
if err := weed_util.TestFolderWritable(dir); err != nil { |
||||
|
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) |
||||
|
} |
||||
|
|
||||
|
if store.db, err = leveldb.OpenFile(dir, nil); err != nil { |
||||
|
return |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) { |
||||
|
key := genKey(entry.DirAndName()) |
||||
|
|
||||
|
value, err := entry.EncodeAttributesAndChunks() |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) |
||||
|
} |
||||
|
|
||||
|
err = store.db.Put(key, value, nil) |
||||
|
|
||||
|
if err != nil { |
||||
|
return fmt.Errorf("persisting %s : %v", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
|
||||
|
|
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (store *LevelDBStore) UpdateEntry(entry *filer2.Entry) (err error) { |
||||
|
|
||||
|
return store.InsertEntry(entry) |
||||
|
} |
||||
|
|
||||
|
func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { |
||||
|
key := genKey(fullpath.DirAndName()) |
||||
|
|
||||
|
data, err := store.db.Get(key, nil) |
||||
|
|
||||
|
if err == leveldb.ErrNotFound { |
||||
|
return nil, filer2.ErrNotFound |
||||
|
} |
||||
|
if err != nil { |
||||
|
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
entry = &filer2.Entry{ |
||||
|
FullPath: fullpath, |
||||
|
} |
||||
|
err = entry.DecodeAttributesAndChunks(data) |
||||
|
if err != nil { |
||||
|
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
|
||||
|
|
||||
|
return entry, nil |
||||
|
} |
||||
|
|
||||
|
func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) { |
||||
|
key := genKey(fullpath.DirAndName()) |
||||
|
|
||||
|
err = store.db.Delete(key, nil) |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("delete %s : %v", fullpath, err) |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, |
||||
|
limit int) (entries []*filer2.Entry, err error) { |
||||
|
|
||||
|
directoryPrefix := genDirectoryKeyPrefix(fullpath, "") |
||||
|
|
||||
|
iter := store.db.NewIterator(&leveldb_util.Range{Start: genDirectoryKeyPrefix(fullpath, startFileName)}, nil) |
||||
|
for iter.Next() { |
||||
|
key := iter.Key() |
||||
|
if !bytes.HasPrefix(key, directoryPrefix) { |
||||
|
break |
||||
|
} |
||||
|
fileName := getNameFromKey(key) |
||||
|
if fileName == "" { |
||||
|
continue |
||||
|
} |
||||
|
if fileName == startFileName && !inclusive { |
||||
|
continue |
||||
|
} |
||||
|
limit-- |
||||
|
if limit < 0 { |
||||
|
break |
||||
|
} |
||||
|
entry := &filer2.Entry{ |
||||
|
FullPath: filer2.NewFullPath(string(fullpath), fileName), |
||||
|
} |
||||
|
if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { |
||||
|
err = decodeErr |
||||
|
glog.V(0).Infof("list %s : %v", entry.FullPath, err) |
||||
|
break |
||||
|
} |
||||
|
entries = append(entries, entry) |
||||
|
} |
||||
|
iter.Release() |
||||
|
|
||||
|
return entries, err |
||||
|
} |
||||
|
|
||||
|
func genKey(dirPath, fileName string) (key []byte) { |
||||
|
key = []byte(dirPath) |
||||
|
key = append(key, DIR_FILE_SEPARATOR) |
||||
|
key = append(key, []byte(fileName)...) |
||||
|
return key |
||||
|
} |
||||
|
|
||||
|
func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { |
||||
|
keyPrefix = []byte(string(fullpath)) |
||||
|
keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR) |
||||
|
if len(startFileName) > 0 { |
||||
|
keyPrefix = append(keyPrefix, []byte(startFileName)...) |
||||
|
} |
||||
|
return keyPrefix |
||||
|
} |
||||
|
|
||||
|
func getNameFromKey(key []byte) string { |
||||
|
|
||||
|
sepIndex := len(key) - 1 |
||||
|
for sepIndex >= 0 && key[sepIndex] != DIR_FILE_SEPARATOR { |
||||
|
sepIndex-- |
||||
|
} |
||||
|
|
||||
|
return string(key[sepIndex+1:]) |
||||
|
|
||||
|
} |
@ -0,0 +1,61 @@ |
|||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
|
"io/ioutil" |
||||
|
"os" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
func TestCreateAndFind(t *testing.T) { |
||||
|
filer := filer2.NewFiler(nil) |
||||
|
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") |
||||
|
defer os.RemoveAll(dir) |
||||
|
store := &LevelDBStore{} |
||||
|
store.initialize(dir) |
||||
|
filer.SetStore(store) |
||||
|
filer.DisableDirectoryCache() |
||||
|
|
||||
|
fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") |
||||
|
|
||||
|
entry1 := &filer2.Entry{ |
||||
|
FullPath: fullpath, |
||||
|
Attr: filer2.Attr{ |
||||
|
Mode: 0440, |
||||
|
Uid: 1234, |
||||
|
Gid: 5678, |
||||
|
}, |
||||
|
} |
||||
|
|
||||
|
if err := filer.CreateEntry(entry1); err != nil { |
||||
|
t.Errorf("create entry %v: %v", entry1.FullPath, err) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
entry, err := filer.FindEntry(fullpath) |
||||
|
|
||||
|
if err != nil { |
||||
|
t.Errorf("find entry: %v", err) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
if entry.FullPath != entry1.FullPath { |
||||
|
t.Errorf("find wrong entry: %v", entry.FullPath) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
// checking one upper directory
|
||||
|
entries, _ := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one"), "", false, 100) |
||||
|
if len(entries) != 1 { |
||||
|
t.Errorf("list entries count: %v", len(entries)) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
// checking the root directory
|
||||
|
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) |
||||
|
if len(entries) != 1 { |
||||
|
t.Errorf("list entries count: %v", len(entries)) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
} |
@ -0,0 +1,67 @@ |
|||||
|
package mysql |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"fmt" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql" |
||||
|
_ "github.com/go-sql-driver/mysql" |
||||
|
"github.com/spf13/viper" |
||||
|
) |
||||
|
|
||||
|
const ( |
||||
|
CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8" |
||||
|
) |
||||
|
|
||||
|
func init() { |
||||
|
filer2.Stores = append(filer2.Stores, &MysqlStore{}) |
||||
|
} |
||||
|
|
||||
|
type MysqlStore struct { |
||||
|
abstract_sql.AbstractSqlStore |
||||
|
} |
||||
|
|
||||
|
func (store *MysqlStore) GetName() string { |
||||
|
return "mysql" |
||||
|
} |
||||
|
|
||||
|
func (store *MysqlStore) Initialize(viper *viper.Viper) (err error) { |
||||
|
return store.initialize( |
||||
|
viper.GetString("username"), |
||||
|
viper.GetString("password"), |
||||
|
viper.GetString("hostname"), |
||||
|
viper.GetInt("port"), |
||||
|
viper.GetString("database"), |
||||
|
viper.GetInt("connection_max_idle"), |
||||
|
viper.GetInt("connection_max_open"), |
||||
|
) |
||||
|
} |
||||
|
|
||||
|
func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int) (err error) { |
||||
|
|
||||
|
store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)" |
||||
|
store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?" |
||||
|
store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?" |
||||
|
store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?" |
||||
|
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?" |
||||
|
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?" |
||||
|
|
||||
|
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) |
||||
|
var dbErr error |
||||
|
store.DB, dbErr = sql.Open("mysql", sqlUrl) |
||||
|
if dbErr != nil { |
||||
|
store.DB.Close() |
||||
|
store.DB = nil |
||||
|
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err) |
||||
|
} |
||||
|
|
||||
|
store.DB.SetMaxIdleConns(maxIdle) |
||||
|
store.DB.SetMaxOpenConns(maxOpen) |
||||
|
|
||||
|
if err = store.DB.Ping(); err != nil { |
||||
|
return fmt.Errorf("connect to %s error:%v", sqlUrl, err) |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
@ -0,0 +1,17 @@ |
|||||
|
|
||||
|
1. create "seaweedfs" database |
||||
|
|
||||
|
export PGHOME=/Library/PostgreSQL/10 |
||||
|
$PGHOME/bin/createdb --username=postgres --password seaweedfs |
||||
|
|
||||
|
2. create "filemeta" table |
||||
|
$PGHOME/bin/psql --username=postgres --password seaweedfs |
||||
|
|
||||
|
CREATE TABLE IF NOT EXISTS filemeta ( |
||||
|
dirhash BIGINT, |
||||
|
name VARCHAR(1000), |
||||
|
directory VARCHAR(4096), |
||||
|
meta bytea, |
||||
|
PRIMARY KEY (dirhash, name) |
||||
|
); |
||||
|
|
@ -0,0 +1,68 @@ |
|||||
|
package postgres |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"fmt" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql" |
||||
|
_ "github.com/lib/pq" |
||||
|
"github.com/spf13/viper" |
||||
|
) |
||||
|
|
||||
|
const ( |
||||
|
CONNECTION_URL_PATTERN = "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30" |
||||
|
) |
||||
|
|
||||
|
func init() { |
||||
|
filer2.Stores = append(filer2.Stores, &PostgresStore{}) |
||||
|
} |
||||
|
|
||||
|
type PostgresStore struct { |
||||
|
abstract_sql.AbstractSqlStore |
||||
|
} |
||||
|
|
||||
|
func (store *PostgresStore) GetName() string { |
||||
|
return "postgres" |
||||
|
} |
||||
|
|
||||
|
func (store *PostgresStore) Initialize(viper *viper.Viper) (err error) { |
||||
|
return store.initialize( |
||||
|
viper.GetString("username"), |
||||
|
viper.GetString("password"), |
||||
|
viper.GetString("hostname"), |
||||
|
viper.GetInt("port"), |
||||
|
viper.GetString("database"), |
||||
|
viper.GetString("sslmode"), |
||||
|
viper.GetInt("connection_max_idle"), |
||||
|
viper.GetInt("connection_max_open"), |
||||
|
) |
||||
|
} |
||||
|
|
||||
|
func (store *PostgresStore) initialize(user, password, hostname string, port int, database, sslmode string, maxIdle, maxOpen int) (err error) { |
||||
|
|
||||
|
store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)" |
||||
|
store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4" |
||||
|
store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" |
||||
|
store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" |
||||
|
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" |
||||
|
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" |
||||
|
|
||||
|
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, password, database, sslmode) |
||||
|
var dbErr error |
||||
|
store.DB, dbErr = sql.Open("postgres", sqlUrl) |
||||
|
if dbErr != nil { |
||||
|
store.DB.Close() |
||||
|
store.DB = nil |
||||
|
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err) |
||||
|
} |
||||
|
|
||||
|
store.DB.SetMaxIdleConns(maxIdle) |
||||
|
store.DB.SetMaxOpenConns(maxOpen) |
||||
|
|
||||
|
if err = store.DB.Ping(); err != nil { |
||||
|
return fmt.Errorf("connect to %s error:%v", sqlUrl, err) |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
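The store above pulls all of its settings from a *viper.Viper handle. A minimal wiring sketch, assuming the import path introduced in this change and placeholder connection values (the key names simply mirror the GetString/GetInt calls in Initialize):

package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/weed/filer2/postgres"
	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// keys mirror what PostgresStore.Initialize reads; values are placeholders
	v.Set("username", "postgres")
	v.Set("password", "secret")
	v.Set("hostname", "localhost")
	v.Set("port", 5432)
	v.Set("database", "seaweedfs")
	v.Set("sslmode", "disable")
	v.Set("connection_max_idle", 10)
	v.Set("connection_max_open", 100)

	store := &postgres.PostgresStore{}
	if err := store.Initialize(v); err != nil {
		log.Fatalf("initialize postgres store: %v", err)
	}
}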
@ -0,0 +1,167 @@ |
|||||
|
package redis |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
"github.com/go-redis/redis" |
||||
|
"github.com/spf13/viper" |
||||
|
"sort" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
const ( |
||||
|
DIR_LIST_MARKER = "\x00" |
||||
|
) |
||||
|
|
||||
|
func init() { |
||||
|
filer2.Stores = append(filer2.Stores, &RedisStore{}) |
||||
|
} |
||||
|
|
||||
|
type RedisStore struct { |
||||
|
Client *redis.Client |
||||
|
} |
||||
|
|
||||
|
func (store *RedisStore) GetName() string { |
||||
|
return "redis" |
||||
|
} |
||||
|
|
||||
|
func (store *RedisStore) Initialize(viper *viper.Viper) (err error) { |
||||
|
return store.initialize( |
||||
|
viper.GetString("address"), |
||||
|
viper.GetString("password"), |
||||
|
viper.GetInt("database"), |
||||
|
) |
||||
|
} |
||||
|
|
||||
|
func (store *RedisStore) initialize(hostPort string, password string, database int) (err error) { |
||||
|
store.Client = redis.NewClient(&redis.Options{ |
||||
|
Addr: hostPort, |
||||
|
Password: password, |
||||
|
DB: database, |
||||
|
}) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (store *RedisStore) InsertEntry(entry *filer2.Entry) (err error) { |
||||
|
|
||||
|
value, err := entry.EncodeAttributesAndChunks() |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) |
||||
|
} |
||||
|
|
||||
|
_, err = store.Client.Set(string(entry.FullPath), value, 0).Result() |
||||
|
|
||||
|
if err != nil { |
||||
|
return fmt.Errorf("persisting %s : %v", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
dir, name := entry.FullPath.DirAndName() |
||||
|
if name != "" { |
||||
|
_, err = store.Client.SAdd(genDirectoryListKey(dir), name).Result() |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (store *RedisStore) UpdateEntry(entry *filer2.Entry) (err error) { |
||||
|
|
||||
|
return store.InsertEntry(entry) |
||||
|
} |
||||
|
|
||||
|
func (store *RedisStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { |
||||
|
|
||||
|
data, err := store.Client.Get(string(fullpath)).Result() |
||||
|
if err == redis.Nil { |
||||
|
return nil, filer2.ErrNotFound |
||||
|
} |
||||
|
|
||||
|
if err != nil {
return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
||||
|
|
||||
|
entry = &filer2.Entry{ |
||||
|
FullPath: fullpath, |
||||
|
} |
||||
|
err = entry.DecodeAttributesAndChunks([]byte(data)) |
||||
|
if err != nil { |
||||
|
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) |
||||
|
} |
||||
|
|
||||
|
return entry, nil |
||||
|
} |
||||
|
|
||||
|
func (store *RedisStore) DeleteEntry(fullpath filer2.FullPath) (err error) { |
||||
|
|
||||
|
_, err = store.Client.Del(string(fullpath)).Result() |
||||
|
|
||||
|
if err != nil { |
||||
|
return fmt.Errorf("delete %s : %v", fullpath, err) |
||||
|
} |
||||
|
|
||||
|
dir, name := fullpath.DirAndName() |
||||
|
if name != "" { |
||||
|
_, err = store.Client.SRem(genDirectoryListKey(dir), name).Result() |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (store *RedisStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, |
||||
|
limit int) (entries []*filer2.Entry, err error) { |
||||
|
|
||||
|
members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() |
||||
|
if err != nil { |
||||
|
return nil, fmt.Errorf("list %s : %v", fullpath, err) |
||||
|
} |
||||
|
|
||||
|
// skip
|
||||
|
if startFileName != "" { |
||||
|
var t []string |
||||
|
for _, m := range members { |
||||
|
if strings.Compare(m, startFileName) >= 0 { |
||||
|
if m == startFileName { |
||||
|
if inclusive { |
||||
|
t = append(t, m) |
||||
|
} |
||||
|
} else { |
||||
|
t = append(t, m) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
members = t |
||||
|
} |
||||
|
|
||||
|
// sort
|
||||
|
sort.Slice(members, func(i, j int) bool { |
||||
|
return strings.Compare(members[i], members[j]) < 0 |
||||
|
}) |
||||
|
|
||||
|
// limit
|
||||
|
if limit < len(members) { |
||||
|
members = members[:limit] |
||||
|
} |
||||
|
|
||||
|
// fetch entry meta
|
||||
|
for _, fileName := range members { |
||||
|
path := filer2.NewFullPath(string(fullpath), fileName) |
||||
|
entry, err := store.FindEntry(path) |
||||
|
if err != nil { |
||||
|
glog.V(0).Infof("list %s : %v", path, err) |
||||
|
} else { |
||||
|
entries = append(entries, entry) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
return entries, err |
||||
|
} |
||||
|
|
||||
|
func genDirectoryListKey(dir string) (dirList string) { |
||||
|
return dir + DIR_LIST_MARKER |
||||
|
} |
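For a quick sense of the data model above (one string key per full path, plus a set keyed by the directory name and the \x00 marker that holds the child names), a small same-package usage sketch in the style of the LevelDB test earlier in this change; the address and paths are placeholders and a locally reachable Redis server is assumed:

func exampleRedisStoreUsage() {
	store := &RedisStore{}
	_ = store.initialize("localhost:6379", "", 0)

	entry := &filer2.Entry{
		FullPath: filer2.FullPath("/home/chris/photos/cat.jpg"),
		Attr:     filer2.Attr{Mode: 0644, Uid: 1000, Gid: 1000},
	}
	if err := store.InsertEntry(entry); err != nil {
		fmt.Println("insert:", err)
		return
	}

	// ListDirectoryEntries reads the "/home/chris/photos\x00" set, sorts the
	// names, then fetches each child entry by its full-path key.
	entries, _ := store.ListDirectoryEntries(filer2.FullPath("/home/chris/photos"), "", true, 100)
	for _, e := range entries {
		fmt.Println(e.FullPath)
	}
}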
@ -0,0 +1,165 @@ |
|||||
|
package filesys |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"bytes" |
||||
|
"time" |
||||
|
"context" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
||||
|
"github.com/chrislusf/seaweedfs/weed/operation" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
) |
||||
|
|
||||
|
type ContinuousDirtyPages struct { |
||||
|
hasData bool |
||||
|
Offset int64 |
||||
|
Size int64 |
||||
|
Data []byte |
||||
|
f *File |
||||
|
} |
||||
|
|
||||
|
func newDirtyPages(file *File) *ContinuousDirtyPages { |
||||
|
return &ContinuousDirtyPages{ |
||||
|
Data: make([]byte, file.wfs.chunkSizeLimit), |
||||
|
f: file, |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { |
||||
|
|
||||
|
var chunk *filer_pb.FileChunk |
||||
|
|
||||
|
if len(data) > len(pages.Data) { |
||||
|
// this is more than what buffer can hold.
|
||||
|
|
||||
|
// flush existing
|
||||
|
if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { |
||||
|
if chunk != nil {
glog.V(4).Infof("%s/%s flush existing [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
chunks = append(chunks, chunk)
}
||||
|
} else { |
||||
|
glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) |
||||
|
return |
||||
|
} |
||||
|
pages.Size = 0 |
||||
|
|
||||
|
// flush the big page
|
||||
|
if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil { |
||||
|
if chunk != nil { |
||||
|
glog.V(4).Infof("%s/%s flush big request [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) |
||||
|
chunks = append(chunks, chunk) |
||||
|
} |
||||
|
} else { |
||||
|
glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
return |
||||
|
} |
||||
|
|
||||
|
if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) || |
||||
|
pages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) { |
||||
|
// if the data is out of range,
|
||||
|
// or buffer is full if adding new data,
|
||||
|
// flush current buffer and add new data
|
||||
|
|
||||
|
// println("offset", offset, "size", len(data), "existing offset", pages.Offset, "size", pages.Size)
|
||||
|
|
||||
|
if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { |
||||
|
if chunk != nil { |
||||
|
glog.V(4).Infof("%s/%s add save [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) |
||||
|
chunks = append(chunks, chunk) |
||||
|
} |
||||
|
} else { |
||||
|
glog.V(0).Infof("%s/%s add save [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) |
||||
|
return |
||||
|
} |
||||
|
pages.Offset = offset |
||||
|
pages.Size = int64(len(data)) |
||||
|
copy(pages.Data, data) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
copy(pages.Data[offset-pages.Offset:], data) |
||||
|
pages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset) |
||||
|
|
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) { |
||||
|
|
||||
|
if pages.Size == 0 { |
||||
|
return nil, nil |
||||
|
} |
||||
|
|
||||
|
if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { |
||||
|
pages.Size = 0 |
||||
|
if chunk != nil { |
||||
|
glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) |
||||
|
} |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) { |
||||
|
return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset) |
||||
|
} |
||||
|
|
||||
|
func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) { |
||||
|
|
||||
|
if len(buf) == 0 {
return nil, nil
}
||||
|
|
||||
|
var fileId, host string |
||||
|
|
||||
|
if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { |
||||
|
|
||||
|
request := &filer_pb.AssignVolumeRequest{ |
||||
|
Count: 1, |
||||
|
Replication: pages.f.wfs.replication, |
||||
|
Collection: pages.f.wfs.collection, |
||||
|
} |
||||
|
|
||||
|
resp, err := client.AssignVolume(ctx, request) |
||||
|
if err != nil { |
||||
|
glog.V(0).Infof("assign volume failure %v: %v", request, err) |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
fileId, host = resp.FileId, resp.Url |
||||
|
|
||||
|
return nil |
||||
|
}); err != nil { |
||||
|
return nil, fmt.Errorf("filer assign volume: %v", err) |
||||
|
} |
||||
|
|
||||
|
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) |
||||
|
bufReader := bytes.NewReader(buf)
||||
|
uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, "") |
||||
|
if err != nil { |
||||
|
glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) |
||||
|
return nil, fmt.Errorf("upload data: %v", err) |
||||
|
} |
||||
|
if uploadResult.Error != "" { |
||||
|
glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, err) |
||||
|
return nil, fmt.Errorf("upload result: %v", uploadResult.Error) |
||||
|
} |
||||
|
|
||||
|
return &filer_pb.FileChunk{ |
||||
|
FileId: fileId, |
||||
|
Offset: offset, |
||||
|
Size: uint64(len(buf)), |
||||
|
Mtime: time.Now().UnixNano(), |
||||
|
}, nil |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func max(x, y int64) int64 { |
||||
|
if x > y { |
||||
|
return x |
||||
|
} |
||||
|
return y |
||||
|
} |
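A short worked example of the AddPage buffering policy above, with purely illustrative sizes: with a 4 MB Data buffer, a 1 MB write at offset 0 is simply copied in; a following 1 MB write at offset 1 MB extends Size to 2 MB; a write that jumps to offset 10 MB falls outside the buffered window, so the buffered [0, 2 MB) range is flushed to the volume server as one chunk and the buffer restarts at the new offset; a single write larger than the whole buffer first flushes whatever is buffered and is then uploaded directly as its own chunk.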
@ -1,75 +1,136 @@ |
|||||
package filesys |
package filesys |
||||
|
|
||||
import ( |
import ( |
||||
"context" |
|
||||
"fmt" |
|
||||
|
|
||||
"bazil.org/fuse" |
"bazil.org/fuse" |
||||
"github.com/chrislusf/seaweedfs/weed/filer" |
|
||||
"bazil.org/fuse/fs" |
"bazil.org/fuse/fs" |
||||
|
"context" |
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
"github.com/chrislusf/seaweedfs/weed/glog" |
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
||||
|
"os" |
||||
|
"path/filepath" |
||||
|
"time" |
||||
) |
) |
||||
|
|
||||
var _ = fs.Node(&File{}) |
var _ = fs.Node(&File{}) |
||||
// var _ = fs.NodeOpener(&File{})
|
|
||||
// var _ = fs.NodeFsyncer(&File{})
|
|
||||
var _ = fs.Handle(&File{}) |
|
||||
var _ = fs.HandleReadAller(&File{}) |
|
||||
// var _ = fs.HandleReader(&File{})
|
|
||||
var _ = fs.HandleWriter(&File{}) |
|
||||
|
var _ = fs.NodeOpener(&File{}) |
||||
|
var _ = fs.NodeFsyncer(&File{}) |
||||
|
var _ = fs.NodeSetattrer(&File{}) |
||||
|
|
||||
type File struct { |
type File struct { |
||||
FileId filer.FileId |
|
||||
|
Chunks []*filer_pb.FileChunk |
||||
Name string |
Name string |
||||
|
dir *Dir |
||||
wfs *WFS |
wfs *WFS |
||||
|
attributes *filer_pb.FuseAttributes |
||||
|
isOpen bool |
||||
} |
} |
||||
|
|
||||
func (file *File) Attr(context context.Context, attr *fuse.Attr) error { |
func (file *File) Attr(context context.Context, attr *fuse.Attr) error { |
||||
attr.Mode = 0444 |
|
||||
return file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { |
|
||||
|
|
||||
request := &filer_pb.GetFileAttributesRequest{ |
|
||||
|
fullPath := filepath.Join(file.dir.Path, file.Name) |
||||
|
|
||||
|
if file.attributes == nil || !file.isOpen { |
||||
|
item := file.wfs.listDirectoryEntriesCache.Get(fullPath) |
||||
|
if item != nil { |
||||
|
entry := item.Value().(*filer_pb.Entry) |
||||
|
file.Chunks = entry.Chunks |
||||
|
file.attributes = entry.Attributes |
||||
|
glog.V(1).Infof("file attr read cached %v attributes", file.Name) |
||||
|
} else { |
||||
|
err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { |
||||
|
|
||||
|
request := &filer_pb.GetEntryAttributesRequest{ |
||||
Name: file.Name, |
Name: file.Name, |
||||
ParentDir: "", //TODO add parent folder
|
|
||||
FileId: string(file.FileId), |
|
||||
|
ParentDir: file.dir.Path, |
||||
} |
} |
||||
|
|
||||
glog.V(1).Infof("read file size: %v", request) |
|
||||
resp, err := client.GetFileAttributes(context, request) |
|
||||
|
resp, err := client.GetEntryAttributes(context, request) |
||||
if err != nil { |
if err != nil { |
||||
|
glog.V(0).Infof("file attr read file %v: %v", request, err) |
||||
return err |
return err |
||||
} |
} |
||||
|
|
||||
attr.Size = resp.Attributes.FileSize |
|
||||
|
file.attributes = resp.Attributes |
||||
|
file.Chunks = resp.Chunks |
||||
|
|
||||
|
glog.V(1).Infof("file attr %v %+v: %d", fullPath, file.attributes, filer2.TotalSize(file.Chunks)) |
||||
|
|
||||
return nil |
return nil |
||||
}) |
}) |
||||
|
|
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
attr.Mode = os.FileMode(file.attributes.FileMode) |
||||
|
attr.Size = filer2.TotalSize(file.Chunks) |
||||
|
attr.Mtime = time.Unix(file.attributes.Mtime, 0) |
||||
|
attr.Gid = file.attributes.Gid |
||||
|
attr.Uid = file.attributes.Uid |
||||
|
|
||||
|
return nil |
||||
|
|
||||
} |
} |
||||
|
|
||||
func (file *File) ReadAll(ctx context.Context) (content []byte, err error) { |
|
||||
|
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { |
||||
|
|
||||
err = file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { |
|
||||
|
fullPath := filepath.Join(file.dir.Path, file.Name) |
||||
|
|
||||
request := &filer_pb.GetFileContentRequest{ |
|
||||
FileId: string(file.FileId), |
|
||||
|
glog.V(3).Infof("%v file open %+v", fullPath, req) |
||||
|
|
||||
|
file.isOpen = true |
||||
|
|
||||
|
return &FileHandle{ |
||||
|
f: file, |
||||
|
dirtyPages: newDirtyPages(file), |
||||
|
RequestId: req.Header.ID, |
||||
|
NodeId: req.Header.Node, |
||||
|
Uid: req.Uid, |
||||
|
Gid: req.Gid, |
||||
|
}, nil |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { |
||||
|
fullPath := filepath.Join(file.dir.Path, file.Name) |
||||
|
|
||||
|
glog.V(3).Infof("%v file setattr %+v", fullPath, req) |
||||
|
if req.Valid.Size() { |
||||
|
|
||||
|
glog.V(3).Infof("%v file setattr set size=%v", fullPath, req.Size) |
||||
|
if req.Size == 0 { |
||||
|
// fmt.Printf("truncate %v \n", fullPath)
|
||||
|
file.Chunks = nil |
||||
|
} |
||||
|
file.attributes.FileSize = req.Size |
||||
|
} |
||||
|
if req.Valid.Mode() { |
||||
|
file.attributes.FileMode = uint32(req.Mode) |
||||
} |
} |
||||
|
|
||||
glog.V(1).Infof("read file content: %v", request) |
|
||||
resp, err := client.GetFileContent(ctx, request) |
|
||||
if err != nil { |
|
||||
return err |
|
||||
|
if req.Valid.Uid() { |
||||
|
file.attributes.Uid = req.Uid |
||||
|
} |
||||
|
|
||||
|
if req.Valid.Gid() { |
||||
|
file.attributes.Gid = req.Gid |
||||
} |
} |
||||
|
|
||||
content = resp.Content |
|
||||
|
if req.Valid.Mtime() { |
||||
|
file.attributes.Mtime = req.Mtime.Unix() |
||||
|
} |
||||
|
|
||||
return nil |
return nil |
||||
}) |
|
||||
|
|
||||
return content, err |
|
||||
} |
} |
||||
|
|
||||
func (file *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { |
|
||||
fmt.Printf("write file %+v\n", req) |
|
||||
|
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { |
||||
|
// fsync works at OS level
|
||||
|
// write the file chunks to the filer
|
||||
|
glog.V(3).Infof("%s/%s fsync file %+v", file.dir.Path, file.Name, req) |
||||
|
|
||||
return nil |
return nil |
||||
} |
} |
@ -0,0 +1,219 @@ |
|||||
|
package filesys |
||||
|
|
||||
|
import ( |
||||
|
"bazil.org/fuse" |
||||
|
"bazil.org/fuse/fs" |
||||
|
"context" |
||||
|
"fmt" |
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
||||
|
"github.com/chrislusf/seaweedfs/weed/util" |
||||
|
"strings" |
||||
|
"sync" |
||||
|
"net/http" |
||||
|
) |
||||
|
|
||||
|
type FileHandle struct { |
||||
|
// cache file has been written to
|
||||
|
dirtyPages *ContinuousDirtyPages |
||||
|
dirtyMetadata bool |
||||
|
|
||||
|
f *File |
||||
|
RequestId fuse.RequestID // unique ID for request
|
||||
|
NodeId fuse.NodeID // file or directory the request is about
|
||||
|
Uid uint32 // user ID of process making request
|
||||
|
Gid uint32 // group ID of process making request
|
||||
|
} |
||||
|
|
||||
|
var _ = fs.Handle(&FileHandle{}) |
||||
|
|
||||
|
// var _ = fs.HandleReadAller(&FileHandle{})
|
||||
|
var _ = fs.HandleReader(&FileHandle{}) |
||||
|
var _ = fs.HandleFlusher(&FileHandle{}) |
||||
|
var _ = fs.HandleWriter(&FileHandle{}) |
||||
|
var _ = fs.HandleReleaser(&FileHandle{}) |
||||
|
|
||||
|
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { |
||||
|
|
||||
|
glog.V(4).Infof("%v/%v read fh: [%d,%d)", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(req.Size)) |
||||
|
|
||||
|
if len(fh.f.Chunks) == 0 { |
||||
|
glog.V(0).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name) |
||||
|
return fmt.Errorf("empty file %v/%v", fh.f.dir.Path, fh.f.Name) |
||||
|
} |
||||
|
|
||||
|
buff := make([]byte, req.Size) |
||||
|
|
||||
|
chunkViews := filer2.ViewFromChunks(fh.f.Chunks, req.Offset, req.Size) |
||||
|
|
||||
|
var vids []string |
||||
|
for _, chunkView := range chunkViews { |
||||
|
vids = append(vids, volumeId(chunkView.FileId)) |
||||
|
} |
||||
|
|
||||
|
vid2Locations := make(map[string]*filer_pb.Locations) |
||||
|
|
||||
|
err := fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { |
||||
|
|
||||
|
glog.V(4).Infof("read fh lookup volume id locations: %v", vids) |
||||
|
resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ |
||||
|
VolumeIds: vids, |
||||
|
}) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
vid2Locations = resp.LocationsMap |
||||
|
|
||||
|
return nil |
||||
|
}) |
||||
|
|
||||
|
if err != nil { |
||||
|
glog.V(4).Infof("%v/%v read fh lookup volume ids: %v", fh.f.dir.Path, fh.f.Name, err) |
||||
|
return fmt.Errorf("failed to lookup volume ids %v: %v", vids, err) |
||||
|
} |
||||
|
|
||||
|
var totalRead int64 |
||||
|
var wg sync.WaitGroup |
||||
|
for _, chunkView := range chunkViews { |
||||
|
wg.Add(1) |
||||
|
go func(chunkView *filer2.ChunkView) { |
||||
|
defer wg.Done() |
||||
|
|
||||
|
glog.V(4).Infof("read fh reading chunk: %+v", chunkView) |
||||
|
|
||||
|
locations := vid2Locations[volumeId(chunkView.FileId)] |
||||
|
if locations == nil || len(locations.Locations) == 0 { |
||||
|
glog.V(0).Infof("failed to locate %s", chunkView.FileId) |
||||
|
err = fmt.Errorf("failed to locate %s", chunkView.FileId) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
var n int64 |
||||
|
n, err = util.ReadUrl( |
||||
|
fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId), |
||||
|
chunkView.Offset, |
||||
|
int(chunkView.Size), |
||||
|
buff[chunkView.LogicOffset-req.Offset:chunkView.LogicOffset-req.Offset+int64(chunkView.Size)]) |
||||
|
|
||||
|
if err != nil { |
||||
|
|
||||
|
glog.V(0).Infof("%v/%v read http://%s/%v %v bytes: %v", fh.f.dir.Path, fh.f.Name, locations.Locations[0].Url, chunkView.FileId, n, err) |
||||
|
|
||||
|
err = fmt.Errorf("failed to read http://%s/%s: %v", |
||||
|
locations.Locations[0].Url, chunkView.FileId, err) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView) |
||||
|
totalRead += n |
||||
|
|
||||
|
}(chunkView) |
||||
|
} |
||||
|
wg.Wait() |
||||
|
|
||||
|
resp.Data = buff[:totalRead] |
||||
|
|
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
// Write to the file handle
|
||||
|
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { |
||||
|
|
||||
|
// write the request to volume servers
|
||||
|
|
||||
|
glog.V(4).Infof("%+v/%v write fh: [%d,%d)", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data))) |
||||
|
|
||||
|
chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data) |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)), err) |
||||
|
} |
||||
|
|
||||
|
resp.Size = len(req.Data) |
||||
|
|
||||
|
if req.Offset == 0 { |
||||
|
fh.f.attributes.Mime = http.DetectContentType(req.Data) |
||||
|
fh.dirtyMetadata = true |
||||
|
} |
||||
|
|
||||
|
for _, chunk := range chunks { |
||||
|
fh.f.Chunks = append(fh.f.Chunks, chunk) |
||||
|
glog.V(1).Infof("uploaded %s/%s to %s [%d,%d)", fh.f.dir.Path, fh.f.Name, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) |
||||
|
fh.dirtyMetadata = true |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error { |
||||
|
|
||||
|
glog.V(4).Infof("%+v/%v release fh", fh.f.dir.Path, fh.f.Name) |
||||
|
|
||||
|
fh.f.isOpen = false |
||||
|
|
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// Flush - experimenting with uploading at flush, this slows operations down till it has been
|
||||
|
// completely flushed
|
||||
|
func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { |
||||
|
// fflush works at fh level
|
||||
|
// send the data to the OS
|
||||
|
glog.V(4).Infof("%s/%s fh flush %v", fh.f.dir.Path, fh.f.Name, req) |
||||
|
|
||||
|
chunk, err := fh.dirtyPages.FlushToStorage(ctx) |
||||
|
if err != nil {
glog.V(0).Infof("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err)
return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err)
}
||||
|
if chunk != nil { |
||||
|
fh.f.Chunks = append(fh.f.Chunks, chunk) |
||||
|
fh.dirtyMetadata = true |
||||
|
} |
||||
|
|
||||
|
if !fh.dirtyMetadata { |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
if len(fh.f.Chunks) == 0 { |
||||
|
glog.V(2).Infof("fh %s/%s flush skipping empty: %v", fh.f.dir.Path, fh.f.Name, req) |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
err = fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { |
||||
|
|
||||
|
request := &filer_pb.UpdateEntryRequest{ |
||||
|
Directory: fh.f.dir.Path, |
||||
|
Entry: &filer_pb.Entry{ |
||||
|
Name: fh.f.Name, |
||||
|
Attributes: fh.f.attributes, |
||||
|
Chunks: fh.f.Chunks, |
||||
|
}, |
||||
|
} |
||||
|
|
||||
|
glog.V(1).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.Chunks)) |
||||
|
for i, chunk := range fh.f.Chunks { |
||||
|
glog.V(1).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) |
||||
|
} |
||||
|
if _, err := client.UpdateEntry(ctx, request); err != nil { |
||||
|
return fmt.Errorf("update fh: %v", err) |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
}) |
||||
|
|
||||
|
if err == nil { |
||||
|
fh.dirtyMetadata = false |
||||
|
} |
||||
|
|
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
func volumeId(fileId string) string { |
||||
|
lastCommaIndex := strings.LastIndex(fileId, ",") |
||||
|
if lastCommaIndex > 0 { |
||||
|
return fileId[:lastCommaIndex] |
||||
|
} |
||||
|
return fileId |
||||
|
} |
@ -1,31 +0,0 @@ |
|||||
package filer |
|
||||
|
|
||||
import ( |
|
||||
"fmt" |
|
||||
"net/url" |
|
||||
|
|
||||
"github.com/chrislusf/seaweedfs/weed/security" |
|
||||
"github.com/chrislusf/seaweedfs/weed/util" |
|
||||
) |
|
||||
|
|
||||
type SubmitResult struct { |
|
||||
FileName string `json:"fileName,omitempty"` |
|
||||
FileUrl string `json:"fileUrl,omitempty"` |
|
||||
Fid string `json:"fid,omitempty"` |
|
||||
Size uint32 `json:"size,omitempty"` |
|
||||
Error string `json:"error,omitempty"` |
|
||||
} |
|
||||
|
|
||||
func RegisterFile(filer string, path string, fileId string, secret security.Secret) error { |
|
||||
// TODO: jwt need to be used
|
|
||||
_ = security.GenJwt(secret, fileId) |
|
||||
|
|
||||
values := make(url.Values) |
|
||||
values.Add("path", path) |
|
||||
values.Add("fileId", fileId) |
|
||||
_, err := util.Post("http://"+filer+"/admin/register", values) |
|
||||
if err != nil { |
|
||||
return fmt.Errorf("Failed to register path %s on filer %s to file id %s : %v", path, filer, fileId, err) |
|
||||
} |
|
||||
return nil |
|
||||
} |
|
@ -1,41 +0,0 @@ |
|||||
package weed_server |
|
||||
|
|
||||
import ( |
|
||||
"net/http" |
|
||||
|
|
||||
"github.com/chrislusf/seaweedfs/weed/glog" |
|
||||
) |
|
||||
|
|
||||
/* |
|
||||
Move a folder or a file, with 4 Use cases: |
|
||||
mv fromDir toNewDir |
|
||||
mv fromDir toOldDir |
|
||||
mv fromFile toDir |
|
||||
mv fromFile toFile |
|
||||
|
|
||||
Wildcard is not supported. |
|
||||
|
|
||||
*/ |
|
||||
func (fs *FilerServer) moveHandler(w http.ResponseWriter, r *http.Request) { |
|
||||
from := r.FormValue("from") |
|
||||
to := r.FormValue("to") |
|
||||
err := fs.filer.Move(from, to) |
|
||||
if err != nil { |
|
||||
glog.V(4).Infoln("moving", from, "->", to, err.Error()) |
|
||||
writeJsonError(w, r, http.StatusInternalServerError, err) |
|
||||
} else { |
|
||||
w.WriteHeader(http.StatusOK) |
|
||||
} |
|
||||
} |
|
||||
|
|
||||
func (fs *FilerServer) registerHandler(w http.ResponseWriter, r *http.Request) { |
|
||||
path := r.FormValue("path") |
|
||||
fileId := r.FormValue("fileId") |
|
||||
err := fs.filer.CreateFile(path, fileId) |
|
||||
if err != nil { |
|
||||
glog.V(4).Infof("register %s to %s error: %v", fileId, path, err) |
|
||||
writeJsonError(w, r, http.StatusInternalServerError, err) |
|
||||
} else { |
|
||||
w.WriteHeader(http.StatusOK) |
|
||||
} |
|
||||
} |
|
@ -0,0 +1,70 @@ |
|||||
|
package weed_server |
||||
|
|
||||
|
import ( |
||||
|
"net/http" |
||||
|
"strconv" |
||||
|
"strings" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui" |
||||
|
) |
||||
|
|
||||
|
// listDirectoryHandler lists files and folders under a directory.
// Files are sorted by name and paginated via "lastFileName" and "limit".
// Sub directories are only listed on the first page, when "lastFileName" is empty.
||||
|
func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Request) { |
||||
|
path := r.URL.Path |
||||
|
if strings.HasSuffix(path, "/") && len(path) > 1 { |
||||
|
path = path[:len(path)-1] |
||||
|
} |
||||
|
|
||||
|
limit, limitErr := strconv.Atoi(r.FormValue("limit"))
if limitErr != nil {
limit = 100
}
||||
|
|
||||
|
lastFileName := r.FormValue("lastFileName") |
||||
|
|
||||
|
entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(path), lastFileName, false, limit) |
||||
|
|
||||
|
if err != nil { |
||||
|
glog.V(0).Infof("listDirectory %s %s $d: %s", path, lastFileName, limit, err) |
||||
|
w.WriteHeader(http.StatusNotFound) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
shouldDisplayLoadMore := len(entries) == limit |
||||
|
if path == "/" { |
||||
|
path = "" |
||||
|
} |
||||
|
|
||||
|
if len(entries) > 0 { |
||||
|
lastFileName = entries[len(entries)-1].Name() |
||||
|
} |
||||
|
|
||||
|
glog.V(4).Infof("listDirectory %s, last file %s, limit %d: %d items", path, lastFileName, limit, len(entries)) |
||||
|
|
||||
|
args := struct { |
||||
|
Path string |
||||
|
Breadcrumbs []ui.Breadcrumb |
||||
|
Entries interface{} |
||||
|
Limit int |
||||
|
LastFileName string |
||||
|
ShouldDisplayLoadMore bool |
||||
|
}{ |
||||
|
path, |
||||
|
ui.ToBreadcrumb(path), |
||||
|
entries, |
||||
|
limit, |
||||
|
lastFileName, |
||||
|
shouldDisplayLoadMore, |
||||
|
} |
||||
|
|
||||
|
if r.Header.Get("Accept") == "application/json" { |
||||
|
writeJsonQuiet(w, r, http.StatusOK, args) |
||||
|
} else { |
||||
|
ui.StatusTpl.Execute(w, args) |
||||
|
} |
||||
|
} |
@ -0,0 +1,189 @@ |
|||||
|
package weed_server |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"io" |
||||
|
"io/ioutil" |
||||
|
"net/http" |
||||
|
"path" |
||||
|
"strconv" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/filer2" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
"github.com/chrislusf/seaweedfs/weed/operation" |
||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
||||
|
) |
||||
|
|
||||
|
func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replication string, collection string) bool { |
||||
|
if r.Method != "POST" { |
||||
|
glog.V(4).Infoln("AutoChunking not supported for method", r.Method) |
||||
|
return false |
||||
|
} |
||||
|
|
||||
|
// autoChunking can be set at the command-line level or as a query param. Query param overrides command-line
|
||||
|
query := r.URL.Query() |
||||
|
|
||||
|
parsedMaxMB, _ := strconv.ParseInt(query.Get("maxMB"), 10, 32) |
||||
|
maxMB := int32(parsedMaxMB) |
||||
|
if maxMB <= 0 && fs.maxMB > 0 { |
||||
|
maxMB = int32(fs.maxMB) |
||||
|
} |
||||
|
if maxMB <= 0 { |
||||
|
glog.V(4).Infoln("AutoChunking not enabled") |
||||
|
return false |
||||
|
} |
||||
|
glog.V(4).Infoln("AutoChunking level set to", maxMB, "(MB)") |
||||
|
|
||||
|
chunkSize := 1024 * 1024 * maxMB |
||||
|
|
||||
|
contentLength := int64(0) |
||||
|
if contentLengthHeader := r.Header["Content-Length"]; len(contentLengthHeader) == 1 { |
||||
|
contentLength, _ = strconv.ParseInt(contentLengthHeader[0], 10, 64) |
||||
|
if contentLength <= int64(chunkSize) { |
||||
|
glog.V(4).Infoln("Content-Length of", contentLength, "is less than the chunk size of", chunkSize, "so autoChunking will be skipped.") |
||||
|
return false |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if contentLength <= 0 { |
||||
|
glog.V(4).Infoln("Content-Length value is missing or unexpected so autoChunking will be skipped.") |
||||
|
return false |
||||
|
} |
||||
|
|
||||
|
reply, err := fs.doAutoChunk(w, r, contentLength, chunkSize, replication, collection) |
||||
|
if err != nil { |
||||
|
writeJsonError(w, r, http.StatusInternalServerError, err) |
||||
|
} else if reply != nil { |
||||
|
writeJsonQuiet(w, r, http.StatusCreated, reply) |
||||
|
} |
||||
|
return true |
||||
|
} |
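The handler above only kicks in for POST requests whose Content-Length exceeds the chunk size, and the chunk size can be forced per request through the maxMB query parameter. A hypothetical client-side sketch (the URL, target path, and helper name are illustrative, not part of this change):

package example

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"os"
	"path/filepath"
)

// uploadWithAutoChunk posts a local file to the filer and asks it to split the
// upload into chunks of at most maxMB megabytes via the query parameter.
func uploadWithAutoChunk(filerURL, targetPath, localPath string, maxMB int) error {
	f, err := os.Open(localPath)
	if err != nil {
		return err
	}
	defer f.Close()

	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	part, err := writer.CreateFormFile("file", filepath.Base(localPath))
	if err != nil {
		return err
	}
	if _, err = io.Copy(part, f); err != nil {
		return err
	}
	writer.Close()

	// http.Post with a *bytes.Buffer sets Content-Length, which autoChunk requires
	url := fmt.Sprintf("%s%s?maxMB=%d", filerURL, targetPath, maxMB)
	resp, err := http.Post(url, writer.FormDataContentType(), body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil
}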
||||
|
|
||||
|
func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string) (filerResult *FilerPostResult, replyerr error) { |
||||
|
|
||||
|
multipartReader, multipartReaderErr := r.MultipartReader() |
||||
|
if multipartReaderErr != nil { |
||||
|
return nil, multipartReaderErr |
||||
|
} |
||||
|
|
||||
|
part1, part1Err := multipartReader.NextPart() |
||||
|
if part1Err != nil { |
||||
|
return nil, part1Err |
||||
|
} |
||||
|
|
||||
|
fileName := part1.FileName() |
||||
|
if fileName != "" { |
||||
|
fileName = path.Base(fileName) |
||||
|
} |
||||
|
|
||||
|
var fileChunks []*filer_pb.FileChunk |
||||
|
|
||||
|
totalBytesRead := int64(0) |
||||
|
tmpBufferSize := int32(1024 * 1024) |
||||
|
tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize)) |
||||
|
chunkBuf := make([]byte, chunkSize+tmpBufferSize) // chunk size plus a little overflow
|
||||
|
chunkBufOffset := int32(0) |
||||
|
chunkOffset := int64(0) |
||||
|
writtenChunks := 0 |
||||
|
|
||||
|
filerResult = &FilerPostResult{ |
||||
|
Name: fileName, |
||||
|
} |
||||
|
|
||||
|
for totalBytesRead < contentLength { |
||||
|
tmpBuffer.Reset() |
||||
|
bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize)) |
||||
|
readFully := readErr != nil && readErr == io.EOF |
||||
|
tmpBuf := tmpBuffer.Bytes() |
||||
|
bytesToCopy := tmpBuf[0:int(bytesRead)] |
||||
|
|
||||
|
copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy) |
||||
|
chunkBufOffset = chunkBufOffset + int32(bytesRead) |
||||
|
|
||||
|
if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) { |
||||
|
writtenChunks = writtenChunks + 1 |
||||
|
fileId, urlLocation, assignErr := fs.assignNewFileInfo(w, r, replication, collection) |
||||
|
if assignErr != nil { |
||||
|
return nil, assignErr |
||||
|
} |
||||
|
|
||||
|
// upload the chunk to the volume server
|
||||
|
chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) |
||||
|
uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId) |
||||
|
if uploadErr != nil { |
||||
|
return nil, uploadErr |
||||
|
} |
||||
|
|
||||
|
// Save to chunk manifest structure
|
||||
|
fileChunks = append(fileChunks, |
||||
|
&filer_pb.FileChunk{ |
||||
|
FileId: fileId, |
||||
|
Offset: chunkOffset, |
||||
|
Size: uint64(chunkBufOffset), |
||||
|
Mtime: time.Now().UnixNano(), |
||||
|
}, |
||||
|
) |
||||
|
|
||||
|
// reset variables for the next chunk
|
||||
|
chunkBufOffset = 0 |
||||
|
chunkOffset = totalBytesRead + int64(bytesRead) |
||||
|
} |
||||
|
|
||||
|
totalBytesRead = totalBytesRead + int64(bytesRead) |
||||
|
|
||||
|
if bytesRead == 0 || readFully { |
||||
|
break |
||||
|
} |
||||
|
|
||||
|
if readErr != nil { |
||||
|
return nil, readErr |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
path := r.URL.Path |
||||
|
// also delete the old fid unless PUT operation
|
||||
|
if r.Method != "PUT" { |
||||
|
if entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil { |
||||
|
for _, chunk := range entry.Chunks { |
||||
|
oldFid := chunk.FileId |
||||
|
operation.DeleteFile(fs.filer.GetMaster(), oldFid, fs.jwt(oldFid)) |
||||
|
} |
||||
|
} else if err != nil { |
||||
|
glog.V(0).Infof("error %v occur when finding %s in filer store", err, path) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
glog.V(4).Infoln("saving", path) |
||||
|
entry := &filer2.Entry{ |
||||
|
FullPath: filer2.FullPath(path), |
||||
|
Attr: filer2.Attr{ |
||||
|
Mtime: time.Now(), |
||||
|
Crtime: time.Now(), |
||||
|
Mode: 0660, |
||||
|
}, |
||||
|
Chunks: fileChunks, |
||||
|
} |
||||
|
if dbErr := fs.filer.CreateEntry(entry); dbErr != nil {
replyerr = dbErr
filerResult.Error = dbErr.Error()
glog.V(0).Infof("failed to write %s to filer server: %v", path, dbErr)
return
}
||||
|
|
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, chunkBuf []byte, fileName string, contentType string, fileId string) (err error) { |
||||
|
err = nil |
||||
|
|
||||
|
ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) |
||||
|
uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, fs.jwt(fileId)) |
||||
|
if uploadResult != nil { |
||||
|
glog.V(0).Infoln("Chunk upload result. Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size) |
||||
|
} |
||||
|
if uploadError != nil { |
||||
|
err = uploadError |
||||
|
} |
||||
|
return |
||||
|
} |
@ -0,0 +1,139 @@ |
|||||
|
package weed_server |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"crypto/md5" |
||||
|
"encoding/base64" |
||||
|
"fmt" |
||||
|
"io" |
||||
|
"io/ioutil" |
||||
|
"mime/multipart" |
||||
|
"net/http" |
||||
|
"net/textproto" |
||||
|
"strings" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
) |
||||
|
|
||||
|
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") |
||||
|
|
||||
|
func escapeQuotes(s string) string { |
||||
|
return quoteEscaper.Replace(s) |
||||
|
} |
||||
|
|
||||
|
func createFormFile(writer *multipart.Writer, fieldname, filename, mime string) (io.Writer, error) { |
||||
|
h := make(textproto.MIMEHeader) |
||||
|
h.Set("Content-Disposition", |
||||
|
fmt.Sprintf(`form-data; name="%s"; filename="%s"`, |
||||
|
escapeQuotes(fieldname), escapeQuotes(filename))) |
||||
|
if len(mime) == 0 { |
||||
|
mime = "application/octet-stream" |
||||
|
} |
||||
|
h.Set("Content-Type", mime) |
||||
|
return writer.CreatePart(h) |
||||
|
} |
||||
|
|
||||
|
func makeFormData(filename, mimeType string, content io.Reader) (formData io.Reader, contentType string, err error) { |
||||
|
buf := new(bytes.Buffer) |
||||
|
writer := multipart.NewWriter(buf) |
||||
|
defer writer.Close() |
||||
|
|
||||
|
part, err := createFormFile(writer, "file", filename, mimeType) |
||||
|
if err != nil { |
||||
|
glog.V(0).Infoln(err) |
||||
|
return |
||||
|
} |
||||
|
_, err = io.Copy(part, content) |
||||
|
if err != nil { |
||||
|
glog.V(0).Infoln(err) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
formData = buf |
||||
|
contentType = writer.FormDataContentType() |
||||
|
|
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func checkContentMD5(w http.ResponseWriter, r *http.Request) (err error) { |
||||
|
if contentMD5 := r.Header.Get("Content-MD5"); contentMD5 != "" { |
||||
|
buf, _ := ioutil.ReadAll(r.Body) |
||||
|
//checkMD5
|
||||
|
sum := md5.Sum(buf)
fileDataMD5 := base64.StdEncoding.EncodeToString(sum[:])
if !strings.EqualFold(fileDataMD5, contentMD5) {
||||
|
glog.V(0).Infof("fileDataMD5 [%s] is not equal to Content-MD5 [%s]", fileDataMD5, contentMD5) |
||||
|
err = fmt.Errorf("MD5 check failed") |
||||
|
writeJsonError(w, r, http.StatusNotAcceptable, err) |
||||
|
return |
||||
|
} |
||||
|
//reconstruct http request body for following new request to volume server
|
||||
|
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf)) |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (fs *FilerServer) monolithicUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) { |
||||
|
/*
Amazon S3 ref link: [http://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html]

Providing complete compatibility with the Amazon S3 API is a long way off; this is just a
simple data stream adapter between the S3 PUT API and seaweedfs's volume storage Write API.
1. The request url format should be http://$host:$port/$bucketName/$objectName
2. bucketName will be mapped to seaweedfs's collection name
3. You could customize and extend it further.
*/
||||
|
lastPos := strings.LastIndex(r.URL.Path, "/") |
||||
|
if lastPos == -1 || lastPos == 0 || lastPos == len(r.URL.Path)-1 { |
||||
|
glog.V(0).Infoln("URL Path [%s] is invalid, could not retrieve file name", r.URL.Path) |
||||
|
err = fmt.Errorf("URL Path is invalid") |
||||
|
writeJsonError(w, r, http.StatusInternalServerError, err) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
if err = checkContentMD5(w, r); err != nil { |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
fileName := r.URL.Path[lastPos+1:] |
||||
|
if err = multipartHttpBodyBuilder(w, r, fileName); err != nil { |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
secondPos := strings.Index(r.URL.Path[1:], "/") + 1 |
||||
|
collection = r.URL.Path[1:secondPos] |
||||
|
path := r.URL.Path |
||||
|
|
||||
|
if fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path); err == nil && fileId == "" { |
||||
|
fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection) |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func multipartHttpBodyBuilder(w http.ResponseWriter, r *http.Request, fileName string) (err error) { |
||||
|
body, contentType, te := makeFormData(fileName, r.Header.Get("Content-Type"), r.Body) |
||||
|
if te != nil { |
||||
|
glog.V(0).Infoln("S3 protocol to raw seaweed protocol failed", te.Error()) |
||||
|
writeJsonError(w, r, http.StatusInternalServerError, te) |
||||
|
err = te |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
if body != nil { |
||||
|
switch v := body.(type) { |
||||
|
case *bytes.Buffer: |
||||
|
r.ContentLength = int64(v.Len()) |
||||
|
case *bytes.Reader: |
||||
|
r.ContentLength = int64(v.Len()) |
||||
|
case *strings.Reader: |
||||
|
r.ContentLength = int64(v.Len()) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
r.Header.Set("Content-Type", contentType) |
||||
|
rc, ok := body.(io.ReadCloser) |
||||
|
if !ok && body != nil { |
||||
|
rc = ioutil.NopCloser(body) |
||||
|
} |
||||
|
r.Body = rc |
||||
|
return |
||||
|
} |
@ -0,0 +1,39 @@ |
|||||
|
package weed_server |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"io/ioutil" |
||||
|
"net/http" |
||||
|
"strings" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
"github.com/chrislusf/seaweedfs/weed/storage" |
||||
|
) |
||||
|
|
||||
|
func (fs *FilerServer) multipartUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) { |
||||
|
//Default handle way for http multipart
|
||||
|
if r.Method == "PUT" { |
||||
|
buf, _ := ioutil.ReadAll(r.Body) |
||||
|
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf)) |
||||
|
fileName, _, _, _, _, _, _, _, pe := storage.ParseUpload(r) |
||||
|
if pe != nil { |
||||
|
glog.V(0).Infoln("failing to parse post body", pe.Error()) |
||||
|
writeJsonError(w, r, http.StatusInternalServerError, pe) |
||||
|
err = pe |
||||
|
return |
||||
|
} |
||||
|
//reconstruct http request body for following new request to volume server
|
||||
|
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf)) |
||||
|
|
||||
|
path := r.URL.Path |
||||
|
if strings.HasSuffix(path, "/") { |
||||
|
if fileName != "" { |
||||
|
path += fileName |
||||
|
} |
||||
|
} |
||||
|
fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path) |
||||
|
} else { |
||||
|
fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection) |
||||
|
} |
||||
|
return |
||||
|
} |
@ -0,0 +1,24 @@ |
|||||
|
package master_ui |
||||
|
|
||||
|
import ( |
||||
|
"path/filepath" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
type Breadcrumb struct { |
||||
|
Name string |
||||
|
Link string |
||||
|
} |
||||
|
|
||||
|
func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) { |
||||
|
parts := strings.Split(fullpath, "/") |
||||
|
|
||||
|
for i := 0; i < len(parts); i++ { |
||||
|
crumbs = append(crumbs, Breadcrumb{ |
||||
|
Name: parts[i] + "/", |
||||
|
Link: "/" + filepath.Join(parts[0:i+1]...), |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
return |
||||
|
} |
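Tracing ToBreadcrumb on a sample path (the path itself is only an illustration of the loop above):

// ToBreadcrumb("/home/chris") splits into ["", "home", "chris"] and yields:
//   {Name: "/",      Link: "/"}
//   {Name: "home/",  Link: "/home"}
//   {Name: "chris/", Link: "/home/chris"}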
@ -1,5 +1,5 @@ |
package util

const (
-	VERSION = "0.77"
+	VERSION = "0.90 beta"
)