hilimd committed 4 years ago (committed by GitHub; GPG Key ID: 4AEE18F83AFDEB23, no known key found for this signature in database)
54 changed files with 682 additions and 128 deletions
Changed files (lines changed per file):
11   README.md
2    docker/compose/local-s3tests-compose.yml
9    go.mod
43   go.sum
4    k8s/seaweedfs/Chart.yaml
2    k8s/seaweedfs/values.yaml
2    other/java/client/pom.xml
2    other/java/client/pom.xml.deploy
2    other/java/client/pom_debug.xml
2    other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java
8    other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java
4    other/java/examples/pom.xml
2    other/java/hdfs2/dependency-reduced-pom.xml
2    other/java/hdfs2/pom.xml
2    other/java/hdfs3/dependency-reduced-pom.xml
2    other/java/hdfs3/pom.xml
2    unmaintained/load_test/load_test_leveldb/load_test_leveldb.go
108  unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go
1    weed/command/command.go
165  weed/command/fuse.go
2    weed/command/mount.go
20   weed/command/mount_std.go
11   weed/command/scaffold.go
1    weed/filer/meta_aggregator.go
78   weed/filer/sqlite/sqlite_store.go
9    weed/filer/sqlite/sqlite_store_unsupported.go
10   weed/filer/stream.go
11   weed/filesys/dirty_pages_temp_file.go
37   weed/filesys/wfs.go
39   weed/filesys/wfs_filer_client.go
2    weed/filesys/wfs_write.go
59   weed/pb/grpc_client_server.go
3    weed/replication/sink/azuresink/azure_sink.go
5    weed/replication/sink/b2sink/b2_sink.go
3    weed/replication/sink/gcssink/gcs_sink.go
30   weed/replication/sink/localsink/local_sink.go
3    weed/replication/sink/s3sink/s3_sink.go
1    weed/replication/source/filer_source.go
33   weed/s3api/s3api_object_handlers.go
12   weed/s3api/s3err/s3api_errors.go
8    weed/server/common.go
3    weed/server/filer_server.go
21   weed/server/filer_server_handlers_read.go
2    weed/server/filer_server_handlers_write_autochunk.go
7    weed/server/volume_server_handlers_read.go
2    weed/server/volume_server_ui/templates.go
2    weed/shell/command_fs_meta_save.go
3    weed/shell/command_s3_bucket_list.go
4    weed/static/javascript/jquery-2.1.3.min.js
2    weed/static/javascript/jquery-3.6.0.min.js
6    weed/topology/data_node.go
3    weed/util/config.go
2    weed/util/constants.go
1    weed/wdclient/masterclient.go
k8s/seaweedfs/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.48"
-version: 2.48
+appVersion: "2.49"
+version: 2.49
unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go (new file)
@@ -0,0 +1,108 @@
package main

import (
	"context"
	"flag"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
	"io"
	"strconv"
)

var (
	dir       = flag.String("dir", "/tmp", "directory to create files")
	n         = flag.Int("n", 100, "the number of metadata")
	tailFiler = flag.String("filer", "localhost:8888", "the filer address")
	isWrite   = flag.Bool("write", false, "only write")
)

func main() {

	flag.Parse()

	if *isWrite {
		startGenerateMetadata()
		return
	}

	expected := 0
	startSubscribeMetadata(func(event *filer_pb.SubscribeMetadataResponse) error {
		if event.Directory != *dir {
			return nil
		}
		name := event.EventNotification.NewEntry.Name
		fmt.Printf("=> %s\n", name)
		id := name[4:]
		if x, err := strconv.Atoi(id); err == nil {
			if x != expected {
				return fmt.Errorf("Expected file%d Actual %s\n", expected, name)
			}
			expected++
		} else {
			return err
		}
		return nil
	})

}

func startGenerateMetadata() {
	pb.WithFilerClient(*tailFiler, grpc.WithInsecure(), func(client filer_pb.SeaweedFilerClient) error {

		for i := 0; i < *n; i++ {
			name := fmt.Sprintf("file%d", i)
			if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
				Directory: *dir,
				Entry: &filer_pb.Entry{
					Name: name,
				},
			}); err != nil {
				fmt.Printf("create entry %s: %v\n", name, err)
				return err
			}
		}

		return nil

	})
}

func startSubscribeMetadata(eachEntryFunc func(event *filer_pb.SubscribeMetadataResponse) error) {

	lastTsNs := int64(0)

	tailErr := pb.WithFilerClient(*tailFiler, grpc.WithInsecure(), func(client filer_pb.SeaweedFilerClient) error {

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
			ClientName: "tail",
			PathPrefix: *dir,
			SinceNs:    lastTsNs,
		})
		if err != nil {
			return fmt.Errorf("listen: %v", err)
		}

		for {
			resp, listenErr := stream.Recv()
			if listenErr == io.EOF {
				return nil
			}
			if listenErr != nil {
				return listenErr
			}
			if err = eachEntryFunc(resp); err != nil {
				return err
			}
			lastTsNs = resp.TsNs
		}

	})
	if tailErr != nil {
		fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
	}
}
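A plausible way to exercise this tool, using the flag defaults defined above (it assumes a filer already listening on localhost:8888): run it once with -write to create the numbered entries, then run it again without -write to subscribe and verify that file0, file1, ... arrive in order:

go run load_test_meta_tail.go -filer=localhost:8888 -n=1000 -write
go run load_test_meta_tail.go -filer=localhost:8888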
weed/command/fuse.go (new file)
@@ -0,0 +1,165 @@
package command

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"
)

func init() {
	cmdFuse.Run = runFuse // break init cycle
}

func runFuse(cmd *Command, args []string) bool {
	argsLen := len(args)
	options := []string{}

	// at least the target mount path must be passed
	if argsLen < 1 {
		return false
	}

	// the first argument is always the target mount path
	mountOptions.dir = &args[0]

	// scan the arguments looking for one or more -o options;
	// each -o takes a parameter in the format key=value[,key=value]...
	for i := 0; i < argsLen; i++ {
		if args[i] == "-o" && i+1 < argsLen {
			options = strings.Split(args[i+1], ",")
			i++
		}
	}

	// for each option passed with -o
	for _, option := range options {
		// split on the first = character only
		parts := strings.SplitN(option, "=", 2)

		// skip entries that are not key=value pairs
		if len(parts) != 2 {
			continue
		}

		key, value := parts[0], parts[1]

		// map each key onto the corresponding "weed mount" parameter
		switch key {
		case "filer":
			mountOptions.filer = &value
		case "filer.path":
			mountOptions.filerMountRootPath = &value
		case "dirAutoCreate":
			if parsed, err := strconv.ParseBool(value); err == nil {
				mountOptions.dirAutoCreate = &parsed
			} else {
				panic(fmt.Errorf("dirAutoCreate: %s", err))
			}
		case "collection":
			mountOptions.collection = &value
		case "replication":
			mountOptions.replication = &value
		case "disk":
			mountOptions.diskType = &value
		case "ttl":
			if parsed, err := strconv.ParseInt(value, 0, 32); err == nil {
				intValue := int(parsed)
				mountOptions.ttlSec = &intValue
			} else {
				panic(fmt.Errorf("ttl: %s", err))
			}
		case "chunkSizeLimitMB":
			if parsed, err := strconv.ParseInt(value, 0, 32); err == nil {
				intValue := int(parsed)
				mountOptions.chunkSizeLimitMB = &intValue
			} else {
				panic(fmt.Errorf("chunkSizeLimitMB: %s", err))
			}
		case "concurrentWriters":
			if parsed, err := strconv.ParseInt(value, 0, 32); err == nil {
				intValue := int(parsed)
				mountOptions.concurrentWriters = &intValue
			} else {
				panic(fmt.Errorf("concurrentWriters: %s", err))
			}
		case "cacheDir":
			mountOptions.cacheDir = &value
		case "cacheCapacityMB":
			if parsed, err := strconv.ParseInt(value, 0, 64); err == nil {
				mountOptions.cacheSizeMB = &parsed
			} else {
				panic(fmt.Errorf("cacheCapacityMB: %s", err))
			}
		case "dataCenter":
			mountOptions.dataCenter = &value
		case "allowOthers":
			if parsed, err := strconv.ParseBool(value); err == nil {
				mountOptions.allowOthers = &parsed
			} else {
				panic(fmt.Errorf("allowOthers: %s", err))
			}
		case "umask":
			mountOptions.umaskString = &value
		case "nonempty":
			if parsed, err := strconv.ParseBool(value); err == nil {
				mountOptions.nonempty = &parsed
			} else {
				panic(fmt.Errorf("nonempty: %s", err))
			}
		case "volumeServerAccess":
			mountOptions.volumeServerAccess = &value
		case "map.uid":
			mountOptions.uidMap = &value
		case "map.gid":
			mountOptions.gidMap = &value
		case "readOnly":
			if parsed, err := strconv.ParseBool(value); err == nil {
				mountOptions.readOnly = &parsed
			} else {
				panic(fmt.Errorf("readOnly: %s", err))
			}
		case "cpuprofile":
			mountCpuProfile = &value
		case "memprofile":
			mountMemProfile = &value
		case "readRetryTime":
			if parsed, err := time.ParseDuration(value); err == nil {
				mountReadRetryTime = &parsed
			} else {
				panic(fmt.Errorf("readRetryTime: %s", err))
			}
		}
	}

	// I don't know why the PATH environment variable is lost
	if err := os.Setenv("PATH", "/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin"); err != nil {
		panic(fmt.Errorf("setenv: %s", err))
	}

	// just call the "weed mount" command
	return runMount(cmdMount, []string{})
}

var cmdFuse = &Command{
	UsageLine: "fuse /mnt/mount/point -o \"filer=localhost:8888,filer.path=/\"",
	Short:     "Allow using weed with Linux's mount command",
	Long: `Allow using weed with Linux's mount command

  You can use -t weed on the mount command:
  mv weed /sbin/mount.weed
  mount -t weed fuse /mnt -o "filer=localhost:8888,filer.path=/"

  Or you can use -t fuse on the mount command:
  mv weed /sbin/weed
  mount -t fuse.weed fuse /mnt -o "filer=localhost:8888,filer.path=/"
  mount -t fuse "weed#fuse" /mnt -o "filer=localhost:8888,filer.path=/"

  To use it without touching your /sbin:
  mount -t fuse./home/user/bin/weed fuse /mnt -o "filer=localhost:8888,filer.path=/"
  mount -t fuse "/home/user/bin/weed#fuse" /mnt -o "filer=localhost:8888,filer.path=/"

  To check the valid options, see "weed mount --help"
  `,
}
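Since the help text above documents the form mount -t weed fuse /mnt -o "filer=localhost:8888,filer.path=/" once the binary is installed as /sbin/mount.weed, an equivalent /etc/fstab entry would presumably look like the line below. This is an illustration inferred from that usage, not something shown in the commit:

fuse  /mnt  weed  filer=localhost:8888,filer.path=/  0  0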
weed/filer/sqlite/sqlite_store.go (new file)
@@ -0,0 +1,78 @@
// +build linux darwin windows

// limited GOOS due to modernc.org/libc/unistd

package sqlite

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
	"github.com/chrislusf/seaweedfs/weed/filer/mysql"
	"github.com/chrislusf/seaweedfs/weed/util"
	_ "modernc.org/sqlite"
)

func init() {
	filer.Stores = append(filer.Stores, &SqliteStore{})
}

type SqliteStore struct {
	abstract_sql.AbstractSqlStore
}

func (store *SqliteStore) GetName() string {
	return "sqlite"
}

func (store *SqliteStore) Initialize(configuration util.Configuration, prefix string) (err error) {
	dbFile := configuration.GetString(prefix + "dbFile")
	createTable := `CREATE TABLE IF NOT EXISTS "%s" (
		dirhash BIGINT,
		name VARCHAR(1000),
		directory TEXT,
		meta BLOB,
		PRIMARY KEY (dirhash, name)
	) WITHOUT ROWID;`
	upsertQuery := `INSERT INTO "%s"(dirhash,name,directory,meta)VALUES(?,?,?,?)
		ON CONFLICT(dirhash,name) DO UPDATE SET
			directory=excluded.directory,
			meta=excluded.meta;
	`
	return store.initialize(
		dbFile,
		createTable,
		upsertQuery,
	)
}

func (store *SqliteStore) initialize(dbFile, createTable, upsertQuery string) (err error) {

	store.SupportBucketTable = true
	store.SqlGenerator = &mysql.SqlGenMysql{
		CreateTableSqlTemplate: createTable,
		DropTableSqlTemplate:   "drop table `%s`",
		UpsertQueryTemplate:    upsertQuery,
	}

	var dbErr error
	store.DB, dbErr = sql.Open("sqlite", dbFile)
	if dbErr != nil {
		store.DB.Close()
		store.DB = nil
		return fmt.Errorf("can not connect to %s error:%v", dbFile, dbErr)
	}

	if err = store.DB.Ping(); err != nil {
		return fmt.Errorf("connect to %s error:%v", dbFile, err)
	}

	if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil {
		return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err)
	}

	return nil
}
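The store reads a single key, dbFile, under its configuration prefix (the GetString call above), so the filer.toml section added by the scaffold.go change in this commit should look roughly like the sketch below. The section name follows GetName() and the enabled flag follows the convention of the other filer stores, so treat the exact layout as an assumption:

[sqlite]
# local on-disk filer store backed by modernc.org/sqlite
enabled = true
dbFile = "./filer.db"    # sqlite database file path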
weed/filer/sqlite/sqlite_store_unsupported.go (new file)
@@ -0,0 +1,9 @@
// +build !linux,!darwin,!windows

// limited GOOS due to modernc.org/libc/unistd

package sqlite

func init() {
	// filer.Stores = append(filer.Stores, &SqliteStore{})
}
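This build constraint is the exact negation of the one on sqlite_store.go, so on the remaining platforms (where modernc.org/libc/unistd is not available) the package still compiles but simply skips registering the sqlite store.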
4    weed/static/javascript/jquery-2.1.3.min.js (diff suppressed because the file is too large)
2    weed/static/javascript/jquery-3.6.0.min.js (diff suppressed because the file is too large)