Chris Lu
4 years ago
118 changed files with 3180 additions and 844 deletions
22   .github/workflows/cleanup.yml
16   .github/workflows/release.yml
26   README.md
3    docker/Dockerfile.go_build
3    docker/Dockerfile.go_build_large
1    docker/Dockerfile.local
13   docker/local-dev-compose.yml
39   docker/local-k8s-compose.yml
30   docker/nginx/proxy.conf
13   docker/prometheus/prometheus.yml
18   docker/seaweedfs-compose.yml
15   go.mod
67   go.sum
3    k8s/seaweedfs/Chart.yaml
12   k8s/seaweedfs/templates/_helpers.tpl
1    k8s/seaweedfs/templates/cronjob.yaml
31   k8s/seaweedfs/templates/filer-service-client.yaml
9    k8s/seaweedfs/templates/filer-service.yaml
12   k8s/seaweedfs/templates/filer-statefulset.yaml
1    k8s/seaweedfs/templates/master-service.yaml
10   k8s/seaweedfs/templates/master-statefulset.yaml
27   k8s/seaweedfs/templates/s3-deployment.yaml
13   k8s/seaweedfs/templates/volume-statefulset.yaml
21   k8s/seaweedfs/values.yaml
3    weed/command/command.go
7    weed/command/filer.go
118  weed/command/filer_cat.go
2    weed/command/filer_copy.go
201  weed/command/filer_meta_tail.go
3    weed/command/filer_replication.go
2    weed/command/mount.go
5    weed/command/mount_std.go
17   weed/command/s3.go
84   weed/command/scaffold.go
10   weed/command/server.go
25   weed/command/shell.go
25   weed/command/upload.go
113  weed/command/watch.go
194  weed/filer/abstract_sql/abstract_sql_store.go
23   weed/filer/abstract_sql/abstract_sql_store_kv.go
22   weed/filer/cassandra/cassandra_store.go
6    weed/filer/configuration.go
60   weed/filer/elastic/v7/elastic_store.go
23   weed/filer/etcd/etcd_store.go
7    weed/filer/filechunk_manifest.go
9    weed/filer/filechunks.go
147  weed/filer/filer.go
4    weed/filer/filer_buckets.go
69   weed/filer/filer_delete_entry.go
6    weed/filer/filer_notify.go
70   weed/filer/filer_search.go
6    weed/filer/filerstore.go
24   weed/filer/filerstore_translate_path.go
62   weed/filer/filerstore_wrapper.go
19   weed/filer/hbase/hbase_store.go
3    weed/filer/hbase/hbase_store_kv.go
28   weed/filer/leveldb/leveldb_store.go
33   weed/filer/leveldb/leveldb_store_test.go
27   weed/filer/leveldb2/leveldb2_store.go
6    weed/filer/leveldb2/leveldb2_store_test.go
375  weed/filer/leveldb3/leveldb3_store.go
46   weed/filer/leveldb3/leveldb3_store_kv.go
88   weed/filer/leveldb3/leveldb3_store_test.go
22   weed/filer/mongodb/mongodb_store.go
52   weed/filer/mysql/mysql_sql_gen.go
23   weed/filer/mysql/mysql_store.go
82   weed/filer/mysql2/mysql2_store.go
53   weed/filer/postgres/postgres_sql_gen.go
18   weed/filer/postgres/postgres_store.go
87   weed/filer/postgres2/postgres2_store.go
8    weed/filer/reader_at.go
6    weed/filer/redis/redis_cluster_store.go
2    weed/filer/redis/redis_store.go
49   weed/filer/redis/universal_redis_store.go
8    weed/filer/redis/universal_redis_store_kv.go
6    weed/filer/redis2/redis_cluster_store.go
2    weed/filer/redis2/redis_store.go
51   weed/filer/redis2/universal_redis_store.go
8    weed/filer/redis2/universal_redis_store_kv.go
41   weed/filer/rocksdb/README.md
302  weed/filer/rocksdb/rocksdb_store.go
47   weed/filer/rocksdb/rocksdb_store_kv.go
117  weed/filer/rocksdb/rocksdb_store_test.go
40   weed/filer/rocksdb/rocksdb_ttl.go
8    weed/filer/stream.go
110  weed/filesys/dir.go
2    weed/filesys/filehandle.go
16   weed/filesys/meta_cache/meta_cache.go
7    weed/filesys/meta_cache/meta_cache_init.go
26   weed/messaging/broker/broker_grpc_server_subscribe.go
5    weed/notification/configuration.go
3    weed/operation/submit.go
3    weed/replication/sink/filersink/filer_sink.go
31   weed/s3api/auth_credentials.go
2    weed/s3api/s3api_bucket_handlers.go
10   weed/s3api/s3api_objects_list_handlers.go
1    weed/s3api/s3api_server.go
7    weed/security/tls.go
9    weed/server/common.go
44   weed/server/filer_grpc_server.go
.github/workflows/cleanup.yml
@@ -0,0 +1,22 @@
name: Cleanup

on:
  push:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest

    steps:

    - name: Delete old release assets
      uses: mknejp/delete-release-assets@v1
      with:
        token: ${{ github.token }}
        tag: dev
        fail-if-no-assets: false
        assets: |
          weed-*

docker/nginx/proxy.conf
@@ -0,0 +1,30 @@
# HTTP 1.1 support
proxy_http_version 1.1;
#proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;

# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";

# aws default max_concurrent_requests 10
# aws default multipart_threshold 8MB
proxy_buffering on; # GET buffering; "X-Accel-Buffering" enables or disables buffering of a response
proxy_buffers 64 1m; # buffers used for reading a response from the proxied server, for a single connection
proxy_buffer_size 8k; # maximum size of the data that nginx can receive from the server at a time
proxy_busy_buffers_size 2m;

proxy_request_buffering on; # PUT buffering
client_body_buffer_size 64m; # buffer size for reading client request body
client_max_body_size 64m;

proxy_next_upstream error timeout non_idempotent http_500; # pass a failed PUT request to the next server
proxy_connect_timeout 200ms;
proxy_read_timeout 3s; # timeout between two successive read operations
proxy_send_timeout 3s; # timeout between two successive write operations

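This include file is not a standalone nginx config: the `$proxy_connection` and `$proxy_x_forwarded_*` variables must be defined at the `http` level. A minimal sketch of how it could be wired up, with `map` blocks modeled on the nginx-proxy project; the upstream address `s3:8333` is an assumption for the SeaweedFS S3 gateway, not part of this commit:

```
# sketch: http-level context defining the custom variables used by proxy.conf
map $http_upgrade $proxy_connection {
    default upgrade;
    ''      close;
}
map $scheme $proxy_x_forwarded_proto {
    default $scheme;
}
map $scheme $proxy_x_forwarded_ssl {
    default off;
    https   on;
}
map $http_x_forwarded_port $proxy_x_forwarded_port {
    default $http_x_forwarded_port;
    ''      $server_port;
}

server {
    listen 80;
    location / {
        include /etc/nginx/proxy.conf;   # the file added above
        proxy_pass http://s3:8333;       # assumed S3 gateway address
    }
}
```
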
docker/prometheus/prometheus.yml
@@ -0,0 +1,13 @@
global:
  scrape_interval: 30s
  scrape_timeout: 10s

scrape_configs:
  - job_name: services
    metrics_path: /metrics
    static_configs:
      - targets:
          - 'prometheus:9090'
          - 'volume:9325'
          - 'filer:9326'
          - 's3:9327'

k8s/seaweedfs/Chart.yaml
@@ -1,4 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-version: 2.14
+appVersion: "2.21"
+version: 2.21

k8s/seaweedfs/templates/filer-service-client.yaml
@@ -0,0 +1,31 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "seaweedfs.name" . }}-filer-client
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: filer
{{- if .Values.filer.metricsPort }}
    monitoring: "true"
{{- end }}
spec:
  clusterIP: None
  ports:
  - name: "swfs-filer"
    port: {{ .Values.filer.port }}
    targetPort: {{ .Values.filer.port }}
    protocol: TCP
  - name: "swfs-filer-grpc"
    port: {{ .Values.filer.grpcPort }}
    targetPort: {{ .Values.filer.grpcPort }}
    protocol: TCP
{{- if .Values.filer.metricsPort }}
  - name: "metrics"
    port: {{ .Values.filer.metricsPort }}
    targetPort: {{ .Values.filer.metricsPort }}
    protocol: TCP
{{- end }}
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: filer

weed/command/filer_cat.go
@@ -0,0 +1,118 @@
package command

import (
    "context"
    "fmt"
    "math"
    "net/url"
    "os"
    "strings"

    "google.golang.org/grpc"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/util"
    "github.com/chrislusf/seaweedfs/weed/wdclient"
)

var (
    filerCat FilerCatOptions
)

type FilerCatOptions struct {
    grpcDialOption grpc.DialOption
    filerAddress   string
    filerClient    filer_pb.SeaweedFilerClient
    output         *string
}

func (fco *FilerCatOptions) GetLookupFileIdFunction() wdclient.LookupFileIdFunctionType {
    return func(fileId string) (targetUrls []string, err error) {
        vid := filer.VolumeId(fileId)
        resp, err := fco.filerClient.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
            VolumeIds: []string{vid},
        })
        if err != nil {
            return nil, err
        }
        locations := resp.LocationsMap[vid]
        for _, loc := range locations.Locations {
            targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId))
        }
        return
    }
}

func init() {
    cmdFilerCat.Run = runFilerCat // break init cycle
    filerCat.output = cmdFilerCat.Flag.String("o", "", "write to file instead of stdout")
}

var cmdFilerCat = &Command{
    UsageLine: "filer.cat [-o <file>] http://localhost:8888/path/to/file",
    Short:     "copy one file to local",
    Long: `read one file to stdout or write to a file

`,
}

func runFilerCat(cmd *Command, args []string) bool {

    util.LoadConfiguration("security", false)

    if len(args) == 0 {
        return false
    }
    filerSource := args[len(args)-1]

    filerUrl, err := url.Parse(filerSource)
    if err != nil {
        fmt.Printf("The last argument should be a URL on filer: %v\n", err)
        return false
    }
    urlPath := filerUrl.Path
    if strings.HasSuffix(urlPath, "/") {
        fmt.Printf("The last argument should be a file, not a directory: %s\n", urlPath)
        return false
    }

    filerCat.filerAddress = filerUrl.Host
    filerCat.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

    dir, name := util.FullPath(urlPath).DirAndName()

    writer := os.Stdout
    if *filerCat.output != "" {

        fmt.Printf("saving %s to %s\n", filerSource, *filerCat.output)

        f, err := os.OpenFile(*filerCat.output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
        if err != nil {
            fmt.Printf("open file %s: %v\n", *filerCat.output, err)
            return false
        }
        defer f.Close()
        writer = f
    }

    pb.WithFilerClient(filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

        request := &filer_pb.LookupDirectoryEntryRequest{
            Name:      name,
            Directory: dir,
        }
        respLookupEntry, err := filer_pb.LookupEntry(client, request)
        if err != nil {
            return err
        }

        filerCat.filerClient = client

        return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)

    })

    return true
}

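For reference, a usage sketch of the new subcommand, following the `UsageLine` above; the filer address and paths are placeholders:

```
# read one file on the filer to stdout
weed filer.cat http://localhost:8888/path/to/file

# or save it to a local file with -o
weed filer.cat -o /tmp/file.copy http://localhost:8888/path/to/file
```
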
weed/command/filer_meta_tail.go
@@ -0,0 +1,201 @@
package command

import (
    "context"
    "fmt"
    "io"
    "path/filepath"
    "strings"
    "time"

    jsoniter "github.com/json-iterator/go"
    "github.com/olivere/elastic/v7"

    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/util"
)

func init() {
    cmdFilerMetaTail.Run = runFilerMetaTail // break init cycle
}

var cmdFilerMetaTail = &Command{
    UsageLine: "filer.meta.tail [-filer=localhost:8888] [-pathPrefix=/]",
    Short:     "see recent changes on a filer",
    Long: `See recent changes on a filer.

`,
}

var (
    tailFiler   = cmdFilerMetaTail.Flag.String("filer", "localhost:8888", "filer hostname:port")
    tailTarget  = cmdFilerMetaTail.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
    tailStart   = cmdFilerMetaTail.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
    tailPattern = cmdFilerMetaTail.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
    esServers   = cmdFilerMetaTail.Flag.String("es", "", "comma-separated elastic servers http://<host:port>")
    esIndex     = cmdFilerMetaTail.Flag.String("es.index", "seaweedfs", "ES index name")
)

func runFilerMetaTail(cmd *Command, args []string) bool {

    grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

    var filterFunc func(dir, fname string) bool
    if *tailPattern != "" {
        if strings.Contains(*tailPattern, "/") {
            println("watch path pattern", *tailPattern)
            filterFunc = func(dir, fname string) bool {
                matched, err := filepath.Match(*tailPattern, dir+"/"+fname)
                if err != nil {
                    fmt.Printf("error: %v", err)
                }
                return matched
            }
        } else {
            println("watch file pattern", *tailPattern)
            filterFunc = func(dir, fname string) bool {
                matched, err := filepath.Match(*tailPattern, fname)
                if err != nil {
                    fmt.Printf("error: %v", err)
                }
                return matched
            }
        }
    }

    shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
        if filterFunc == nil {
            return true
        }
        if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
            return false
        }
        if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
            return true
        }
        if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
            return true
        }
        return false
    }

    eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
        fmt.Printf("dir:%s %+v\n", resp.Directory, resp.EventNotification)
        return nil
    }
    if *esServers != "" {
        var err error
        eachEntryFunc, err = sendToElasticSearchFunc(*esServers, *esIndex)
        if err != nil {
            fmt.Printf("create elastic search client to %s: %+v\n", *esServers, err)
            return false
        }
    }

    tailErr := pb.WithFilerClient(*tailFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
            ClientName: "tail",
            PathPrefix: *tailTarget,
            SinceNs:    time.Now().Add(-*tailStart).UnixNano(),
        })
        if err != nil {
            return fmt.Errorf("listen: %v", err)
        }

        for {
            resp, listenErr := stream.Recv()
            if listenErr == io.EOF {
                return nil
            }
            if listenErr != nil {
                return listenErr
            }
            if !shouldPrint(resp) {
                continue
            }
            if err = eachEntryFunc(resp); err != nil {
                return err
            }
        }

    })
    if tailErr != nil {
        fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
    }

    return true
}

type EsDocument struct {
    Dir         string `json:"dir,omitempty"`
    Name        string `json:"name,omitempty"`
    IsDirectory bool   `json:"isDir,omitempty"`
    Size        uint64 `json:"size,omitempty"`
    Uid         uint32 `json:"uid,omitempty"`
    Gid         uint32 `json:"gid,omitempty"`
    UserName    string `json:"userName,omitempty"`
    Collection  string `json:"collection,omitempty"`
    Crtime      int64  `json:"crtime,omitempty"`
    Mtime       int64  `json:"mtime,omitempty"`
    Mime        string `json:"mime,omitempty"`
}

func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) {
    entry := event.NewEntry
    dir, name := event.NewParentPath, entry.Name
    id := util.Md5String([]byte(util.NewFullPath(dir, name)))
    esEntry := &EsDocument{
        Dir:         dir,
        Name:        name,
        IsDirectory: entry.IsDirectory,
        Size:        entry.Attributes.FileSize,
        Uid:         entry.Attributes.Uid,
        Gid:         entry.Attributes.Gid,
        UserName:    entry.Attributes.UserName,
        Collection:  entry.Attributes.Collection,
        Crtime:      entry.Attributes.Crtime,
        Mtime:       entry.Attributes.Mtime,
        Mime:        entry.Attributes.Mime,
    }
    return esEntry, id
}

func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
    options := []elastic.ClientOptionFunc{}
    options = append(options, elastic.SetURL(strings.Split(servers, ",")...))
    options = append(options, elastic.SetSniff(false))
    options = append(options, elastic.SetHealthcheck(false))
    client, err := elastic.NewClient(options...)
    if err != nil {
        return nil, err
    }
    return func(resp *filer_pb.SubscribeMetadataResponse) error {
        event := resp.EventNotification
        if event.OldEntry != nil &&
            (event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) {
            // the entry was deleted, or renamed/moved away from this directory
            dir, name := resp.Directory, event.OldEntry.Name
            id := util.Md5String([]byte(util.NewFullPath(dir, name)))
            println("delete", id)
            _, err := client.Delete().Index(esIndex).Id(id).Do(context.Background())
            return err
        }
        if event.NewEntry != nil {
            // add a new file or update the same file
            esEntry, id := toEsEntry(event)
            value, err := jsoniter.Marshal(esEntry)
            if err != nil {
                return err
            }
            println(string(value))
            _, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background())
            return err
        }
        return nil
    }, nil
}

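A usage sketch built from the flags defined above; the filer and Elasticsearch addresses are placeholders, and the ES path assumes a reachable 7.x server:

```
# print metadata changes under /buckets whose file names match *.pdf
weed filer.meta.tail -filer=localhost:8888 -pathPrefix=/buckets -pattern="*.pdf"

# replay the last hour of changes and forward them into an Elasticsearch index
weed filer.meta.tail -filer=localhost:8888 -timeAgo=1h -es=http://localhost:9200 -es.index=seaweedfs
```
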
weed/command/watch.go
@@ -1,113 +0,0 @@
package command

import (
    "context"
    "fmt"
    "io"
    "path/filepath"
    "strings"
    "time"

    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/util"
)

func init() {
    cmdWatch.Run = runWatch // break init cycle
}

var cmdWatch = &Command{
    UsageLine: "watch [-filer=localhost:8888] [-target=/]",
    Short:     "see recent changes on a filer",
    Long: `See recent changes on a filer.

`,
}

var (
    watchFiler   = cmdWatch.Flag.String("filer", "localhost:8888", "filer hostname:port")
    watchTarget  = cmdWatch.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
    watchStart   = cmdWatch.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
    watchPattern = cmdWatch.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
)

func runWatch(cmd *Command, args []string) bool {

    grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

    var filterFunc func(dir, fname string) bool
    if *watchPattern != "" {
        if strings.Contains(*watchPattern, "/") {
            println("watch path pattern", *watchPattern)
            filterFunc = func(dir, fname string) bool {
                matched, err := filepath.Match(*watchPattern, dir+"/"+fname)
                if err != nil {
                    fmt.Printf("error: %v", err)
                }
                return matched
            }
        } else {
            println("watch file pattern", *watchPattern)
            filterFunc = func(dir, fname string) bool {
                matched, err := filepath.Match(*watchPattern, fname)
                if err != nil {
                    fmt.Printf("error: %v", err)
                }
                return matched
            }
        }
    }

    shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
        if filterFunc == nil {
            return true
        }
        if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
            return false
        }
        if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
            return true
        }
        if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
            return true
        }
        return false
    }

    watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
            ClientName: "watch",
            PathPrefix: *watchTarget,
            SinceNs:    time.Now().Add(-*watchStart).UnixNano(),
        })
        if err != nil {
            return fmt.Errorf("listen: %v", err)
        }

        for {
            resp, listenErr := stream.Recv()
            if listenErr == io.EOF {
                return nil
            }
            if listenErr != nil {
                return listenErr
            }
            if !shouldPrint(resp) {
                continue
            }
            fmt.Printf("dir:%s %+v\n", resp.Directory, resp.EventNotification)
        }

    })
    if watchErr != nil {
        fmt.Printf("watch %s: %v\n", *watchFiler, watchErr)
    }

    return true
}

weed/filer/leveldb3/leveldb3_store.go
@@ -0,0 +1,375 @@
package leveldb

import (
    "bytes"
    "context"
    "crypto/md5"
    "fmt"
    "io"
    "os"
    "strings"
    "sync"

    "github.com/syndtr/goleveldb/leveldb"
    leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
    "github.com/syndtr/goleveldb/leveldb/opt"
    leveldb_util "github.com/syndtr/goleveldb/leveldb/util"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    weed_util "github.com/chrislusf/seaweedfs/weed/util"
)

const (
    DEFAULT = "_main"
)

func init() {
    filer.Stores = append(filer.Stores, &LevelDB3Store{})
}

type LevelDB3Store struct {
    dir     string
    dbs     map[string]*leveldb.DB
    dbsLock sync.RWMutex
}

func (store *LevelDB3Store) GetName() string {
    return "leveldb3"
}

func (store *LevelDB3Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
    dir := configuration.GetString(prefix + "dir")
    return store.initialize(dir)
}

func (store *LevelDB3Store) initialize(dir string) (err error) {
    glog.Infof("filer store leveldb3 dir: %s", dir)
    if err := weed_util.TestFolderWritable(dir); err != nil {
        return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
    }
    store.dir = dir

    db, loadDbErr := store.loadDB(DEFAULT)
    if loadDbErr != nil {
        return loadDbErr
    }
    store.dbs = make(map[string]*leveldb.DB)
    store.dbs[DEFAULT] = db

    return
}

func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) {

    opts := &opt.Options{
        BlockCacheCapacity:            32 * 1024 * 1024, // default value is 8MiB
        WriteBuffer:                   16 * 1024 * 1024, // default value is 4MiB
        CompactionTableSizeMultiplier: 4,
    }
    if name != DEFAULT {
        opts = &opt.Options{
            BlockCacheCapacity:            4 * 1024 * 1024, // default value is 8MiB
            WriteBuffer:                   2 * 1024 * 1024, // default value is 4MiB
            CompactionTableSizeMultiplier: 4,
        }
    }

    dbFolder := fmt.Sprintf("%s/%s", store.dir, name)
    os.MkdirAll(dbFolder, 0755)
    db, dbErr := leveldb.OpenFile(dbFolder, opts)
    if leveldb_errors.IsCorrupted(dbErr) {
        db, dbErr = leveldb.RecoverFile(dbFolder, opts)
    }
    if dbErr != nil {
        glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
        return nil, dbErr
    }
    return db, nil
}

func (store *LevelDB3Store) findDB(fullpath weed_util.FullPath, isForChildren bool) (*leveldb.DB, string, weed_util.FullPath, error) {

    store.dbsLock.RLock()

    defaultDB := store.dbs[DEFAULT]
    if !strings.HasPrefix(string(fullpath), "/buckets/") {
        store.dbsLock.RUnlock()
        return defaultDB, DEFAULT, fullpath, nil
    }

    // detect bucket
    bucketAndObjectKey := string(fullpath)[len("/buckets/"):]
    t := strings.Index(bucketAndObjectKey, "/")
    if t < 0 && !isForChildren {
        store.dbsLock.RUnlock()
        return defaultDB, DEFAULT, fullpath, nil
    }
    bucket := bucketAndObjectKey
    shortPath := weed_util.FullPath("/")
    if t > 0 {
        bucket = bucketAndObjectKey[:t]
        shortPath = weed_util.FullPath(bucketAndObjectKey[t:])
    }

    if db, found := store.dbs[bucket]; found {
        store.dbsLock.RUnlock()
        return db, bucket, shortPath, nil
    }

    store.dbsLock.RUnlock()
    // upgrade to write lock
    store.dbsLock.Lock()
    defer store.dbsLock.Unlock()

    // double check after getting the write lock
    if db, found := store.dbs[bucket]; found {
        return db, bucket, shortPath, nil
    }

    // create db
    db, err := store.loadDB(bucket)
    if err != nil {
        return nil, bucket, shortPath, err
    }
    store.dbs[bucket] = db

    return db, bucket, shortPath, nil
}

func (store *LevelDB3Store) closeDB(bucket string) {

    store.dbsLock.Lock()
    defer store.dbsLock.Unlock()

    if db, found := store.dbs[bucket]; found {
        db.Close()
        delete(store.dbs, bucket)
    }
}

func (store *LevelDB3Store) BeginTransaction(ctx context.Context) (context.Context, error) {
    return ctx, nil
}

func (store *LevelDB3Store) CommitTransaction(ctx context.Context) error {
    return nil
}

func (store *LevelDB3Store) RollbackTransaction(ctx context.Context) error {
    return nil
}

func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {

    db, _, shortPath, err := store.findDB(entry.FullPath, false)
    if err != nil {
        return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
    }

    dir, name := shortPath.DirAndName()
    key := genKey(dir, name)

    value, err := entry.EncodeAttributesAndChunks()
    if err != nil {
        return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
    }

    if len(entry.Chunks) > 50 {
        value = weed_util.MaybeGzipData(value)
    }

    err = db.Put(key, value, nil)

    if err != nil {
        return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
    }

    // println("saved", entry.FullPath, "chunks", len(entry.Chunks))

    return nil
}

func (store *LevelDB3Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

    return store.InsertEntry(ctx, entry)
}

func (store *LevelDB3Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {

    db, _, shortPath, err := store.findDB(fullpath, false)
    if err != nil {
        return nil, fmt.Errorf("findDB %s : %v", fullpath, err)
    }

    dir, name := shortPath.DirAndName()
    key := genKey(dir, name)

    data, err := db.Get(key, nil)

    if err == leveldb.ErrNotFound {
        return nil, filer_pb.ErrNotFound
    }
    if err != nil {
        return nil, fmt.Errorf("get %s : %v", fullpath, err)
    }

    entry = &filer.Entry{
        FullPath: fullpath,
    }
    err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(data))
    if err != nil {
        return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
    }

    // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))

    return entry, nil
}

func (store *LevelDB3Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {

    db, _, shortPath, err := store.findDB(fullpath, false)
    if err != nil {
        return fmt.Errorf("findDB %s : %v", fullpath, err)
    }

    dir, name := shortPath.DirAndName()
    key := genKey(dir, name)

    err = db.Delete(key, nil)
    if err != nil {
        return fmt.Errorf("delete %s : %v", fullpath, err)
    }

    return nil
}

func (store *LevelDB3Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {

    db, bucket, shortPath, err := store.findDB(fullpath, true)
    if err != nil {
        return fmt.Errorf("findDB %s : %v", fullpath, err)
    }

    if bucket != DEFAULT && shortPath == "/" {
        store.closeDB(bucket)
        if bucket != "" { // just to make sure
            os.RemoveAll(store.dir + "/" + bucket)
        }
        return nil
    }

    directoryPrefix := genDirectoryKeyPrefix(shortPath, "")

    batch := new(leveldb.Batch)

    iter := db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil)
    for iter.Next() {
        key := iter.Key()
        if !bytes.HasPrefix(key, directoryPrefix) {
            break
        }
        fileName := getNameFromKey(key)
        if fileName == "" {
            continue
        }
        batch.Delete(append(directoryPrefix, []byte(fileName)...))
    }
    iter.Release()

    err = db.Write(batch, nil)

    if err != nil {
        return fmt.Errorf("delete %s : %v", fullpath, err)
    }

    return nil
}

func (store *LevelDB3Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
    return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}

func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

    db, _, shortPath, err := store.findDB(dirPath, true)
    if err != nil {
        return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
    }

    directoryPrefix := genDirectoryKeyPrefix(shortPath, prefix)
    lastFileStart := directoryPrefix
    if startFileName != "" {
        lastFileStart = genDirectoryKeyPrefix(shortPath, startFileName)
    }

    iter := db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
    for iter.Next() {
        key := iter.Key()
        if !bytes.HasPrefix(key, directoryPrefix) {
            break
        }
        fileName := getNameFromKey(key)
        if fileName == "" {
            continue
        }
        if fileName == startFileName && !includeStartFile {
            continue
        }
        lastFileName = fileName
        limit--
        if limit < 0 {
            break
        }
        entry := &filer.Entry{
            FullPath: weed_util.NewFullPath(string(dirPath), fileName),
        }

        // println("list", entry.FullPath, "chunks", len(entry.Chunks))
        if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
            err = decodeErr
            glog.V(0).Infof("list %s : %v", entry.FullPath, err)
            break
        }
        if !eachEntryFunc(entry) {
            break
        }
    }
    iter.Release()

    return lastFileName, err
}

func genKey(dirPath, fileName string) (key []byte) {
    key = hashToBytes(dirPath)
    key = append(key, []byte(fileName)...)
    return key
}

func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
    keyPrefix = hashToBytes(string(fullpath))
    if len(startFileName) > 0 {
        keyPrefix = append(keyPrefix, []byte(startFileName)...)
    }
    return keyPrefix
}

func getNameFromKey(key []byte) string {
    return string(key[md5.Size:])
}

// hash directory
func hashToBytes(dir string) []byte {
    h := md5.New()
    io.WriteString(h, dir)
    b := h.Sum(nil)
    return b
}

func (store *LevelDB3Store) Shutdown() {
    for _, db := range store.dbs {
        db.Close()
    }
}

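To point a filer at the new store, the `[leveldb3]` section of filer.toml only needs a writable directory; a minimal sketch, with the key name following `Initialize` above and the path a placeholder:

```
[leveldb3]
# each bucket under /buckets/ gets its own LevelDB next to the shared "_main" one
enabled = true
dir = "./filerldb3"
```
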
weed/filer/leveldb3/leveldb3_store_kv.go
@@ -0,0 +1,46 @@
package leveldb

import (
    "context"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/syndtr/goleveldb/leveldb"
)

func (store *LevelDB3Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

    err = store.dbs[DEFAULT].Put(key, value, nil)

    if err != nil {
        return fmt.Errorf("kv put: %v", err)
    }

    return nil
}

func (store *LevelDB3Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

    value, err = store.dbs[DEFAULT].Get(key, nil)

    if err == leveldb.ErrNotFound {
        return nil, filer.ErrKvNotFound
    }

    if err != nil {
        return nil, fmt.Errorf("kv get: %v", err)
    }

    return
}

func (store *LevelDB3Store) KvDelete(ctx context.Context, key []byte) (err error) {

    err = store.dbs[DEFAULT].Delete(key, nil)

    if err != nil {
        return fmt.Errorf("kv delete: %v", err)
    }

    return nil
}

weed/filer/leveldb3/leveldb3_store_test.go
@@ -0,0 +1,88 @@
package leveldb

import (
    "context"
    "io/ioutil"
    "os"
    "testing"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/util"
)

func TestCreateAndFind(t *testing.T) {
    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
    dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
    defer os.RemoveAll(dir)
    store := &LevelDB3Store{}
    store.initialize(dir)
    testFiler.SetStore(store)

    fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")

    ctx := context.Background()

    entry1 := &filer.Entry{
        FullPath: fullpath,
        Attr: filer.Attr{
            Mode: 0440,
            Uid:  1234,
            Gid:  5678,
        },
    }

    if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
        t.Errorf("create entry %v: %v", entry1.FullPath, err)
        return
    }

    entry, err := testFiler.FindEntry(ctx, fullpath)

    if err != nil {
        t.Errorf("find entry: %v", err)
        return
    }

    if entry.FullPath != entry1.FullPath {
        t.Errorf("find wrong entry: %v", entry.FullPath)
        return
    }

    // checking the entry's parent directory
    entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
    if len(entries) != 1 {
        t.Errorf("list entries count: %v", len(entries))
        return
    }

    // checking the root directory
    entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
    if len(entries) != 1 {
        t.Errorf("list entries count: %v", len(entries))
        return
    }

}

func TestEmptyRoot(t *testing.T) {
    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
    dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
    defer os.RemoveAll(dir)
    store := &LevelDB3Store{}
    store.initialize(dir)
    testFiler.SetStore(store)

    ctx := context.Background()

    // listing the root directory of an empty filer
    entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
    if err != nil {
        t.Errorf("list entries: %v", err)
        return
    }
    if len(entries) != 0 {
        t.Errorf("list entries count: %v", len(entries))
        return
    }

}

weed/filer/mysql/mysql_sql_gen.go
@@ -0,0 +1,52 @@
package mysql

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
    _ "github.com/go-sql-driver/mysql"
)

type SqlGenMysql struct {
    CreateTableSqlTemplate string
    DropTableSqlTemplate   string
}

var (
    _ = abstract_sql.SqlGenerator(&SqlGenMysql{})
)

func (gen *SqlGenMysql) GetSqlInsert(bucket string) string {
    return fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES(?,?,?,?)", bucket)
}

func (gen *SqlGenMysql) GetSqlUpdate(bucket string) string {
    return fmt.Sprintf("UPDATE %s SET meta=? WHERE dirhash=? AND name=? AND directory=?", bucket)
}

func (gen *SqlGenMysql) GetSqlFind(bucket string) string {
    return fmt.Sprintf("SELECT meta FROM %s WHERE dirhash=? AND name=? AND directory=?", bucket)
}

func (gen *SqlGenMysql) GetSqlDelete(bucket string) string {
    return fmt.Sprintf("DELETE FROM %s WHERE dirhash=? AND name=? AND directory=?", bucket)
}

func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(bucket string) string {
    return fmt.Sprintf("DELETE FROM %s WHERE dirhash=? AND directory=?", bucket)
}

func (gen *SqlGenMysql) GetSqlListExclusive(bucket string) string {
    return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
}

func (gen *SqlGenMysql) GetSqlListInclusive(bucket string) string {
    return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
}

func (gen *SqlGenMysql) GetSqlCreateTable(bucket string) string {
    return fmt.Sprintf(gen.CreateTableSqlTemplate, bucket)
}

func (gen *SqlGenMysql) GetSqlDropTable(bucket string) string {
    return fmt.Sprintf(gen.DropTableSqlTemplate, bucket)
}

weed/filer/mysql2/mysql2_store.go
@@ -0,0 +1,82 @@
package mysql2

import (
    "context"
    "database/sql"
    "fmt"
    "time"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
    "github.com/chrislusf/seaweedfs/weed/filer/mysql"
    "github.com/chrislusf/seaweedfs/weed/util"
    _ "github.com/go-sql-driver/mysql"
)

const (
    CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
)

func init() {
    filer.Stores = append(filer.Stores, &MysqlStore2{})
}

type MysqlStore2 struct {
    abstract_sql.AbstractSqlStore
}

func (store *MysqlStore2) GetName() string {
    return "mysql2"
}

func (store *MysqlStore2) Initialize(configuration util.Configuration, prefix string) (err error) {
    return store.initialize(
        configuration.GetString(prefix+"createTable"),
        configuration.GetString(prefix+"username"),
        configuration.GetString(prefix+"password"),
        configuration.GetString(prefix+"hostname"),
        configuration.GetInt(prefix+"port"),
        configuration.GetString(prefix+"database"),
        configuration.GetInt(prefix+"connection_max_idle"),
        configuration.GetInt(prefix+"connection_max_open"),
        configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
        configuration.GetBool(prefix+"interpolateParams"),
    )
}

func (store *MysqlStore2) initialize(createTable, user, password, hostname string, port int, database string, maxIdle, maxOpen,
    maxLifetimeSeconds int, interpolateParams bool) (err error) {

    store.SupportBucketTable = true
    store.SqlGenerator = &mysql.SqlGenMysql{
        CreateTableSqlTemplate: createTable,
        DropTableSqlTemplate:   "drop table %s",
    }

    sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
    if interpolateParams {
        sqlUrl += "&interpolateParams=true"
    }

    var dbErr error
    store.DB, dbErr = sql.Open("mysql", sqlUrl)
    if dbErr != nil {
        store.DB.Close()
        store.DB = nil
        return fmt.Errorf("can not connect to %s error:%v", sqlUrl, dbErr)
    }

    store.DB.SetMaxIdleConns(maxIdle)
    store.DB.SetMaxOpenConns(maxOpen)
    store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)

    if err = store.DB.Ping(); err != nil {
        return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
    }

    if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil {
        return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err)
    }

    return nil
}

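A filer.toml sketch for the new store: the keys mirror what `Initialize` reads, and the `createTable` template is an assumed schema consistent with the dirhash/name/directory/meta columns used by the SQL in mysql_sql_gen.go (connection values are placeholders):

```
[mysql2]
enabled = true
createTable = """
  CREATE TABLE IF NOT EXISTS %s (
    dirhash   BIGINT,
    name      VARCHAR(1000),
    directory TEXT,
    meta      LONGBLOB,
    PRIMARY KEY (dirhash, name)
  ) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "seaweedfs"
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
```
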
weed/filer/postgres/postgres_sql_gen.go
@@ -0,0 +1,53 @@
package postgres

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
    _ "github.com/lib/pq"
)

type SqlGenPostgres struct {
    CreateTableSqlTemplate string
    DropTableSqlTemplate   string
}

var (
    _ = abstract_sql.SqlGenerator(&SqlGenPostgres{})
)

func (gen *SqlGenPostgres) GetSqlInsert(bucket string) string {
    return fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)", bucket)
}

func (gen *SqlGenPostgres) GetSqlUpdate(bucket string) string {
    return fmt.Sprintf("UPDATE %s SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4", bucket)
}

func (gen *SqlGenPostgres) GetSqlFind(bucket string) string {
    return fmt.Sprintf("SELECT meta FROM %s WHERE dirhash=$1 AND name=$2 AND directory=$3", bucket)
}

func (gen *SqlGenPostgres) GetSqlDelete(bucket string) string {
    return fmt.Sprintf("DELETE FROM %s WHERE dirhash=$1 AND name=$2 AND directory=$3", bucket)
}

func (gen *SqlGenPostgres) GetSqlDeleteFolderChildren(bucket string) string {
    return fmt.Sprintf("DELETE FROM %s WHERE dirhash=$1 AND directory=$2", bucket)
}

func (gen *SqlGenPostgres) GetSqlListExclusive(bucket string) string {
    return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5", bucket)
}

func (gen *SqlGenPostgres) GetSqlListInclusive(bucket string) string {
    return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5", bucket)
}

func (gen *SqlGenPostgres) GetSqlCreateTable(bucket string) string {
    return fmt.Sprintf(gen.CreateTableSqlTemplate, bucket)
}

func (gen *SqlGenPostgres) GetSqlDropTable(bucket string) string {
    return fmt.Sprintf(gen.DropTableSqlTemplate, bucket)
}

weed/filer/postgres2/postgres2_store.go
@@ -0,0 +1,87 @@
package postgres2

import (
    "context"
    "database/sql"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
    "github.com/chrislusf/seaweedfs/weed/filer/postgres"
    "github.com/chrislusf/seaweedfs/weed/util"
    _ "github.com/lib/pq"
)

const (
    CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30"
)

func init() {
    filer.Stores = append(filer.Stores, &PostgresStore2{})
}

type PostgresStore2 struct {
    abstract_sql.AbstractSqlStore
}

func (store *PostgresStore2) GetName() string {
    return "postgres2"
}

func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix string) (err error) {
    return store.initialize(
        configuration.GetString(prefix+"createTable"),
        configuration.GetString(prefix+"username"),
        configuration.GetString(prefix+"password"),
        configuration.GetString(prefix+"hostname"),
        configuration.GetInt(prefix+"port"),
        configuration.GetString(prefix+"database"),
        configuration.GetString(prefix+"schema"),
        configuration.GetString(prefix+"sslmode"),
        configuration.GetInt(prefix+"connection_max_idle"),
        configuration.GetInt(prefix+"connection_max_open"),
    )
}

func (store *PostgresStore2) initialize(createTable, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen int) (err error) {

    store.SupportBucketTable = true
    store.SqlGenerator = &postgres.SqlGenPostgres{
        CreateTableSqlTemplate: createTable,
        DropTableSqlTemplate:   "drop table %s",
    }

    sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
    if user != "" {
        sqlUrl += " user=" + user
    }
    if password != "" {
        sqlUrl += " password=" + password
    }
    if database != "" {
        sqlUrl += " dbname=" + database
    }
    if schema != "" {
        sqlUrl += " search_path=" + schema
    }
    var dbErr error
    store.DB, dbErr = sql.Open("postgres", sqlUrl)
    if dbErr != nil {
        store.DB.Close()
        store.DB = nil
        return fmt.Errorf("can not connect to %s error:%v", sqlUrl, dbErr)
    }

    store.DB.SetMaxIdleConns(maxIdle)
    store.DB.SetMaxOpenConns(maxOpen)

    if err = store.DB.Ping(); err != nil {
        return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
    }

    if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil {
        return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err)
    }

    return nil
}

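The matching filer.toml sketch for postgres2; as with mysql2, the schema in `createTable` is an assumption consistent with postgres_sql_gen.go, and the connection values are placeholders:

```
[postgres2]
enabled = true
createTable = """
  CREATE TABLE IF NOT EXISTS %s (
    dirhash   BIGINT,
    name      VARCHAR(65535),
    directory VARCHAR(65535),
    meta      bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres"
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
```
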
weed/filer/rocksdb/README.md
@@ -0,0 +1,41 @@
# Prepare the compilation environment on linux
- sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
- sudo apt-get update -qq
- sudo apt-get install gcc-6 g++-6 libsnappy-dev zlib1g-dev libbz2-dev -qq
- export CXX="g++-6" CC="gcc-6"

- wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags2_2.0-1.1ubuntu1_amd64.deb
- sudo dpkg -i libgflags2_2.0-1.1ubuntu1_amd64.deb
- wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags-dev_2.0-1.1ubuntu1_amd64.deb
- sudo dpkg -i libgflags-dev_2.0-1.1ubuntu1_amd64.deb

# Prepare the compilation environment on mac os
```
brew install snappy
```

# install rocksdb
```
export ROCKSDB_HOME=/Users/chris/dev/rocksdb

git clone https://github.com/facebook/rocksdb.git $ROCKSDB_HOME
pushd $ROCKSDB_HOME
make clean
make install-static
popd
```

# install gorocksdb

```
export CGO_CFLAGS="-I$ROCKSDB_HOME/include"
export CGO_LDFLAGS="-L$ROCKSDB_HOME -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd"

go get github.com/tecbot/gorocksdb
```
# compile with rocksdb

```
cd ~/go/src/github.com/chrislusf/seaweedfs/weed
go install -tags rocksdb
```

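With a rocksdb-tagged build installed, enabling the store is a filer.toml section pointing at a directory; a minimal sketch (the key name follows `Initialize` in rocksdb_store.go below; the path is a placeholder):

```
[rocksdb]
enabled = true
dir = "./filerrdb"
```
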
weed/filer/rocksdb/rocksdb_store.go
@@ -0,0 +1,302 @@
// +build rocksdb

package rocksdb

import (
    "bytes"
    "context"
    "crypto/md5"
    "fmt"
    "io"

    "github.com/tecbot/gorocksdb"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    weed_util "github.com/chrislusf/seaweedfs/weed/util"
)

func init() {
    filer.Stores = append(filer.Stores, &RocksDBStore{})
}

type options struct {
    opt *gorocksdb.Options
    ro  *gorocksdb.ReadOptions
    wo  *gorocksdb.WriteOptions
}

func (opt *options) init() {
    opt.opt = gorocksdb.NewDefaultOptions()
    opt.ro = gorocksdb.NewDefaultReadOptions()
    opt.wo = gorocksdb.NewDefaultWriteOptions()
}

func (opt *options) close() {
    opt.opt.Destroy()
    opt.ro.Destroy()
    opt.wo.Destroy()
}

type RocksDBStore struct {
    path string
    db   *gorocksdb.DB
    options
}

func (store *RocksDBStore) GetName() string {
    return "rocksdb"
}

func (store *RocksDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
    dir := configuration.GetString(prefix + "dir")
    return store.initialize(dir)
}

func (store *RocksDBStore) initialize(dir string) (err error) {
    glog.Infof("filer store rocksdb dir: %s", dir)
    if err := weed_util.TestFolderWritable(dir); err != nil {
        return fmt.Errorf("check rocksdb folder %s writable: %s", dir, err)
    }
    store.options.init()
    store.opt.SetCreateIfMissing(true)
    // reduce write amplification,
    // and avoid expired data stored in the highest level never getting compacted
    store.opt.SetLevelCompactionDynamicLevelBytes(true)
    store.opt.SetCompactionFilter(NewTTLFilter())
    // store.opt.SetMaxBackgroundCompactions(2)

    store.db, err = gorocksdb.OpenDb(store.opt, dir)

    return
}

func (store *RocksDBStore) BeginTransaction(ctx context.Context) (context.Context, error) {
    return ctx, nil
}

func (store *RocksDBStore) CommitTransaction(ctx context.Context) error {
    return nil
}

func (store *RocksDBStore) RollbackTransaction(ctx context.Context) error {
    return nil
}

func (store *RocksDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
    dir, name := entry.DirAndName()
    key := genKey(dir, name)

    value, err := entry.EncodeAttributesAndChunks()
    if err != nil {
        return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
    }

    err = store.db.Put(store.wo, key, value)

    if err != nil {
        return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
    }

    // println("saved", entry.FullPath, "chunks", len(entry.Chunks))

    return nil
}

func (store *RocksDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

    return store.InsertEntry(ctx, entry)
}

func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
    dir, name := fullpath.DirAndName()
    key := genKey(dir, name)
    data, err := store.db.Get(store.ro, key)

    if data == nil {
        return nil, filer_pb.ErrNotFound
    }
    defer data.Free()

    if err != nil {
        return nil, fmt.Errorf("get %s : %v", fullpath, err)
    }

    entry = &filer.Entry{
        FullPath: fullpath,
    }
    err = entry.DecodeAttributesAndChunks(data.Data())
    if err != nil {
        return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
    }

    // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))

    return entry, nil
}

func (store *RocksDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
    dir, name := fullpath.DirAndName()
    key := genKey(dir, name)

    err = store.db.Delete(store.wo, key)
    if err != nil {
        return fmt.Errorf("delete %s : %v", fullpath, err)
    }

    return nil
}

func (store *RocksDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
    directoryPrefix := genDirectoryKeyPrefix(fullpath, "")

    batch := gorocksdb.NewWriteBatch()
    defer batch.Destroy()

    ro := gorocksdb.NewDefaultReadOptions()
    defer ro.Destroy()
    ro.SetFillCache(false)

    iter := store.db.NewIterator(ro)
    defer iter.Close()
    err = enumerate(iter, directoryPrefix, nil, false, -1, func(key, value []byte) bool {
        batch.Delete(key)
        return true
    })
    if err != nil {
        return fmt.Errorf("delete list %s : %v", fullpath, err)
    }

    err = store.db.Write(store.wo, batch)

    if err != nil {
        return fmt.Errorf("delete %s : %v", fullpath, err)
    }

    return nil
}

func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey bool, limit int64, fn func(key, value []byte) bool) (err error) {

    if len(lastKey) == 0 {
        iter.Seek(prefix)
    } else {
        iter.Seek(lastKey)
        if !includeLastKey {
            if iter.Valid() {
                if bytes.Equal(iter.Key().Data(), lastKey) {
                    iter.Next()
                }
            }
        }
    }

    i := int64(0)
    for ; iter.Valid(); iter.Next() {

        if limit > 0 {
            i++
            if i > limit {
                break
            }
        }

        key := iter.Key().Data()

        if !bytes.HasPrefix(key, prefix) {
            break
        }

        ret := fn(key, iter.Value().Data())

        if !ret {
            break
        }

    }

    if err := iter.Err(); err != nil {
        return fmt.Errorf("prefix scan iterator: %v", err)
    }
    return nil
}

func (store *RocksDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
    return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}

func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

    directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix)
    lastFileStart := directoryPrefix
    if startFileName != "" {
        lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
    }

    ro := gorocksdb.NewDefaultReadOptions()
    defer ro.Destroy()
    ro.SetFillCache(false)

    iter := store.db.NewIterator(ro)
    defer iter.Close()
    err = enumerate(iter, directoryPrefix, lastFileStart, includeStartFile, limit, func(key, value []byte) bool {
        fileName := getNameFromKey(key)
        if fileName == "" {
            return true
        }
        entry := &filer.Entry{
            FullPath: weed_util.NewFullPath(string(dirPath), fileName),
        }
        lastFileName = fileName

        // println("list", entry.FullPath, "chunks", len(entry.Chunks))
        if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
            err = decodeErr
            glog.V(0).Infof("list %s : %v", entry.FullPath, err)
            return false
        }
        if !eachEntryFunc(entry) {
            return false
        }
        return true
    })
    if err != nil {
        return lastFileName, fmt.Errorf("prefix list %s : %v", dirPath, err)
    }

    return lastFileName, err
}

func genKey(dirPath, fileName string) (key []byte) {
    key = hashToBytes(dirPath)
    key = append(key, []byte(fileName)...)
    return key
}

func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
    keyPrefix = hashToBytes(string(fullpath))
    if len(startFileName) > 0 {
        keyPrefix = append(keyPrefix, []byte(startFileName)...)
    }
    return keyPrefix
}

func getNameFromKey(key []byte) string {
    return string(key[md5.Size:])
}

// hash directory, and use last byte for partitioning
func hashToBytes(dir string) []byte {
    h := md5.New()
    io.WriteString(h, dir)

    b := h.Sum(nil)

    return b
}

func (store *RocksDBStore) Shutdown() {
    store.db.Close()
    store.options.close()
}

weed/filer/rocksdb/rocksdb_store_kv.go
@@ -0,0 +1,47 @@
// +build rocksdb

package rocksdb

import (
    "context"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
)

func (store *RocksDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

    err = store.db.Put(store.wo, key, value)

    if err != nil {
        return fmt.Errorf("kv put: %v", err)
    }

    return nil
}

func (store *RocksDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

    value, err = store.db.GetBytes(store.ro, key)

    if value == nil {
        return nil, filer.ErrKvNotFound
    }

    if err != nil {
        return nil, fmt.Errorf("kv get: %v", err)
    }

    return
}

func (store *RocksDBStore) KvDelete(ctx context.Context, key []byte) (err error) {

    err = store.db.Delete(store.wo, key)

    if err != nil {
        return fmt.Errorf("kv delete: %v", err)
    }

    return nil
}

weed/filer/rocksdb/rocksdb_store_test.go
@@ -0,0 +1,117 @@
// +build rocksdb

package rocksdb

import (
    "context"
    "fmt"
    "io/ioutil"
    "os"
    "testing"
    "time"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/util"
)

func TestCreateAndFind(t *testing.T) {
    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
    dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
    defer os.RemoveAll(dir)
    store := &RocksDBStore{}
    store.initialize(dir)
    testFiler.SetStore(store)

    fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")

    ctx := context.Background()

    entry1 := &filer.Entry{
        FullPath: fullpath,
        Attr: filer.Attr{
            Mode: 0440,
            Uid:  1234,
            Gid:  5678,
        },
    }

    if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
        t.Errorf("create entry %v: %v", entry1.FullPath, err)
        return
    }

    entry, err := testFiler.FindEntry(ctx, fullpath)

    if err != nil {
        t.Errorf("find entry: %v", err)
        return
    }

    if entry.FullPath != entry1.FullPath {
        t.Errorf("find wrong entry: %v", entry.FullPath)
        return
    }

    // checking the entry's parent directory
    entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
    if len(entries) != 1 {
        t.Errorf("list entries count: %v", len(entries))
        return
    }

    // checking the root directory
    entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
    if len(entries) != 1 {
        t.Errorf("list entries count: %v", len(entries))
        return
    }

}

func TestEmptyRoot(t *testing.T) {
    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
    dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
    defer os.RemoveAll(dir)
    store := &RocksDBStore{}
    store.initialize(dir)
    testFiler.SetStore(store)

    ctx := context.Background()

    // listing the root directory of an empty filer
    entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
    if err != nil {
        t.Errorf("list entries: %v", err)
        return
    }
    if len(entries) != 0 {
        t.Errorf("list entries count: %v", len(entries))
        return
    }

}

func BenchmarkInsertEntry(b *testing.B) {
    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
    dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench")
    defer os.RemoveAll(dir)
    store := &RocksDBStore{}
    store.initialize(dir)
    testFiler.SetStore(store)

    ctx := context.Background()

    b.ReportAllocs()

    for i := 0; i < b.N; i++ {
        entry := &filer.Entry{
            FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)),
            Attr: filer.Attr{
                Crtime: time.Now(),
                Mtime:  time.Now(),
                Mode:   os.FileMode(0644),
            },
        }
        store.InsertEntry(ctx, entry)
    }
}

weed/filer/rocksdb/rocksdb_ttl.go
@@ -0,0 +1,40 @@
//+build rocksdb

package rocksdb

import (
    "time"

    "github.com/tecbot/gorocksdb"

    "github.com/chrislusf/seaweedfs/weed/filer"
)

type TTLFilter struct {
    skipLevel0 bool
}

func NewTTLFilter() gorocksdb.CompactionFilter {
    return &TTLFilter{
        skipLevel0: true,
    }
}

func (t *TTLFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) {
    // decoding the entry could be slow and cause write stalls, so skip level 0;
    // level >0 SSTs can run compaction in parallel
    if !t.skipLevel0 || level > 0 {
        entry := filer.Entry{}
        if err := entry.DecodeAttributesAndChunks(val); err == nil {
            if entry.TtlSec > 0 &&
                entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now()) {
                return true, nil
            }
        }
    }
    return false, val
}

func (t *TTLFilter) Name() string {
    return "TTLFilter"
}

Some files were not shown because too many files changed in this diff