Tarantool filer store (#6669)
Co-authored-by: Marat Karimov <m.karimov@digitalms.ru>
25 changed files with 896 additions and 14 deletions
- 4 .github/workflows/binaries_release4.yml
- 2 .github/workflows/container_release4.yml
- 2 .github/workflows/container_release5.yml
- 4 .github/workflows/go.yml
- 4 Makefile
- 17 docker/Dockerfile.tarantool.dev_env
- 10 docker/Makefile
- 30 docker/compose/test-tarantool-filer.yml
- 14 docker/tarantool/app-scm-1.rockspec
- 145 docker/tarantool/config.yaml
- 7 docker/tarantool/instances.yaml
- 77 docker/tarantool/router.lua
- 97 docker/tarantool/storage.lua
- 4 go.mod
- 8 go.sum
- 1 weed/command/imports.go
- 7 weed/command/scaffold/filer.toml
- 4 weed/command/update_full.go
- 17 weed/filer/store_test/test_suite.go
- 7 weed/filer/tarantool/doc.go
- 11 weed/filer/tarantool/readme.md
- 318 weed/filer/tarantool/tarantool_store.go
- 95 weed/filer/tarantool/tarantool_store_kv.go
- 24 weed/filer/tarantool/tarantool_store_test.go
- 1 weed/server/filer_server.go
docker/Dockerfile.tarantool.dev_env
@@ -0,0 +1,17 @@
FROM tarantool/tarantool:3.3.1 AS builder

# install dependencies
RUN apt update && \
    apt install -y git unzip cmake tt=2.7.0

# init tt dir structure, create dir for app, create symlink
RUN tt init && \
    mkdir app && \
    ln -sfn ${PWD}/app/ ${PWD}/instances.enabled/app

# copy cluster configs
COPY tarantool /opt/tarantool/app

# build app
RUN tt build app
docker/compose/test-tarantool-filer.yml
@@ -0,0 +1,30 @@
version: '3.9'

services:
  tarantool:
    image: chrislusf/tarantool_dev_env
    entrypoint: "tt start app -i"
    environment:
      APP_USER_PASSWORD: "app"
      CLIENT_USER_PASSWORD: "client"
      REPLICATOR_USER_PASSWORD: "replicator"
      STORAGE_USER_PASSWORD: "storage"
    network_mode: "host"
    ports:
      - "3303:3303"

  s3:
    image: chrislusf/seaweedfs:local
    command: "server -ip=127.0.0.1 -filer -master.volumeSizeLimitMB=16 -volume.max=0 -volume -volume.preStopSeconds=1 -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    environment:
      WEED_LEVELDB2_ENABLED: "false"
      WEED_TARANTOOL_ENABLED: "true"
      WEED_TARANTOOL_ADDRESS: "127.0.0.1:3303"
      WEED_TARANTOOL_USER: "client"
      WEED_TARANTOOL_PASSWORD: "client"
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
    network_mode: "host"
    depends_on:
      - tarantool
docker/tarantool/app-scm-1.rockspec
@@ -0,0 +1,14 @@
package = 'app'
version = 'scm-1'
source = {
    url = '/dev/null',
}
dependencies = {
    'crud == 1.5.2-1',
    'expirationd == 1.6.0-1',
    'metrics-export-role == 0.3.0-1',
    'vshard == 0.1.32-1'
}
build = {
    type = 'none';
}
docker/tarantool/config.yaml
@@ -0,0 +1,145 @@
config:
  context:
    app_user_password:
      from: env
      env: APP_USER_PASSWORD
    client_user_password:
      from: env
      env: CLIENT_USER_PASSWORD
    replicator_user_password:
      from: env
      env: REPLICATOR_USER_PASSWORD
    storage_user_password:
      from: env
      env: STORAGE_USER_PASSWORD

credentials:
  roles:
    crud-role:
      privileges:
        - permissions: [ "execute" ]
          lua_call: [ "crud.delete", "crud.get", "crud.upsert" ]
  users:
    app:
      password: '{{ context.app_user_password }}'
      roles: [ public, crud-role ]
    client:
      password: '{{ context.client_user_password }}'
      roles: [ super ]
    replicator:
      password: '{{ context.replicator_user_password }}'
      roles: [ replication ]
    storage:
      password: '{{ context.storage_user_password }}'
      roles: [ sharding ]

iproto:
  advertise:
    peer:
      login: replicator
    sharding:
      login: storage

sharding:
  bucket_count: 10000

metrics:
  include: [ all ]
  exclude: [ vinyl ]
  labels:
    alias: '{{ instance_name }}'

groups:
  storages:
    roles:
      - roles.crud-storage
      - roles.expirationd
      - roles.metrics-export
    roles_cfg:
      roles.expirationd:
        cfg:
          metrics: true
        filer_metadata_task:
          space: filer_metadata
          is_expired: filer_metadata.is_expired
          options:
            atomic_iteration: true
            force: true
            index: 'expire_at_idx'
            iterator_type: GT
            start_key:
              - 0
            tuples_per_iteration: 10000
    app:
      module: storage
    sharding:
      roles: [ storage ]
    replication:
      failover: election
    database:
      use_mvcc_engine: true
    replicasets:
      storage-001:
        instances:
          storage-001-a:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8081'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3301
              advertise:
                client: 127.0.0.1:3301
          storage-001-b:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8082'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3302
              advertise:
                client: 127.0.0.1:3302
  routers:
    roles:
      - roles.crud-router
      - roles.metrics-export
    roles_cfg:
      roles.crud-router:
        stats: true
        stats_driver: metrics
        stats_quantiles: true
    app:
      module: router
    sharding:
      roles: [ router ]
    replicasets:
      router-001:
        instances:
          router-001-a:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8083'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3303
              advertise:
                client: 127.0.0.1:3303
docker/tarantool/instances.yaml
@@ -0,0 +1,7 @@
---
storage-001-a:

storage-001-b:

router-001-a:
docker/tarantool/router.lua
@@ -0,0 +1,77 @@
local vshard = require('vshard')
local log = require('log')

-- Bootstrap the vshard router.
while true do
    local ok, err = vshard.router.bootstrap({
        if_not_bootstrapped = true,
    })
    if ok then
        break
    end
    log.info(('Router bootstrap error: %s'):format(err))
end

-- functions for filer_metadata space
local filer_metadata = {
    delete_by_directory_idx = function(directory)
        -- find all storages
        local storages = require('vshard').router.routeall()
        -- on each storage
        for _, storage in pairs(storages) do
            -- call local function
            local result, err = storage:callrw('filer_metadata.delete_by_directory_idx', { directory })
            -- check for error
            if err then
                error("Failed to call function on storage: " .. tostring(err))
            end
        end
        -- return
        return true
    end,
    find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit)
        -- init results
        local results = {}
        -- find all storages
        local storages = require('vshard').router.routeall()
        -- on each storage
        for _, storage in pairs(storages) do
            -- call local function
            local result, err = storage:callro('filer_metadata.find_by_directory_idx_and_name', {
                dirPath,
                startFileName,
                includeStartFile,
                limit
            })
            -- check for error
            if err then
                error("Failed to call function on storage: " .. tostring(err))
            end
            -- add to results
            for _, tuple in ipairs(result) do
                table.insert(results, tuple)
            end
        end
        -- sort
        table.sort(results, function(a, b) return a[3] < b[3] end)
        -- apply limit
        if #results > limit then
            local limitedResults = {}
            for i = 1, limit do
                table.insert(limitedResults, results[i])
            end
            results = limitedResults
        end
        -- return
        return results
    end,
}

rawset(_G, 'filer_metadata', filer_metadata)

-- register functions for filer_metadata space, set grants
for name, _ in pairs(filer_metadata) do
    box.schema.func.create('filer_metadata.' .. name, { if_not_exists = true })
    box.schema.user.grant('app', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
    box.schema.user.grant('client', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
end
docker/tarantool/storage.lua
@@ -0,0 +1,97 @@
box.watch('box.status', function()
    if box.info.ro then
        return
    end

    -- ====================================
    -- key_value space
    -- ====================================
    box.schema.create_space('key_value', {
        format = {
            { name = 'key', type = 'string' },
            { name = 'bucket_id', type = 'unsigned' },
            { name = 'value', type = 'string' }
        },
        if_not_exists = true
    })

    -- create key_value space indexes
    box.space.key_value:create_index('id', { type = 'tree', parts = { 'key' }, unique = true, if_not_exists = true })
    box.space.key_value:create_index('bucket_id', { type = 'tree', parts = { 'bucket_id' }, unique = false, if_not_exists = true })

    -- ====================================
    -- filer_metadata space
    -- ====================================
    box.schema.create_space('filer_metadata', {
        format = {
            { name = 'directory', type = 'string' },
            { name = 'bucket_id', type = 'unsigned' },
            { name = 'name', type = 'string' },
            { name = 'expire_at', type = 'unsigned' },
            { name = 'data', type = 'string' }
        },
        if_not_exists = true
    })

    -- create filer_metadata space indexes
    box.space.filer_metadata:create_index('id', { type = 'tree', parts = { 'directory', 'name' }, unique = true, if_not_exists = true })
    box.space.filer_metadata:create_index('bucket_id', { type = 'tree', parts = { 'bucket_id' }, unique = false, if_not_exists = true })
    box.space.filer_metadata:create_index('directory_idx', { type = 'tree', parts = { 'directory' }, unique = false, if_not_exists = true })
    box.space.filer_metadata:create_index('name_idx', { type = 'tree', parts = { 'name' }, unique = false, if_not_exists = true })
    box.space.filer_metadata:create_index('expire_at_idx', { type = 'tree', parts = { 'expire_at' }, unique = false, if_not_exists = true })
end)

-- functions for filer_metadata space
local filer_metadata = {
    delete_by_directory_idx = function(directory)
        local space = box.space.filer_metadata
        local index = space.index.directory_idx
        -- for each entry found in the directory
        for _, tuple in index:pairs({ directory }, { iterator = 'EQ' }) do
            space:delete({ tuple[1], tuple[3] })
        end
        return true
    end,
    find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit)
        local space = box.space.filer_metadata
        local directory_idx = space.index.directory_idx
        -- choose the filename filter function
        local filter_filename_func
        if includeStartFile then
            filter_filename_func = function(value) return value >= startFileName end
        else
            filter_filename_func = function(value) return value > startFileName end
        end
        -- init results
        local results = {}
        -- for each entry found in the directory
        for _, tuple in directory_idx:pairs({ dirPath }, { iterator = 'EQ' }) do
            -- filter by name
            if filter_filename_func(tuple[3]) then
                table.insert(results, tuple)
            end
        end
        -- sort
        table.sort(results, function(a, b) return a[3] < b[3] end)
        -- apply limit
        if #results > limit then
            local limitedResults = {}
            for i = 1, limit do
                table.insert(limitedResults, results[i])
            end
            results = limitedResults
        end
        -- return
        return results
    end,
    is_expired = function(args, tuple)
        return (tuple[4] > 0) and (require('fiber').time() > tuple[4])
    end
}

-- register functions for filer_metadata space, set grants
rawset(_G, 'filer_metadata', filer_metadata)
for name, _ in pairs(filer_metadata) do
    box.schema.func.create('filer_metadata.' .. name, { setuid = true, if_not_exists = true })
    box.schema.user.grant('storage', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
end
weed/filer/tarantool/doc.go
@@ -0,0 +1,7 @@
/*
Package tarantool is for the Tarantool filer store.

The referenced "github.com/tarantool/go-tarantool/v2" library is too big when compiled.
So this is only compiled in "make full_install".
*/
package tarantool
weed/filer/tarantool/readme.md
@@ -0,0 +1,11 @@
## Tarantool

database: https://www.tarantool.io/

go driver: https://github.com/tarantool/go-tarantool/

To set up local env:
`make -C docker test_tarantool`

Run tests:
`RUN_TARANTOOL_TESTS=1 go test -tags=tarantool ./weed/filer/tarantool`
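For a quick smoke test of the local environment outside of the filer itself, a minimal Go sketch along these lines should work. It mirrors the pool setup used by the store below; the address and the "client"/"client" credentials are taken from docker/compose/test-tarantool-filer.yml and are assumptions about your local setup, not part of this change.

```go
//go:build ignore

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/tarantool/go-tarantool/v2"
	"github.com/tarantool/go-tarantool/v2/pool"
)

func main() {
	// Credentials and address match docker/compose/test-tarantool-filer.yml.
	instances := []pool.Instance{{
		Name: "router",
		Dialer: tarantool.NetDialer{
			Address:  "127.0.0.1:3303",
			User:     "client",
			Password: "client",
		},
		Opts: tarantool.Opts{Timeout: 5 * time.Second},
	}}

	p, err := pool.ConnectWithOpts(context.Background(), instances, pool.Opts{CheckTimeout: time.Second})
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer p.Close()

	// A ping through the pool confirms the router is reachable.
	if _, err := p.Do(tarantool.NewPingRequest(), pool.ANY).Get(); err != nil {
		log.Fatalf("ping: %v", err)
	}
	fmt.Println("tarantool router is up")
}
```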
weed/filer/tarantool/tarantool_store.go
@@ -0,0 +1,318 @@
//go:build tarantool
// +build tarantool

package tarantool

import (
	"context"
	"fmt"
	"reflect"
	"strings"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
	weed_util "github.com/seaweedfs/seaweedfs/weed/util"
	"github.com/tarantool/go-tarantool/v2"
	"github.com/tarantool/go-tarantool/v2/crud"
	"github.com/tarantool/go-tarantool/v2/pool"
)

const (
	tarantoolSpaceName = "filer_metadata"
)

func init() {
	filer.Stores = append(filer.Stores, &TarantoolStore{})
}

type TarantoolStore struct {
	pool *pool.ConnectionPool
}

func (store *TarantoolStore) GetName() string {
	return "tarantool"
}

func (store *TarantoolStore) Initialize(configuration weed_util.Configuration, prefix string) error {

	configuration.SetDefault(prefix+"address", "localhost:3301")
	configuration.SetDefault(prefix+"user", "guest")
	configuration.SetDefault(prefix+"password", "")
	configuration.SetDefault(prefix+"timeout", "5s")
	configuration.SetDefault(prefix+"maxReconnects", "1000")

	address := configuration.GetString(prefix + "address")
	user := configuration.GetString(prefix + "user")
	password := configuration.GetString(prefix + "password")

	timeoutStr := configuration.GetString(prefix + "timeout")
	timeout, err := time.ParseDuration(timeoutStr)
	if err != nil {
		return fmt.Errorf("parse tarantool store timeout: %v", err)
	}

	maxReconnects := configuration.GetInt(prefix + "maxReconnects")
	if maxReconnects < 0 {
		return fmt.Errorf("maxReconnects is negative")
	}

	addresses := strings.Split(address, ",")

	return store.initialize(addresses, user, password, timeout, uint(maxReconnects))
}

func (store *TarantoolStore) initialize(addresses []string, user string, password string, timeout time.Duration, maxReconnects uint) error {

	opts := tarantool.Opts{
		Timeout:       timeout,
		Reconnect:     time.Second,
		MaxReconnects: maxReconnects,
	}

	poolInstances := makePoolInstances(addresses, user, password, opts)
	poolOpts := pool.Opts{
		CheckTimeout: time.Second,
	}

	ctx := context.Background()
	p, err := pool.ConnectWithOpts(ctx, poolInstances, poolOpts)
	if err != nil {
		return fmt.Errorf("Can't create connection pool: %v", err)
	}

	_, err = p.Do(tarantool.NewPingRequest(), pool.ANY).Get()
	if err != nil {
		return err
	}

	store.pool = p

	return nil
}

func makePoolInstances(addresses []string, user string, password string, opts tarantool.Opts) []pool.Instance {
	poolInstances := make([]pool.Instance, 0, len(addresses))
	for i, address := range addresses {
		poolInstances = append(poolInstances, makePoolInstance(address, user, password, opts, i))
	}
	return poolInstances
}

func makePoolInstance(address string, user string, password string, opts tarantool.Opts, serial int) pool.Instance {
	return pool.Instance{
		Name: fmt.Sprintf("instance%d", serial),
		Dialer: tarantool.NetDialer{
			Address:  address,
			User:     user,
			Password: password,
		},
		Opts: opts,
	}
}

func (store *TarantoolStore) BeginTransaction(ctx context.Context) (context.Context, error) {
	return ctx, nil
}

func (store *TarantoolStore) CommitTransaction(ctx context.Context) error {
	return nil
}

func (store *TarantoolStore) RollbackTransaction(ctx context.Context) error {
	return nil
}

func (store *TarantoolStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
	dir, name := entry.FullPath.DirAndName()
	meta, err := entry.EncodeAttributesAndChunks()
	if err != nil {
		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
	}

	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
		meta = util.MaybeGzipData(meta)
	}

	var ttl int64
	if entry.TtlSec > 0 {
		ttl = time.Now().Unix() + int64(entry.TtlSec)
	} else {
		ttl = 0
	}

	var operations = []crud.Operation{
		{
			Operator: crud.Insert,
			Field:    "data",
			Value:    string(meta),
		},
	}

	req := crud.MakeUpsertRequest(tarantoolSpaceName).
		Tuple([]interface{}{dir, nil, name, ttl, string(meta)}).
		Operations(operations)

	ret := crud.Result{}

	if err := store.pool.Do(req, pool.RW).GetTyped(&ret); err != nil {
		return fmt.Errorf("insert %s: %s", entry.FullPath, err)
	}

	return nil
}

func (store *TarantoolStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
	return store.InsertEntry(ctx, entry)
}

func (store *TarantoolStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
	dir, name := fullpath.DirAndName()

	findEntryGetOpts := crud.GetOpts{
		Fields:        crud.MakeOptTuple([]interface{}{"data"}),
		Mode:          crud.MakeOptString("read"),
		PreferReplica: crud.MakeOptBool(true),
		Balance:       crud.MakeOptBool(true),
	}

	req := crud.MakeGetRequest(tarantoolSpaceName).
		Key(crud.Tuple([]interface{}{dir, name})).
		Opts(findEntryGetOpts)

	resp := crud.Result{}

	err = store.pool.Do(req, pool.PreferRO).GetTyped(&resp)
	if err != nil {
		return nil, err
	}

	results, ok := resp.Rows.([]interface{})
	if !ok || len(results) != 1 {
		return nil, filer_pb.ErrNotFound
	}

	rows, ok := results[0].([]interface{})
	if !ok || len(rows) != 1 {
		return nil, filer_pb.ErrNotFound
	}

	row, ok := rows[0].(string)
	if !ok {
		return nil, fmt.Errorf("Can't convert rows[0] field to string. Actual type: %v, value: %v", reflect.TypeOf(rows[0]), rows[0])
	}

	entry = &filer.Entry{
		FullPath: fullpath,
	}

	err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData([]byte(row)))
	if err != nil {
		return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
	}

	return entry, nil
}

func (store *TarantoolStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
	dir, name := fullpath.DirAndName()

	delOpts := crud.DeleteOpts{
		Noreturn: crud.MakeOptBool(true),
	}

	req := crud.MakeDeleteRequest(tarantoolSpaceName).
		Key(crud.Tuple([]interface{}{dir, name})).
		Opts(delOpts)

	if _, err := store.pool.Do(req, pool.RW).Get(); err != nil {
		return fmt.Errorf("delete %s : %v", fullpath, err)
	}

	return nil
}

func (store *TarantoolStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
	req := tarantool.NewCallRequest("filer_metadata.delete_by_directory_idx").
		Args([]interface{}{fullpath})

	if _, err := store.pool.Do(req, pool.RW).Get(); err != nil {
		return fmt.Errorf("delete %s : %v", fullpath, err)
	}

	return nil
}

func (store *TarantoolStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
	return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}

func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

	req := tarantool.NewCallRequest("filer_metadata.find_by_directory_idx_and_name").
		Args([]interface{}{string(dirPath), startFileName, includeStartFile, limit})

	results, err := store.pool.Do(req, pool.PreferRO).Get()
	if err != nil {
		return
	}

	if len(results) < 1 {
		glog.Errorf("Can't find results, data is empty")
		return
	}

	rows, ok := results[0].([]interface{})
	if !ok {
		glog.Errorf("Can't convert results[0] to list")
		return
	}

	for _, result := range rows {
		row, ok := result.([]interface{})
		if !ok {
			glog.Errorf("Can't convert result to list")
			return
		}

		if len(row) < 5 {
			glog.Errorf("Length of result is less than needed: %v", len(row))
			return
		}

		nameRaw := row[2]
		name, ok := nameRaw.(string)
		if !ok {
			glog.Errorf("Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
			return
		}

		dataRaw := row[4]
		data, ok := dataRaw.(string)
		if !ok {
			glog.Errorf("Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
			return
		}

		entry := &filer.Entry{
			FullPath: util.NewFullPath(string(dirPath), name),
		}
		lastFileName = name
		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))); decodeErr != nil {
			err = decodeErr
			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
			break
		}
		if !eachEntryFunc(entry) {
			break
		}
	}

	return lastFileName, err
}

func (store *TarantoolStore) Shutdown() {
	store.pool.Close()
}
weed/filer/tarantool/tarantool_store_kv.go
@@ -0,0 +1,95 @@
//go:build tarantool
// +build tarantool

package tarantool

import (
	"context"
	"fmt"
	"reflect"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/tarantool/go-tarantool/v2/crud"
	"github.com/tarantool/go-tarantool/v2/pool"
)

const (
	tarantoolKVSpaceName = "key_value"
)

func (store *TarantoolStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

	var operations = []crud.Operation{
		{
			Operator: crud.Insert,
			Field:    "value",
			Value:    string(value),
		},
	}

	req := crud.MakeUpsertRequest(tarantoolKVSpaceName).
		Tuple([]interface{}{string(key), nil, string(value)}).
		Operations(operations)

	ret := crud.Result{}
	if err := store.pool.Do(req, pool.RW).GetTyped(&ret); err != nil {
		return fmt.Errorf("kv put: %v", err)
	}

	return nil
}

func (store *TarantoolStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

	getOpts := crud.GetOpts{
		Fields:        crud.MakeOptTuple([]interface{}{"value"}),
		Mode:          crud.MakeOptString("read"),
		PreferReplica: crud.MakeOptBool(true),
		Balance:       crud.MakeOptBool(true),
	}

	req := crud.MakeGetRequest(tarantoolKVSpaceName).
		Key(crud.Tuple([]interface{}{string(key)})).
		Opts(getOpts)

	resp := crud.Result{}

	err = store.pool.Do(req, pool.PreferRO).GetTyped(&resp)
	if err != nil {
		return nil, err
	}

	results, ok := resp.Rows.([]interface{})
	if !ok || len(results) != 1 {
		return nil, filer.ErrKvNotFound
	}

	rows, ok := results[0].([]interface{})
	if !ok || len(rows) != 1 {
		return nil, filer.ErrKvNotFound
	}

	row, ok := rows[0].(string)
	if !ok {
		return nil, fmt.Errorf("Can't convert rows[0] field to string. Actual type: %v, value: %v", reflect.TypeOf(rows[0]), rows[0])
	}

	return []byte(row), nil
}

func (store *TarantoolStore) KvDelete(ctx context.Context, key []byte) (err error) {

	delOpts := crud.DeleteOpts{
		Noreturn: crud.MakeOptBool(true),
	}

	req := crud.MakeDeleteRequest(tarantoolKVSpaceName).
		Key(crud.Tuple([]interface{}{string(key)})).
		Opts(delOpts)

	if _, err := store.pool.Do(req, pool.RW).Get(); err != nil {
		return fmt.Errorf("kv delete: %v", err)
	}

	return nil
}
weed/filer/tarantool/tarantool_store_test.go
@@ -0,0 +1,24 @@
//go:build tarantool
// +build tarantool

package tarantool

import (
	"os"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/filer/store_test"
)

func TestStore(t *testing.T) {
	// run "make test_tarantool" under the docker folder
	// to set up the local env
	if os.Getenv("RUN_TARANTOOL_TESTS") != "1" {
		t.Skip("Tarantool tests are disabled. Set RUN_TARANTOOL_TESTS=1 to enable.")
	}
	store := &TarantoolStore{}
	addresses := []string{"127.0.0.1:3303"}
	if err := store.initialize(addresses, "client", "client", 5*time.Second, 1000); err != nil {
		t.Fatalf("initialize: %v", err)
	}
	store_test.TestFilerStore(t, store)
}
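If more coverage is wanted, a companion test in the same package could exercise the KV methods against the same local cluster. This is only a sketch and not part of the change set: the test name is hypothetical, and it reuses the unexported initialize helper and the docker environment assumed by the existing test.

```go
//go:build tarantool

package tarantool

import (
	"context"
	"os"
	"testing"
	"time"
)

// TestKvRoundTrip writes a key through KvPut, reads it back with KvGet,
// and removes it with KvDelete against the local docker cluster.
func TestKvRoundTrip(t *testing.T) {
	if os.Getenv("RUN_TARANTOOL_TESTS") != "1" {
		t.Skip("Tarantool tests are disabled. Set RUN_TARANTOOL_TESTS=1 to enable.")
	}

	store := &TarantoolStore{}
	if err := store.initialize([]string{"127.0.0.1:3303"}, "client", "client", 5*time.Second, 1000); err != nil {
		t.Fatalf("initialize: %v", err)
	}
	defer store.Shutdown()

	ctx := context.Background()
	key, value := []byte("kv_round_trip_key"), []byte("kv_round_trip_value")

	if err := store.KvPut(ctx, key, value); err != nil {
		t.Fatalf("KvPut: %v", err)
	}
	got, err := store.KvGet(ctx, key)
	if err != nil {
		t.Fatalf("KvGet: %v", err)
	}
	if string(got) != string(value) {
		t.Fatalf("unexpected value: %q", got)
	}
	if err := store.KvDelete(ctx, key); err != nil {
		t.Fatalf("KvDelete: %v", err)
	}
}
```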