
keep scaffold toml updated

pull/2150/head
bingoohuang, 4 years ago
commit e598493e1a
  1. weed/command/scaffold/filer.toml (100 lines changed)
  2. weed/command/scaffold/master.toml (8 lines changed)
  3. weed/command/scaffold/notification.toml (10 lines changed)
  4. weed/command/scaffold/replication.toml (8 lines changed)
  5. weed/command/scaffold/security.toml (10 lines changed)
  6. weed/command/scaffold/shell.toml (2 lines changed)

weed/command/scaffold/filer.toml (100 lines changed)

@@ -4,7 +4,6 @@
# ./filer.toml
# $HOME/.seaweedfs/filer.toml
# /etc/seaweedfs/filer.toml
####################################################
# Customizable filer server options
####################################################
@@ -14,34 +13,28 @@
recursive_delete = false
# directories under this folder will automatically become separate buckets
buckets_folder = "/buckets"
####################################################
# The following are filer store options
####################################################
[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than the previous leveldb; recommended.
enabled = false
dir = "./filerldb2" # directory to store level db files
enabled = true
dir = "./filerldb2" # directory to store level db files
[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = true
dir = "./filerldb3" # directory to store level db files
enabled = false
dir = "./filerldb3" # directory to store level db files
[rocksdb]
# local on disk, similar to leveldb
# since it uses a C wrapper, you need to install rocksdb and build it yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files
dir = "./filerrdb" # directory to store rocksdb files
[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file
dbFile = "./filer.db" # sqlite db file
[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
@@ -50,30 +43,6 @@ dbFile = "./filer.db" # sqlite db file
# meta LONGBLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
# create table if not exists bucket_meta
# (
# id bigint primary key auto_increment comment 'increment id',
# name VARCHAR(127) not null default '' comment 'identity of bucket',
# cipher varchar(127) default null comment 'chunked data encrypt algorithm such as AES. if not empty encrypt buckets data',
# root varchar(64) comment 'root key. must be unchanged',
# shards varchar(8) not null default '' comment 'ec config , such as 4x2',
# chunk_size int not null default '4194304' comment 'chunk size',
# disabled int not null default '0' comment '0=enable',
# replication VARCHAR(3) not null default '000' comment 'replication 000, 001, 002, etc https://github.com/chrislusf/seaweedfs/wiki/Replication"',
# disk_type VARCHAR(127) not null default '' comment '[hdd|ssd|<tag>] hard drive or solid state drive or any tag',
# collection VARCHAR(127) not null default '' comment 'bucket data will be stored in this collection',
# data_center VARCHAR(127) not null default '' comment 'bucket data will be stored in this dataCenter',
# rack VARCHAR(127) not null default '' comment 'bucket data will be stored in this rack',
# ttl_seconds int not null default '0' comment 'time to live unit is second',
# fsync bool not null default '0' comment 'fsync for the writes',
# volume_growth_count int not null default '0' comment 'the number of physical volumes to add if no writable volumes',
# version int not null default '1' comment 'version of bucket',
# created_at datetime not null default current_timestamp comment 'create time',
# updated_at datetime not null default current_timestamp on update current_timestamp comment 'last update time',
# unique idx_unq_name (name)
# ) DEFAULT CHARSET = utf8mb4 comment 'buckets meta management';
enabled = false
hostname = "localhost"
port = 3306
@@ -87,7 +56,6 @@ interpolateParams = false
# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
[mysql2] # or memsql, tidb
enabled = false
createTable = """
@@ -99,35 +67,11 @@ createTable = """
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
"""
createTableBucketMeta = """
create table if not exists bucket_meta
(
id bigint primary key auto_increment comment 'increment id',
name VARCHAR(127) not null default '' comment 'identity of bucket',
cipher varchar(127) default null comment 'chunked data encrypt algorithm such as AES. if not empty encrypt buckets data',
root varchar(64) comment 'root key. must be unchanged',
shards varchar(8) not null default '' comment 'ec config , such as 4x2',
chunk_size int not null default '4194304' comment 'chunk size',
disabled int not null default '0' comment '0=enable',
replication VARCHAR(3) not null default '000' comment 'replication 000, 001, 002, etc https://github.com/chrislusf/seaweedfs/wiki/Replication"',
disk_type VARCHAR(127) not null default '' comment '[hdd|ssd|<tag>] hard drive or solid state drive or any tag',
collection VARCHAR(127) not null default '' comment 'bucket data will be stored in this collection',
data_center VARCHAR(127) not null default '' comment 'bucket data will be stored in this dataCenter',
rack VARCHAR(127) not null default '' comment 'bucket data will be stored in this rack',
ttl_seconds int not null default '0' comment 'time to live unit is second',
fsync bool not null default '0' comment 'fsync for the writes',
volume_growth_count int not null default '0' comment 'the number of physical volumes to add if no writable volumes',
version int not null default '1' comment 'version of bucket',
created_at datetime not null default current_timestamp comment 'create time',
updated_at datetime not null default current_timestamp on update current_timestamp comment 'last update time',
unique idx_unq_name (name)
) DEFAULT CHARSET = utf8mb4 comment 'buckets meta management';
"""
hostname = "localhost"
port = 3306
username = "beefs"
password = "beefs"
database = "beefs" # create or use an existing database
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
@@ -135,7 +79,6 @@ interpolateParams = false
# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
@@ -158,7 +101,6 @@ connection_max_lifetime_seconds = 0
# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
[postgres2]
enabled = false
createTable = """
@@ -183,7 +125,6 @@ connection_max_lifetime_seconds = 0
# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
[cassandra]
# CREATE TABLE filemeta (
# directory varchar,
@@ -192,28 +133,27 @@ upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,
# PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace="seaweedfs"
hosts=[
keyspace = "seaweedfs"
hosts = [
"localhost:9042",
]
username=""
password=""
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""
[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"
[redis2]
enabled = false
address = "localhost:6379"
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis_cluster2]
enabled = false
addresses = [
@@ -231,18 +171,15 @@ readOnly = false
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"
[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"
[elastic7]
enabled = false
servers = [
@@ -256,9 +193,6 @@ sniff_enabled = false
healthcheck_enabled = false
# increasing the value is recommended; be sure the value in Elastic is greater than or equal to the value here
index.max_result_window = 10000
##########################
##########################
# To add path-specific filer store:
@@ -274,6 +208,6 @@ index.max_result_window = 10000
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
address = "localhost:6379"
password = ""
database = 1
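
To see how the store options above fit together, here is a minimal filer.toml sketch assembled only from values that appear in this scaffold: leveldb2 as the default store plus the [redis2.tmp] path-specific override. The ".tmp" suffix is just a label, and routing /tmp/ to redis is illustrative, not a recommendation.

[leveldb2]
enabled = true
dir = "./filerldb2" # directory to store level db files

[redis2.tmp] # ".tmp" is an arbitrary name for this override
enabled = true # the scaffold ships this disabled
location = "/tmp/" # metadata under this path goes to redis2 instead of leveldb2
address = "localhost:6379"
password = ""
database = 1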

weed/command/scaffold/master.toml (8 lines changed)

@@ -3,7 +3,6 @@
# $HOME/.seaweedfs/master.toml
# /etc/seaweedfs/master.toml
# this file is read by master
[master.maintenance]
# periodically running these scripts is the same as running them from 'weed shell'
scripts = """
@@ -16,18 +15,13 @@ scripts = """
unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution
[master.filer]
default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
[master.sequencer]
type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence
# when sequencer.type = etcd, set the client listen urls of the etcd cluster that stores the file id sequence
# example: http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"
# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
@@ -38,7 +32,6 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials fil
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
endpoint = ""
# create this number of logical volumes if there are no more writable volumes
# count_x means how many copies of data.
# e.g.:
@@ -50,7 +43,6 @@ copy_1 = 7 # create 1 x 7 = 7 actual volumes
copy_2 = 6 # create 2 x 6 = 12 actual volumes
copy_3 = 3 # create 3 x 3 = 9 actual volumes
copy_other = 1 # create n x 1 = n actual volumes
# configuration flags for replication
[master.replication]
# any replication counts should be considered minimums. If you specify 010 and
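
As a quick illustration of the sequencer block above, a sketch of switching the file id sequencer from raft to etcd; the etcd urls are the placeholders from the scaffold's own comment, not a recommendation:

[master.sequencer]
type = "etcd" # choose [raft|etcd|snowflake]
# client urls of the etcd cluster that stores the file id sequence
sequencer_etcd_urls = "http://127.0.0.1:2379,http://127.0.0.1:2389"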

weed/command/scaffold/notification.toml (10 lines changed)

@@ -4,7 +4,6 @@
# ./notification.toml
# $HOME/.seaweedfs/notification.toml
# /etc/seaweedfs/notification.toml
####################################################
# notification
# send and receive filer updates for each file to an external message queue
@@ -12,8 +11,6 @@
[notification.log]
# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false
[notification.kafka]
enabled = false
hosts = [
@@ -22,8 +19,6 @@ hosts = [
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10
[notification.aws_sqs]
# experimental, let me know if it works
enabled = false
@@ -31,15 +26,12 @@ aws_access_key_id = "" # if empty, loads from the shared credentials file
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
sqs_queue_name = "my_filer_queue" # an existing queue name
[notification.google_pub_sub]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
project_id = "" # an existing project id
topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists
[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
@@ -51,4 +43,4 @@ enabled = false
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue then
# create binding myexchange => myqueue
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"

weed/command/scaffold/replication.toml (8 lines changed)

@@ -5,7 +5,6 @@
# ./replication.toml
# $HOME/.seaweedfs/replication.toml
# /etc/seaweedfs/replication.toml
[source.filer] # deprecated. Only useful with "weed filer.replicate"
enabled = true
grpcAddress = "localhost:18888"
@@ -13,14 +12,12 @@ grpcAddress = "localhost:18888"
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to the notification message queue.
directory = "/buckets"
[sink.local]
enabled = false
directory = "/data"
# all replicated files are placed under yyyy-mm-dd directories by modified time,
# so each date directory contains all new and updated files.
is_incremental = false
[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
@@ -32,7 +29,6 @@ replication = ""
collection = ""
ttlSec = 0
is_incremental = false
[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# default loads credentials from the shared credentials file (~/.aws/credentials).
@@ -44,7 +40,6 @@ bucket = "your_bucket_name" # an existing bucket
directory = "/" # destination directory
endpoint = ""
is_incremental = false
[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
@@ -52,7 +47,6 @@ google_application_credentials = "/path/to/x.json" # path to json credential fil
bucket = "your_bucket_seaweedfs" # an existing bucket
directory = "/" # destination directory
is_incremental = false
[sink.azure]
# experimental, let me know if it works
enabled = false
@@ -61,7 +55,6 @@ account_key = ""
container = "mycontainer" # an existing container
directory = "/" # destination directory
is_incremental = false
[sink.backblaze]
enabled = false
b2_account_id = ""
@@ -69,3 +62,4 @@ b2_master_application_key = ""
bucket = "mybucket" # an existing bucket
directory = "/" # destination directory
is_incremental = false
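
For orientation, a sketch pairing [source.filer] with a filer sink for filer-to-filer sync via "weed filer.replicate"; the target grpcAddress and destination directory are placeholders, the rest mirrors the scaffold:

[source.filer] # deprecated; only useful with "weed filer.replicate"
enabled = true
grpcAddress = "localhost:18888"
directory = "/buckets" # files under this filer path are replicated

[sink.filer]
enabled = true
grpcAddress = "localhost:28888" # placeholder address of the target filer
directory = "/backup" # placeholder destination path on the target filer
replication = ""
collection = ""
ttlSec = 0
is_incremental = false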

weed/command/scaffold/security.toml (10 lines changed)

@@ -3,18 +3,15 @@
# $HOME/.seaweedfs/security.toml
# /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer
# the jwt signing key is read by master and volume server.
# a jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds
# jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds
# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
@@ -22,33 +19,27 @@ expires_after_seconds = 10 # seconds
ca = ""
# Set a wildcard domain to enable TLS authentication by common names
allowed_wildcard_domain = "" # .mycompany.com
[grpc.volume]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.master]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.filer]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.msg_broker]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
# use this for any place that needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""
# volume server https options
# Note: work in progress!
# this does not yet work with other clients, e.g., "weed filer|mount" etc.
@@ -57,4 +48,3 @@ enabled = true
[https.volume]
cert = ""
key = ""

weed/command/scaffold/shell.toml (2 lines changed)

@@ -1,10 +1,8 @@
[cluster]
default = "c1"
[cluster.c1]
master = "localhost:9333" # comma-separated master servers
filer = "localhost:8888" # filer host and port
[cluster.c2]
master = ""
filer = ""