keep scaffold toml updated

pull/2150/head
bingoohuang, 4 years ago
commit e598493e1a
Changed files (6):
  88  weed/command/scaffold/filer.toml
   8  weed/command/scaffold/master.toml
  10  weed/command/scaffold/notification.toml
   8  weed/command/scaffold/replication.toml
  10  weed/command/scaffold/security.toml
   2  weed/command/scaffold/shell.toml

weed/command/scaffold/filer.toml (88 lines changed)

@@ -4,7 +4,6 @@
 # ./filer.toml
 # $HOME/.seaweedfs/filer.toml
 # /etc/seaweedfs/filer.toml
-
 ####################################################
 # Customizable filer server options
 ####################################################
@@ -14,34 +13,28 @@
 recursive_delete = false
 # directories under this folder will be automatically creating a separate bucket
 buckets_folder = "/buckets"
-
 ####################################################
 # The following are filer store options
 ####################################################
-
 [leveldb2]
 # local on disk, mostly for simple single-machine setup, fairly scalable
 # faster than previous leveldb, recommended.
-enabled = false
+enabled = true
 dir = "./filerldb2"    # directory to store level db files
-
 [leveldb3]
 # similar to leveldb2.
 # each bucket has its own meta store.
-enabled = true
+enabled = false
 dir = "./filerldb3"    # directory to store level db files
-
 [rocksdb]
 # local on disk, similar to leveldb
 # since it is using a C wrapper, you need to install rocksdb and build it by yourself
 enabled = false
 dir = "./filerrdb"    # directory to store rocksdb files
-
 [sqlite]
 # local on disk, similar to leveldb
 enabled = false
 dbFile = "./filer.db"    # sqlite db file
-
 [mysql]  # or memsql, tidb
 # CREATE TABLE IF NOT EXISTS filemeta (
 #   dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
@@ -50,30 +43,6 @@ dbFile = "./filer.db"    # sqlite db file
 #   meta LONGBLOB,
 #   PRIMARY KEY (dirhash, name)
 # ) DEFAULT CHARSET=utf8;
-
-# create table if not exists bucket_meta
-# (
-#     id bigint primary key auto_increment comment 'increment id',
-#     name VARCHAR(127) not null default '' comment 'identity of bucket',
-#     cipher varchar(127) default null comment 'chunked data encrypt algorithm such as AES. if not empty encrypt buckets data',
-#     root varchar(64) comment 'root key. must be unchanged',
-#     shards varchar(8) not null default '' comment 'ec config , such as 4x2',
-#     chunk_size int not null default '4194304' comment 'chunk size',
-#     disabled int not null default '0' comment '0=enable',
-#     replication VARCHAR(3) not null default '000' comment 'replication 000, 001, 002, etc https://github.com/chrislusf/seaweedfs/wiki/Replication"',
-#     disk_type VARCHAR(127) not null default '' comment '[hdd|ssd|<tag>] hard drive or solid state drive or any tag',
-#     collection VARCHAR(127) not null default '' comment 'bucket data will be stored in this collection',
-#     data_center VARCHAR(127) not null default '' comment 'bucket data will be stored in this dataCenter',
-#     rack VARCHAR(127) not null default '' comment 'bucket data will be stored in this rack',
-#     ttl_seconds int not null default '0' comment 'time to live unit is second',
-#     fsync bool not null default '0' comment 'fsync for the writes',
-#     volume_growth_count int not null default '0' comment 'the number of physical volumes to add if no writable volumes',
-#     version int not null default '1' comment 'version of bucket',
-#     created_at datetime not null default current_timestamp comment 'create time',
-#     updated_at datetime not null default current_timestamp on update current_timestamp comment 'last update time',
-#     unique idx_unq_name (name)
-# ) DEFAULT CHARSET = utf8mb4 comment 'buckets meta management';
-
 enabled = false
 hostname = "localhost"
 port = 3306
@@ -87,7 +56,6 @@ interpolateParams = false
 # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
 enableUpsert = true
 upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
-
 [mysql2]  # or memsql, tidb
 enabled = false
 createTable = """
@@ -99,35 +67,11 @@ createTable = """
   PRIMARY KEY (dirhash, name)
 ) DEFAULT CHARSET=utf8;
 """
-createTableBucketMeta = """
-create table if not exists bucket_meta
-(
-    id bigint primary key auto_increment comment 'increment id',
-    name VARCHAR(127) not null default '' comment 'identity of bucket',
-    cipher varchar(127) default null comment 'chunked data encrypt algorithm such as AES. if not empty encrypt buckets data',
-    root varchar(64) comment 'root key. must be unchanged',
-    shards varchar(8) not null default '' comment 'ec config , such as 4x2',
-    chunk_size int not null default '4194304' comment 'chunk size',
-    disabled int not null default '0' comment '0=enable',
-    replication VARCHAR(3) not null default '000' comment 'replication 000, 001, 002, etc https://github.com/chrislusf/seaweedfs/wiki/Replication"',
-    disk_type VARCHAR(127) not null default '' comment '[hdd|ssd|<tag>] hard drive or solid state drive or any tag',
-    collection VARCHAR(127) not null default '' comment 'bucket data will be stored in this collection',
-    data_center VARCHAR(127) not null default '' comment 'bucket data will be stored in this dataCenter',
-    rack VARCHAR(127) not null default '' comment 'bucket data will be stored in this rack',
-    ttl_seconds int not null default '0' comment 'time to live unit is second',
-    fsync bool not null default '0' comment 'fsync for the writes',
-    volume_growth_count int not null default '0' comment 'the number of physical volumes to add if no writable volumes',
-    version int not null default '1' comment 'version of bucket',
-    created_at datetime not null default current_timestamp comment 'create time',
-    updated_at datetime not null default current_timestamp on update current_timestamp comment 'last update time',
-    unique idx_unq_name (name)
-) DEFAULT CHARSET = utf8mb4 comment 'buckets meta management';
-"""
 hostname = "localhost"
 port = 3306
-username = "beefs"
-password = "beefs"
-database = "beefs"    # create or use an existing database
+username = "root"
+password = ""
+database = ""         # create or use an existing database
 connection_max_idle = 2
 connection_max_open = 100
 connection_max_lifetime_seconds = 0
@@ -135,7 +79,6 @@ interpolateParams = false
 # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
 enableUpsert = true
 upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
-
 [postgres]  # or cockroachdb, YugabyteDB
 # CREATE TABLE IF NOT EXISTS filemeta (
 #   dirhash BIGINT,
@@ -158,7 +101,6 @@ connection_max_lifetime_seconds = 0
 # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
 enableUpsert = true
 upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
-
 [postgres2]
 enabled = false
 createTable = """
@@ -183,7 +125,6 @@ connection_max_lifetime_seconds = 0
 # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
 enableUpsert = true
 upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
-
 [cassandra]
 # CREATE TABLE filemeta (
 #   directory varchar,
@@ -192,20 +133,20 @@ upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,
 #   PRIMARY KEY (directory, name)
 # ) WITH CLUSTERING ORDER BY (name ASC);
 enabled = false
-keyspace="seaweedfs"
-hosts=[
+keyspace = "seaweedfs"
+hosts = [
   "localhost:9042",
 ]
-username=""
-password=""
+username = ""
+password = ""
 # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
 superLargeDirectories = []
-
+# Name of the datacenter local to this filer, used as host selection fallback.
+localDC = ""
 [hbase]
 enabled = false
 zkquorum = ""
 table = "seaweedfs"
-
 [redis2]
 enabled = false
 address = "localhost:6379"
@@ -213,7 +154,6 @@ password = ""
 database = 0
 # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
 superLargeDirectories = []
-
 [redis_cluster2]
 enabled = false
 addresses = [
@@ -231,18 +171,15 @@ readOnly = false
 routeByLatency = false
 # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
 superLargeDirectories = []
-
 [etcd]
 enabled = false
 servers = "localhost:2379"
 timeout = "3s"
-
 [mongodb]
 enabled = false
 uri = "mongodb://localhost:27017"
 option_pool_size = 0
 database = "seaweedfs"
-
 [elastic7]
 enabled = false
 servers = [
@@ -256,9 +193,6 @@ sniff_enabled = false
 healthcheck_enabled = false
 # increase the value is recommend, be sure the value in Elastic is greater or equal here
 index.max_result_window = 10000
-
-
-
 ##########################
 ##########################
 # To add path-specific filer store:
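
With this change the scaffold's default store flips back from leveldb3 to leveldb2. A minimal filer.toml matching the updated defaults could look like the sketch below (only the enabled store is shown; every other store section stays enabled = false):

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
enabled = true
dir = "./filerldb2"    # directory to store level db files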

weed/command/scaffold/master.toml (8 lines changed)

@@ -3,7 +3,6 @@
 # $HOME/.seaweedfs/master.toml
 # /etc/seaweedfs/master.toml
 # this file is read by master
-
 [master.maintenance]
 # periodically run these scripts are the same as running them from 'weed shell'
 scripts = """
@@ -16,18 +15,13 @@ scripts = """
   unlock
 """
 sleep_minutes = 17    # sleep minutes between each script execution
-
 [master.filer]
 default = "localhost:8888"    # used by maintenance scripts if the scripts needs to use fs related commands
-
 [master.sequencer]
 type = "raft"    # Choose [raft|etcd|snowflake] type for storing the file id sequence
 # when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
 # example : http://127.0.0.1:2379,http://127.0.0.1:2389
 sequencer_etcd_urls = "http://127.0.0.1:2379"
-
-
-
 # configurations for tiered cloud storage
 # old volumes are transparently moved to cloud for cost efficiency
 [storage.backend]
@@ -38,7 +32,6 @@ aws_secret_access_key = ""    # if empty, loads from the shared credentials fil
 region = "us-east-2"
 bucket = "your_bucket_name"    # an existing bucket
 endpoint = ""
-
 # create this number of logical volumes if no more writable volumes
 # count_x means how many copies of data.
 # e.g.:
@@ -50,7 +43,6 @@ copy_1 = 7    # create 1 x 7 = 7 actual volumes
 copy_2 = 6    # create 2 x 6 = 12 actual volumes
 copy_3 = 3    # create 3 x 3 = 9 actual volumes
 copy_other = 1    # create n x 1 = n actual volumes
-
 # configuration flags for replication
 [master.replication]
 # any replication counts should be considered minimums. If you specify 010 and
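
The copy_x keys pre-create volumes per replica count, so the cost of a setting is copies x count: under replication "010" each logical volume has 2 physical copies, and copy_2 = 6 therefore pre-creates 12 physical volumes. A sketch with illustrative values (the section name follows the scaffold; the numbers are not from this commit):

[master.volume_growth]
copy_1 = 4        # for 000: 1 x 4 = 4 physical volumes per growth
copy_2 = 2        # for 001/010/100: 2 x 2 = 4 physical volumes per growth
copy_other = 1    # fallback for higher replica counts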

weed/command/scaffold/notification.toml (10 lines changed)

@@ -4,7 +4,6 @@
 # ./notification.toml
 # $HOME/.seaweedfs/notification.toml
 # /etc/seaweedfs/notification.toml
-
 ####################################################
 # notification
 # send and receive filer updates for each file to an external message queue
@@ -12,8 +11,6 @@
 [notification.log]
 # this is only for debugging perpose and does not work with "weed filer.replicate"
 enabled = false
-
-
 [notification.kafka]
 enabled = false
 hosts = [
@@ -22,8 +19,6 @@ hosts = [
 topic = "seaweedfs_filer"
 offsetFile = "./last.offset"
 offsetSaveIntervalSeconds = 10
-
-
 [notification.aws_sqs]
 # experimental, let me know if it works
 enabled = false
@@ -31,15 +26,12 @@ aws_access_key_id = ""    # if empty, loads from the shared credentials file
 aws_secret_access_key = ""    # if empty, loads from the shared credentials file (~/.aws/credentials).
 region = "us-east-2"
 sqs_queue_name = "my_filer_queue"    # an existing queue name
-
-
 [notification.google_pub_sub]
 # read credentials doc at https://cloud.google.com/docs/authentication/getting-started
 enabled = false
 google_application_credentials = "/path/to/x.json"    # path to json credential file
 project_id = ""                    # an existing project id
 topic = "seaweedfs_filer_topic"    # a topic, auto created if does not exists
-
 [notification.gocdk_pub_sub]
 # The Go Cloud Development Kit (https://gocloud.dev).
 # PubSub API (https://godoc.org/gocloud.dev/pubsub).
@@ -51,4 +43,4 @@ enabled = false
 # the RabbitMQ management plugin. Сreate myexchange of type fanout and myqueue then
 # create binding myexchange => myqueue
 topic_url = "rabbit://myexchange"
-sub_url = "rabbit://myqueue"
\ No newline at end of file
+sub_url = "rabbit://myqueue"
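
As a usage sketch, turning on just the Kafka queue from the sections above might look like this; the broker address is an assumption (Kafka's conventional localhost:9092), the rest are the scaffold defaults:

[notification.kafka]
enabled = true
hosts = [
  "localhost:9092",    # assumed broker address, not part of the commit
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10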

weed/command/scaffold/replication.toml (8 lines changed)

@@ -5,7 +5,6 @@
 # ./replication.toml
 # $HOME/.seaweedfs/replication.toml
 # /etc/seaweedfs/replication.toml
-
 [source.filer]    # deprecated. Only useful with "weed filer.replicate"
 enabled = true
 grpcAddress = "localhost:18888"
@@ -13,14 +12,12 @@ grpcAddress = "localhost:18888"
 # this is not a directory on your hard drive, but on your filer.
 # i.e., all files with this "prefix" are sent to notification message queue.
 directory = "/buckets"
-
 [sink.local]
 enabled = false
 directory = "/data"
 # all replicated files are under modified time as yyyy-mm-dd directories
 # so each date directory contains all new and updated files.
 is_incremental = false
-
 [sink.filer]
 enabled = false
 grpcAddress = "localhost:18888"
@@ -32,7 +29,6 @@ replication = ""
 collection = ""
 ttlSec = 0
 is_incremental = false
-
 [sink.s3]
 # read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
 # default loads credentials from the shared credentials file (~/.aws/credentials).
@@ -44,7 +40,6 @@ bucket = "your_bucket_name"    # an existing bucket
 directory = "/"    # destination directory
 endpoint = ""
 is_incremental = false
-
 [sink.google_cloud_storage]
 # read credentials doc at https://cloud.google.com/docs/authentication/getting-started
 enabled = false
@@ -52,7 +47,6 @@ google_application_credentials = "/path/to/x.json"    # path to json credential fil
 bucket = "your_bucket_seaweedfs"    # an existing bucket
 directory = "/"    # destination directory
 is_incremental = false
-
 [sink.azure]
 # experimental, let me know if it works
 enabled = false
@@ -61,7 +55,6 @@ account_key = ""
 container = "mycontainer"    # an existing container
 directory = "/"    # destination directory
 is_incremental = false
-
 [sink.backblaze]
 enabled = false
 b2_account_id = ""
@@ -69,3 +62,4 @@ b2_master_application_key = ""
 bucket = "mybucket"    # an existing bucket
 directory = "/"    # destination directory
 is_incremental = false
+
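
Putting the sections above together, a sketch of an incremental filer-to-S3 pipeline for "weed filer.replicate" (bucket and region are placeholders, and AWS credentials load from ~/.aws/credentials when the key fields are left empty):

[source.filer]
enabled = true
grpcAddress = "localhost:18888"
directory = "/buckets"         # all files with this prefix are replicated

[sink.s3]
enabled = true
aws_access_key_id = ""         # if empty, loads from the shared credentials file
aws_secret_access_key = ""
region = "us-east-2"
bucket = "your_bucket_name"    # an existing bucket
directory = "/"                # destination directory
is_incremental = true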

weed/command/scaffold/security.toml (10 lines changed)

@@ -3,18 +3,15 @@
 # $HOME/.seaweedfs/security.toml
 # /etc/seaweedfs/security.toml
 # this file is read by master, volume server, and filer
-
 # the jwt signing key is read by master and volume server.
 # a jwt defaults to expire after 10 seconds.
 [jwt.signing]
 key = ""
 expires_after_seconds = 10    # seconds
-
 # jwt for read is only supported with master+volume setup. Filer does not support this mode.
 [jwt.signing.read]
 key = ""
 expires_after_seconds = 10    # seconds
-
 # all grpc tls authentications are mutual
 # the values for the following ca, cert, and key are paths to the PERM files.
 # the host name is not checked, so the PERM files can be shared.
@@ -22,33 +19,27 @@ expires_after_seconds = 10    # seconds
 ca = ""
 # Set wildcard domain for enable TLS authentication by common names
 allowed_wildcard_domain = ""    # .mycompany.com
-
 [grpc.volume]
 cert = ""
 key = ""
 allowed_commonNames = ""    # comma-separated SSL certificate common names
-
 [grpc.master]
 cert = ""
 key = ""
 allowed_commonNames = ""    # comma-separated SSL certificate common names
-
 [grpc.filer]
 cert = ""
 key = ""
 allowed_commonNames = ""    # comma-separated SSL certificate common names
-
 [grpc.msg_broker]
 cert = ""
 key = ""
 allowed_commonNames = ""    # comma-separated SSL certificate common names
-
 # use this for any place needs a grpc client
 # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
 [grpc.client]
 cert = ""
 key = ""
-
 # volume server https options
 # Note: work in progress!
 # this does not work with other clients, e.g., "weed filer|mount" etc, yet.
@@ -57,4 +48,3 @@ enabled = true
 [https.volume]
 cert = ""
 key = ""
-
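
For illustration, a sketch of enabling mutual TLS between master and volume servers with the fields above; the certificate paths and common names are hypothetical placeholders:

[grpc]
ca = "/etc/seaweedfs/certs/ca.pem"           # hypothetical path

[grpc.master]
cert = "/etc/seaweedfs/certs/master.pem"     # hypothetical path
key = "/etc/seaweedfs/certs/master.key"
allowed_commonNames = "master01,master02"    # comma-separated SSL certificate common names

[grpc.volume]
cert = "/etc/seaweedfs/certs/volume.pem"
key = "/etc/seaweedfs/certs/volume.key"
allowed_commonNames = "volume01,volume02"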

weed/command/scaffold/shell.toml (2 lines changed)

@@ -1,10 +1,8 @@
 [cluster]
 default = "c1"
-
 [cluster.c1]
 master = "localhost:9333"    # comma-separated master servers
 filer = "localhost:8888"     # filer host and port
-
 [cluster.c2]
 master = ""
 filer = ""
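
A sketch of filling in the second cluster and making it the default for weed shell (both c2 addresses are hypothetical):

[cluster]
default = "c2"

[cluster.c1]
master = "localhost:9333"    # comma-separated master servers
filer = "localhost:8888"     # filer host and port

[cluster.c2]
master = "localhost:9334"    # hypothetical second cluster
filer = "localhost:8889"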