package command

import (
	"io/ioutil"
	"path/filepath"
)

func init() {
	cmdScaffold.Run = runScaffold // break init cycle
}

var cmdScaffold = &Command{
	UsageLine: "scaffold -config=[filer|notification|replication|security|master|shell]",
	Short:     "generate basic configuration files",
	Long: `Generate filer.toml with all possible configurations for you to customize.

	The options can also be overwritten by environment variables.
	For example, the filer.toml mysql password can be overwritten by the environment variable
		export WEED_MYSQL_PASSWORD=some_password
	Environment variable rules:
		* Prefix the variable name with "WEED_"
		* Uppercase the rest of the variable name.
		* Replace '.' with '_'
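
	Applying these rules to a nested key, for example filer.options.recursive_delete, gives:
		export WEED_FILER_OPTIONS_RECURSIVE_DELETE=true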
	`,
}

var (
	outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
	config     = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master|shell] the configuration file to generate")
)

func runScaffold(cmd *Command, args []string) bool {
	content := ""
	switch *config {
	case "filer":
		content = FILER_TOML_EXAMPLE
	case "notification":
		content = NOTIFICATION_TOML_EXAMPLE
	case "replication":
		content = REPLICATION_TOML_EXAMPLE
	case "security":
		content = SECURITY_TOML_EXAMPLE
	case "master":
		content = MASTER_TOML_EXAMPLE
	case "shell":
		content = SHELL_TOML_EXAMPLE
	}
	if content == "" {
		println("need a valid -config option")
		return false
	}

	if *outputPath != "" {
		// surface write errors instead of silently ignoring them
		if err := ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644); err != nil {
			println("failed to write", *config+".toml:", err.Error())
			return false
		}
	} else {
		println(content)
	}
	return true
}
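
// Example usage (flags as defined above; output paths are illustrative):
//
//	weed scaffold -config=filer              # print filer.toml to stdout
//	weed scaffold -config=security -output=. # write ./security.toml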

const (
	FILER_TOML_EXAMPLE = `
# A sample TOML config file for the SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file in one of these locations, with descending priority:
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################
[filer.options]
# With HTTP DELETE, by default the filer checks whether a folder is empty before deleting it.
# Setting recursive_delete to true deletes all sub folders and files, similar to "rm -Rf".
recursive_delete = false
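# For example, with recursive_delete = true, an HTTP DELETE against the filer
# (illustrative path, assuming the filer listens on the default port 8888)
#   curl -X DELETE "http://localhost:8888/path/to/dir/"
# removes the directory and everything under it instead of being rejected for a non-empty folder.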

# each directory created under this folder automatically becomes a separate bucket
buckets_folder = "/buckets"

####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2" # directory to store level db files

[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files

[rocksdb]
# local on disk, similar to leveldb
# since it uses a C wrapper, you need to install rocksdb and build SeaweedFS yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files

[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file

[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT               COMMENT 'first 64 bits of MD5 hash value of directory field',
#   name      VARCHAR(1000) BINARY COMMENT 'directory or file name',
#   directory TEXT                 COMMENT 'full path to parent directory',
#   meta      LONGBLOB,
#   PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if inserts/upserts are failing, you can disable upsert or change the upsert query to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""

[mysql2] # or memsql, tidb
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
    dirhash   BIGINT,
    name      VARCHAR(1000) BINARY,
    directory TEXT,
    meta      LONGBLOB,
    PRIMARY KEY (dirhash, name)
  ) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if inserts/upserts are failing, you can disable upsert or change the upsert query to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""

[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if inserts/upserts are failing, you can disable upsert or change the upsert query to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""

[postgres2]
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS "%s" (
    dirhash   BIGINT,
    name      VARCHAR(65535),
    directory VARCHAR(65535),
    meta      bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if inserts/upserts are failing, you can disable upsert or change the upsert query to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""

[cassandra]
# CREATE TABLE filemeta (
#   directory varchar,
#   name      varchar,
#   meta      blob,
#   PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
	"localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""

[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"

[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_cluster2]
enabled = false
addresses = [
	"localhost:30001",
	"localhost:30002",
	"localhost:30003",
	"localhost:30004",
	"localhost:30005",
	"localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"

[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"

[elastic7]
enabled = false
servers = [
	"http://localhost1:9200",
	"http://localhost2:9200",
	"http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increasing this value is recommended; make sure the value in Elasticsearch is greater than or equal to it
index.max_result_window = 10000

##########################
##########################
# To add a path-specific filer store:
#
# 1. Add a name following the store type, separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1
`

	NOTIFICATION_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer notifications
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
# Put this file in one of these locations, with descending priority:
#    ./notification.toml
#    $HOME/.seaweedfs/notification.toml
#    /etc/seaweedfs/notification.toml

####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false

[notification.kafka]
enabled = false
hosts = [
	"localhost:9092"
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10

[notification.aws_sqs]
# experimental, let me know if it works
enabled = false
aws_access_key_id = ""     # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
sqs_queue_name = "my_filer_queue" # an existing queue name

[notification.google_pub_sub]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
project_id = ""                 # an existing project id
topic = "seaweedfs_filer_topic" # a topic, auto-created if it does not exist

[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin. Create "myexchange" of type fanout and "myqueue",
# then create a binding myexchange => myqueue.
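# For example, with the management plugin's rabbitmqadmin CLI (illustrative names):
#   rabbitmqadmin declare exchange name=myexchange type=fanout
#   rabbitmqadmin declare queue name=myqueue
#   rabbitmqadmin declare binding source=myexchange destination=myqueue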
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`

	REPLICATION_TOML_EXAMPLE = `
# A sample TOML config file for replicating a SeaweedFS filer
# Used with "weed filer.backup"
# Using it with "weed filer.replicate" is deprecated.
# Put this file in one of these locations, with descending priority:
#    ./replication.toml
#    $HOME/.seaweedfs/replication.toml
#    /etc/seaweedfs/replication.toml

[source.filer] # deprecated. Only useful with "weed filer.replicate"
enabled = true
grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to notification message queue.
directory = "/buckets"

[sink.local]
enabled = false
directory = "/data"
# replicated files are placed under yyyy-mm-dd directories by modification time,
# so each date directory contains all new and updated files.
is_incremental = false

[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are under this directory tree
# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" to this directory.
directory = "/backup"
replication = ""
collection = ""
ttlSec = 0
is_incremental = false

[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = ""     # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/"             # destination directory
endpoint = ""
is_incremental = false

[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
bucket = "your_bucket_seaweedfs" # an existing bucket
directory = "/"                  # destination directory
is_incremental = false

[sink.azure]
# experimental, let me know if it works
enabled = false
account_name = ""
account_key = ""
container = "mycontainer" # an existing container
directory = "/"           # destination directory
is_incremental = false

[sink.backblaze]
enabled = false
b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket" # an existing bucket
directory = "/"     # destination directory
is_incremental = false
`

	SECURITY_TOML_EXAMPLE = `
# Put this file in one of these locations, with descending priority:
#    ./security.toml
#    $HOME/.seaweedfs/security.toml
#    /etc/seaweedfs/security.toml
# this file is read by the master, volume server, and filer

# the jwt signing key is read by the master and volume server.
# a jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds

# jwt for reads is only supported with a master+volume setup. The filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds

# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
[grpc]
ca = ""
# Set a wildcard domain to enable TLS authentication by common names
allowed_wildcard_domain = "" # .mycompany.com
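# For example, a throwaway test CA and a shared certificate could be generated
# with openssl (illustrative file names; production setups should use a real CA):
#   openssl req -x509 -newkey rsa:2048 -nodes -keyout ca.key -out ca.crt -days 365 -subj "/CN=weed-ca"
#   openssl req -newkey rsa:2048 -nodes -keyout weed.key -out weed.csr -subj "/CN=weed"
#   openssl x509 -req -in weed.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out weed.crt -days 365
# then set ca to ca.crt and the cert/key pairs below to weed.crt / weed.key.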

[grpc.volume]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names

[grpc.master]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names

[grpc.filer]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names

[grpc.msg_broker]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names

# use this for any place that needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""

# volume server https options
# Note: work in progress!
# this does not work with other clients, e.g., "weed filer|mount" etc, yet.
[https.client]
enabled = true

[https.volume]
cert = ""
key = ""
`

	MASTER_TOML_EXAMPLE = `
# Put this file in one of these locations, with descending priority:
#    ./master.toml
#    $HOME/.seaweedfs/master.toml
#    /etc/seaweedfs/master.toml
# this file is read by the master

[master.maintenance]
# periodically running these scripts is the same as running them from 'weed shell'
scripts = """
  lock
  ec.encode -fullPercent=95 -quietFor=1h
  ec.rebuild -force
  ec.balance -force
  volume.balance -force
  volume.fix.replication
  unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution

[master.filer]
default = "localhost:8888" # used by maintenance scripts if the scripts need to use fs-related commands

[master.sequencer]
type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence
# when sequencer.type = etcd, set the client listen urls of the etcd cluster that stores the file id sequence
# example: http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"

# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
	[storage.backend.s3.default]
	enabled = false
	aws_access_key_id = ""     # if empty, loads from the shared credentials file (~/.aws/credentials).
	aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
	region = "us-east-2"
	bucket = "your_bucket_name" # an existing bucket
	endpoint = ""

# create this number of logical volumes if no more writable volumes
# count_x means how many copies of data.
# e.g.:
#   000 has only one copy,       copy_1
#   010 and 001 have two copies, copy_2
#   011 has three copies,        copy_3
[master.volume_growth]
copy_1 = 7     # create 1 x 7 = 7 actual volumes
copy_2 = 6     # create 2 x 6 = 12 actual volumes
copy_3 = 3     # create 3 x 3 = 9 actual volumes
copy_other = 1 # create n x 1 = n actual volumes

# configuration flags for replication
[master.replication]
# any replication counts should be considered minimums. If you specify 010 and
# have 3 different racks, that's still considered writable. Writes will still
# try to replicate to all available volumes. You should only use this option
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false
`

	SHELL_TOML_EXAMPLE = `
[cluster]
default = "c1"

[cluster.c1]
master = "localhost:9333" # comma-separated master servers
filer = "localhost:8888"  # filer host and port

[cluster.c2]
master = ""
filer = ""
`
)