You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

462 lines
14 KiB

5 years ago
4 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
5 years ago
5 years ago
5 years ago
5 years ago
  1. package command
  2. import (
  3. "io/ioutil"
  4. "path/filepath"
  5. )
// init wires cmdScaffold.Run to runScaffold after both package-level
// declarations exist. Doing it here (rather than in the cmdScaffold
// literal) avoids an initialization cycle: the Command literal would
// otherwise reference runScaffold, which references cmdScaffold's flags.
func init() {
	cmdScaffold.Run = runScaffold // break init cycle
}
  9. var cmdScaffold = &Command{
  10. UsageLine: "scaffold -config=[filer|notification|replication|security|master]",
  11. Short: "generate basic configuration files",
  12. Long: `Generate filer.toml with all possible configurations for you to customize.
  13. The options can also be overwritten by environment variables.
  14. For example, the filer.toml mysql password can be overwritten by environment variable
  15. export WEED_MYSQL_PASSWORD=some_password
  16. Environment variable rules:
  17. * Prefix the variable name with "WEED_"
  18. * Upppercase the reset of variable name.
  19. * Replace '.' with '_'
  20. `,
  21. }
var (
	// outputPath is the -output flag: a directory to save the generated
	// <config>.toml file into; when empty the content is printed instead.
	outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
	// config is the -config flag: selects which example file to generate.
	// An unrecognized value makes runScaffold fail with a usage hint.
	config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate")
)
  26. func runScaffold(cmd *Command, args []string) bool {
  27. content := ""
  28. switch *config {
  29. case "filer":
  30. content = FILER_TOML_EXAMPLE
  31. case "notification":
  32. content = NOTIFICATION_TOML_EXAMPLE
  33. case "replication":
  34. content = REPLICATION_TOML_EXAMPLE
  35. case "security":
  36. content = SECURITY_TOML_EXAMPLE
  37. case "master":
  38. content = MASTER_TOML_EXAMPLE
  39. }
  40. if content == "" {
  41. println("need a valid -config option")
  42. return false
  43. }
  44. if *outputPath != "" {
  45. ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
  46. } else {
  47. println(content)
  48. }
  49. return true
  50. }
  51. const (
  52. FILER_TOML_EXAMPLE = `
  53. # A sample TOML config file for SeaweedFS filer store
  54. # Used with "weed filer" or "weed server -filer"
  55. # Put this file to one of the location, with descending priority
  56. # ./filer.toml
  57. # $HOME/.seaweedfs/filer.toml
  58. # /etc/seaweedfs/filer.toml
  59. ####################################################
  60. # Customizable filer server options
  61. ####################################################
  62. [filer.options]
  63. # with http DELETE, by default the filer would check whether a folder is empty.
  64. # recursive_delete will delete all sub folders and files, similar to "rm -Rf"
  65. recursive_delete = false
  66. # directories under this folder will be automatically creating a separate bucket
  67. buckets_folder = "/buckets"
  68. ####################################################
  69. # The following are filer store options
  70. ####################################################
  71. [leveldb2]
  72. # local on disk, mostly for simple single-machine setup, fairly scalable
  73. # faster than previous leveldb, recommended.
  74. enabled = true
  75. dir = "." # directory to store level db files
  76. [mysql] # or tidb
  77. # CREATE TABLE IF NOT EXISTS filemeta (
  78. # dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
  79. # name VARCHAR(1000) COMMENT 'directory or file name',
  80. # directory TEXT COMMENT 'full path to parent directory',
  81. # meta LONGBLOB,
  82. # PRIMARY KEY (dirhash, name)
  83. # ) DEFAULT CHARSET=utf8;
  84. enabled = false
  85. hostname = "localhost"
  86. port = 3306
  87. username = "root"
  88. password = ""
  89. database = "" # create or use an existing database
  90. connection_max_idle = 2
  91. connection_max_open = 100
  92. interpolateParams = false
  93. [postgres] # or cockroachdb
  94. # CREATE TABLE IF NOT EXISTS filemeta (
  95. # dirhash BIGINT,
  96. # name VARCHAR(65535),
  97. # directory VARCHAR(65535),
  98. # meta bytea,
  99. # PRIMARY KEY (dirhash, name)
  100. # );
  101. enabled = false
  102. hostname = "localhost"
  103. port = 5432
  104. username = "postgres"
  105. password = ""
  106. database = "" # create or use an existing database
  107. sslmode = "disable"
  108. connection_max_idle = 100
  109. connection_max_open = 100
  110. [cassandra]
  111. # CREATE TABLE filemeta (
  112. # directory varchar,
  113. # name varchar,
  114. # meta blob,
  115. # PRIMARY KEY (directory, name)
  116. # ) WITH CLUSTERING ORDER BY (name ASC);
  117. enabled = false
  118. keyspace="seaweedfs"
  119. hosts=[
  120. "localhost:9042",
  121. ]
  122. username=""
  123. password=""
  124. # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
  125. superLargeDirectories = []
  126. [hbase]
  127. zkquorum = ""
  128. table = "seaweedfs"
  129. [redis2]
  130. enabled = false
  131. address = "localhost:6379"
  132. password = ""
  133. database = 0
  134. # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
  135. superLargeDirectories = []
  136. [redis_cluster2]
  137. enabled = false
  138. addresses = [
  139. "localhost:30001",
  140. "localhost:30002",
  141. "localhost:30003",
  142. "localhost:30004",
  143. "localhost:30005",
  144. "localhost:30006",
  145. ]
  146. password = ""
  147. # allows reads from slave servers or the master, but all writes still go to the master
  148. readOnly = true
  149. # automatically use the closest Redis server for reads
  150. routeByLatency = true
  151. # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
  152. superLargeDirectories = []
  153. [etcd]
  154. enabled = false
  155. servers = "localhost:2379"
  156. timeout = "3s"
  157. [mongodb]
  158. enabled = false
  159. uri = "mongodb://localhost:27017"
  160. option_pool_size = 0
  161. database = "seaweedfs"
  162. [elastic7]
  163. enabled = false
  164. servers = [
  165. "http://localhost1:9200",
  166. "http://localhost2:9200",
  167. "http://localhost3:9200",
  168. ]
  169. username = ""
  170. password = ""
  171. sniff_enabled = false
  172. healthcheck_enabled = false
  173. # increase the value is recommend, be sure the value in Elastic is greater or equal here
  174. index.max_result_window = 10000
  175. ##########################
  176. ##########################
  177. # To add path-specific filer store:
  178. #
  179. # 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
  180. # 2. Add a location configuraiton. E.g., location = "/tmp/"
  181. # 3. Copy and customize all other configurations.
  182. # Make sure they are not the same if using the same store type!
  183. # 4. Set enabled to true
  184. #
  185. # The following is just using cassandra as an example
  186. ##########################
  187. [redis2.tmp]
  188. enabled = false
  189. location = "/tmp/"
  190. address = "localhost:6379"
  191. password = ""
  192. database = 1
  193. `
  194. NOTIFICATION_TOML_EXAMPLE = `
  195. # A sample TOML config file for SeaweedFS filer store
  196. # Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
  197. # Put this file to one of the location, with descending priority
  198. # ./notification.toml
  199. # $HOME/.seaweedfs/notification.toml
  200. # /etc/seaweedfs/notification.toml
  201. ####################################################
  202. # notification
  203. # send and receive filer updates for each file to an external message queue
  204. ####################################################
  205. [notification.log]
  206. # this is only for debugging perpose and does not work with "weed filer.replicate"
  207. enabled = false
  208. [notification.kafka]
  209. enabled = false
  210. hosts = [
  211. "localhost:9092"
  212. ]
  213. topic = "seaweedfs_filer"
  214. offsetFile = "./last.offset"
  215. offsetSaveIntervalSeconds = 10
  216. [notification.aws_sqs]
  217. # experimental, let me know if it works
  218. enabled = false
  219. aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
  220. aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
  221. region = "us-east-2"
  222. sqs_queue_name = "my_filer_queue" # an existing queue name
  223. [notification.google_pub_sub]
  224. # read credentials doc at https://cloud.google.com/docs/authentication/getting-started
  225. enabled = false
  226. google_application_credentials = "/path/to/x.json" # path to json credential file
  227. project_id = "" # an existing project id
  228. topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists
  229. [notification.gocdk_pub_sub]
  230. # The Go Cloud Development Kit (https://gocloud.dev).
  231. # PubSub API (https://godoc.org/gocloud.dev/pubsub).
  232. # Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
  233. enabled = false
  234. # This URL will Dial the RabbitMQ server at the URL in the environment
  235. # variable RABBIT_SERVER_URL and open the exchange "myexchange".
  236. # The exchange must have already been created by some other means, like
  237. # the RabbitMQ management plugin.
  238. topic_url = "rabbit://myexchange"
  239. sub_url = "rabbit://myqueue"
  240. `
  241. REPLICATION_TOML_EXAMPLE = `
  242. # A sample TOML config file for replicating SeaweedFS filer
  243. # Used with "weed filer.replicate"
  244. # Put this file to one of the location, with descending priority
  245. # ./replication.toml
  246. # $HOME/.seaweedfs/replication.toml
  247. # /etc/seaweedfs/replication.toml
  248. [source.filer]
  249. enabled = true
  250. grpcAddress = "localhost:18888"
  251. # all files under this directory tree are replicated.
  252. # this is not a directory on your hard drive, but on your filer.
  253. # i.e., all files with this "prefix" are sent to notification message queue.
  254. directory = "/buckets"
  255. [sink.filer]
  256. enabled = false
  257. grpcAddress = "localhost:18888"
  258. # all replicated files are under this directory tree
  259. # this is not a directory on your hard drive, but on your filer.
  260. # i.e., all received files will be "prefixed" to this directory.
  261. directory = "/backup"
  262. replication = ""
  263. collection = ""
  264. ttlSec = 0
  265. [sink.s3]
  266. # read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
  267. # default loads credentials from the shared credentials file (~/.aws/credentials).
  268. enabled = false
  269. aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
  270. aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
  271. region = "us-east-2"
  272. bucket = "your_bucket_name" # an existing bucket
  273. directory = "/" # destination directory
  274. endpoint = ""
  275. [sink.google_cloud_storage]
  276. # read credentials doc at https://cloud.google.com/docs/authentication/getting-started
  277. enabled = false
  278. google_application_credentials = "/path/to/x.json" # path to json credential file
  279. bucket = "your_bucket_seaweedfs" # an existing bucket
  280. directory = "/" # destination directory
  281. [sink.azure]
  282. # experimental, let me know if it works
  283. enabled = false
  284. account_name = ""
  285. account_key = ""
  286. container = "mycontainer" # an existing container
  287. directory = "/" # destination directory
  288. [sink.backblaze]
  289. enabled = false
  290. b2_account_id = ""
  291. b2_master_application_key = ""
  292. bucket = "mybucket" # an existing bucket
  293. directory = "/" # destination directory
  294. `
  295. SECURITY_TOML_EXAMPLE = `
  296. # Put this file to one of the location, with descending priority
  297. # ./security.toml
  298. # $HOME/.seaweedfs/security.toml
  299. # /etc/seaweedfs/security.toml
  300. # this file is read by master, volume server, and filer
  301. # the jwt signing key is read by master and volume server.
  302. # a jwt defaults to expire after 10 seconds.
  303. [jwt.signing]
  304. key = ""
  305. expires_after_seconds = 10 # seconds
  306. # jwt for read is only supported with master+volume setup. Filer does not support this mode.
  307. [jwt.signing.read]
  308. key = ""
  309. expires_after_seconds = 10 # seconds
  310. # all grpc tls authentications are mutual
  311. # the values for the following ca, cert, and key are paths to the PERM files.
  312. # the host name is not checked, so the PERM files can be shared.
  313. [grpc]
  314. ca = ""
  315. [grpc.volume]
  316. cert = ""
  317. key = ""
  318. [grpc.master]
  319. cert = ""
  320. key = ""
  321. [grpc.filer]
  322. cert = ""
  323. key = ""
  324. [grpc.msg_broker]
  325. cert = ""
  326. key = ""
  327. # use this for any place needs a grpc client
  328. # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
  329. [grpc.client]
  330. cert = ""
  331. key = ""
  332. # volume server https options
  333. # Note: work in progress!
  334. # this does not work with other clients, e.g., "weed filer|mount" etc, yet.
  335. [https.client]
  336. enabled = true
  337. [https.volume]
  338. cert = ""
  339. key = ""
  340. `
  341. MASTER_TOML_EXAMPLE = `
  342. # Put this file to one of the location, with descending priority
  343. # ./master.toml
  344. # $HOME/.seaweedfs/master.toml
  345. # /etc/seaweedfs/master.toml
  346. # this file is read by master
  347. [master.maintenance]
  348. # periodically run these scripts are the same as running them from 'weed shell'
  349. scripts = """
  350. lock
  351. ec.encode -fullPercent=95 -quietFor=1h
  352. ec.rebuild -force
  353. ec.balance -force
  354. volume.balance -force
  355. volume.fix.replication
  356. unlock
  357. """
  358. sleep_minutes = 17 # sleep minutes between each script execution
  359. [master.filer]
  360. default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
  361. [master.sequencer]
  362. type = "raft" # Choose [raft|etcd] type for storing the file id sequence
  363. # when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
  364. # example : http://127.0.0.1:2379,http://127.0.0.1:2389
  365. sequencer_etcd_urls = "http://127.0.0.1:2379"
  366. # configurations for tiered cloud storage
  367. # old volumes are transparently moved to cloud for cost efficiency
  368. [storage.backend]
  369. [storage.backend.s3.default]
  370. enabled = false
  371. aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
  372. aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
  373. region = "us-east-2"
  374. bucket = "your_bucket_name" # an existing bucket
  375. endpoint = ""
  376. # create this number of logical volumes if no more writable volumes
  377. # count_x means how many copies of data.
  378. # e.g.:
  379. # 000 has only one copy, copy_1
  380. # 010 and 001 has two copies, copy_2
  381. # 011 has only 3 copies, copy_3
  382. [master.volume_growth]
  383. copy_1 = 7 # create 1 x 7 = 7 actual volumes
  384. copy_2 = 6 # create 2 x 6 = 12 actual volumes
  385. copy_3 = 3 # create 3 x 3 = 9 actual volumes
  386. copy_other = 1 # create n x 1 = n actual volumes
  387. # configuration flags for replication
  388. [master.replication]
  389. # any replication counts should be considered minimums. If you specify 010 and
  390. # have 3 different racks, that's still considered writable. Writes will still
  391. # try to replicate to all available volumes. You should only use this option
  392. # if you are doing your own replication or periodic sync of volumes.
  393. treat_replication_as_minimums = false
  394. `
  395. )