package command

import (
	"io/ioutil"
	"path/filepath"
)

func init() {
	cmdScaffold.Run = runScaffold // break init cycle
}
var cmdScaffold = &Command{
	UsageLine: "scaffold -config=[filer|notification|replication|security|master]",
	Short:     "generate basic configuration files",
	Long: `Generate filer.toml with all possible configurations for you to customize.

	The options can also be overwritten by environment variables.
	For example, the filer.toml mysql password can be overwritten by the environment variable
		export WEED_MYSQL_PASSWORD=some_password

	Environment variable rules:
		* Prefix the variable name with "WEED_"
		* Uppercase the rest of the variable name.
		* Replace '.' with '_'
	`,
}
var (
	outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
	config     = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate")
)
// runScaffold prints the selected example configuration, or writes it to the -output directory if given.
func runScaffold(cmd *Command, args []string) bool {
	content := ""
	switch *config {
	case "filer":
		content = FILER_TOML_EXAMPLE
	case "notification":
		content = NOTIFICATION_TOML_EXAMPLE
	case "replication":
		content = REPLICATION_TOML_EXAMPLE
	case "security":
		content = SECURITY_TOML_EXAMPLE
	case "master":
		content = MASTER_TOML_EXAMPLE
	}
	if content == "" {
		println("need a valid -config option")
		return false
	}

	if *outputPath != "" {
		if err := ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644); err != nil {
			println("failed to write", *config+".toml:", err.Error())
			return false
		}
	} else {
		println(content)
	}
	return true
}
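// Example invocations (a sketch; assumes the standard "weed" CLI entry point):
//
//	weed scaffold -config=filer              # print filer.toml to stdout
//	weed scaffold -config=master -output=.   # write ./master.toml
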
const (
	FILER_TOML_EXAMPLE = `
# A sample TOML config file for the SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file in one of these locations, in descending priority:
#   ./filer.toml
#   $HOME/.seaweedfs/filer.toml
#   /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################
[filer.options]
# with http DELETE, by default the filer checks whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
# each directory created under this folder automatically becomes a separate bucket
buckets_folder = "/buckets"
# directories under this folder are used to store message queue data
queues_folder = "/queues"

####################################################
# The following are filer store options
####################################################
[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "."  # directory to store level db files

[mysql]  # or tidb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT         COMMENT 'first 64 bits of MD5 hash value of directory field',
#   name      VARCHAR(1000)  COMMENT 'directory or file name',
#   directory TEXT           COMMENT 'full path to parent directory',
#   meta      LONGBLOB,
#   PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = ""  # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
interpolateParams = false

[postgres]  # or cockroachdb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = ""  # create or use an existing database
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100

[cassandra]
# CREATE TABLE filemeta (
#   directory varchar,
#   name      varchar,
#   meta      blob,
#   PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
	"localhost:9042",
]

[redis]
enabled = false
address = "localhost:6379"
password = ""
database = 0

[redis_cluster]
enabled = false
addresses = [
	"localhost:30001",
	"localhost:30002",
	"localhost:30003",
	"localhost:30004",
	"localhost:30005",
	"localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = true
# automatically use the closest Redis server for reads
routeByLatency = true

[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"

[tikv]
enabled = false
pdAddress = "192.168.199.113:2379"
`
	NOTIFICATION_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer notifications
# Used by "weed filer" or "weed server -filer", and by "weed filer.replicate"
# Put this file in one of these locations, in descending priority:
#   ./notification.toml
#   $HOME/.seaweedfs/notification.toml
#   /etc/seaweedfs/notification.toml

####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false

[notification.kafka]
enabled = false
hosts = [
	"localhost:9092"
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10

[notification.aws_sqs]
# experimental, let me know if it works
enabled = false
aws_access_key_id = ""      # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = ""  # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
sqs_queue_name = "my_filer_queue"  # an existing queue name

[notification.google_pub_sub]
# read the credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json"  # path to json credential file
project_id = ""                  # an existing project id
topic = "seaweedfs_filer_topic"  # a topic, auto-created if it does not exist

[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin.
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`
	REPLICATION_TOML_EXAMPLE = `
# A sample TOML config file for replicating the SeaweedFS filer
# Used with "weed filer.replicate"
# Put this file in one of these locations, in descending priority:
#   ./replication.toml
#   $HOME/.seaweedfs/replication.toml
#   /etc/seaweedfs/replication.toml

[source.filer]
enabled = true
grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to the notification message queue.
directory = "/buckets"

[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are put under this directory tree.
# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" with this directory.
directory = "/backup"
replication = ""
collection = ""
ttlSec = 0

[sink.s3]
# read the credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# by default, loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = ""      # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = ""  # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name"  # an existing bucket
directory = "/"              # destination directory

[sink.google_cloud_storage]
# read the credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json"  # path to json credential file
bucket = "your_bucket_seaweedfs"  # an existing bucket
directory = "/"                   # destination directory

[sink.azure]
# experimental, let me know if it works
enabled = false
account_name = ""
account_key = ""
container = "mycontainer"  # an existing container
directory = "/"            # destination directory

[sink.backblaze]
enabled = false
b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket"  # an existing bucket
directory = "/"      # destination directory
`
	SECURITY_TOML_EXAMPLE = `
# Put this file in one of these locations, in descending priority:
#   ./security.toml
#   $HOME/.seaweedfs/security.toml
#   /etc/seaweedfs/security.toml
# this file is read by the master, volume servers, and filers

# the jwt signing key is read by the master and volume servers.
# a jwt expires after 10 seconds by default.
[jwt.signing]
key = ""
expires_after_seconds = 10  # seconds

# jwt for reads is only supported with a master+volume setup. The filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10  # seconds

# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
[grpc]
ca = ""

[grpc.volume]
cert = ""
key = ""

[grpc.master]
cert = ""
key = ""

[grpc.filer]
cert = ""
key = ""

[grpc.msg_broker]
cert = ""
key = ""

# use this for any place that needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""
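
# For illustration only (hypothetical paths): with mutual TLS, the shared ca and each
# component's cert/key pair would point at PEM files issued by the same CA, for example:
#   [grpc]
#   ca = "/etc/seaweedfs/tls/ca.pem"
#   [grpc.volume]
#   cert = "/etc/seaweedfs/tls/volume.pem"
#   key = "/etc/seaweedfs/tls/volume.key"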

# volume server https options
# Note: work in progress!
# this does not yet work with other clients, e.g., "weed filer|mount".
[https.client]
enabled = true

[https.volume]
cert = ""
key = ""
`
	MASTER_TOML_EXAMPLE = `
# Put this file in one of these locations, in descending priority:
#   ./master.toml
#   $HOME/.seaweedfs/master.toml
#   /etc/seaweedfs/master.toml
# this file is read by the master

[master.maintenance]
# periodically run these scripts, the same as running them from 'weed shell'
scripts = """
	ec.encode -fullPercent=95 -quietFor=1h
	ec.rebuild -force
	ec.balance -force
	volume.balance -force
"""
sleep_minutes = 17  # sleep minutes between each script execution

[master.filer]
default_filer_url = "http://localhost:8888/"

[master.sequencer]
type = "memory"  # Choose [memory|etcd] type for storing the file id sequence
# when sequencer.type = etcd, set the listen client urls of the etcd cluster that stores the file id sequence
# example: http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"

# configurations for tiered cloud storage
# old volumes are transparently moved to the cloud for cost efficiency
[storage.backend]
	[storage.backend.s3.default]
	enabled = false
	aws_access_key_id = ""      # if empty, loads from the shared credentials file (~/.aws/credentials).
	aws_secret_access_key = ""  # if empty, loads from the shared credentials file (~/.aws/credentials).
	region = "us-east-2"
	bucket = "your_bucket_name"  # an existing bucket

# create this number of logical volumes if there are no more writable volumes
# count_x means how many copies of the data.
# e.g.:
#   000 has only one copy, count_1
#   010 and 001 have two copies, count_2
#   011 has three copies, count_3
[master.volume_growth]
count_1 = 7      # create 1 x 7 = 7 actual volumes
count_2 = 6      # create 2 x 6 = 12 actual volumes
count_3 = 3      # create 3 x 3 = 9 actual volumes
count_other = 1  # create n x 1 = n actual volumes
`
)