# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file in one of these locations, with descending priority
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################

[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
#max_file_name_length = 255

####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2" # directory to store level db files

[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files

[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files

[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file

[dameng] # Dameng (DM8) database
# CREATE TABLE SEAWEEDFS.FILEMETA (
# DIRHASH BIGINT NOT NULL,
# NAME VARCHAR(4000) NOT NULL,
# DIRECTORY TEXT NOT NULL,
# META LONGVARBINARY,
# PRIMARY KEY (DIRHASH,NAME)
# );
# NOTE(review): only one filer store may be enabled at a time (leveldb2 above is
# already enabled), so this store defaults to disabled like the others.
enabled = false
# dsn will take priority over "hostname, port, username, password, database".
# [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
dsn = ""
hostname = ""
port = 5236
username = "SYSDBA"
password = "SYSDBA001"
database = "seaweedfs" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """MERGE INTO %s AS target
USING (SELECT ? AS dirhash, ? AS name, ? AS directory, ? AS meta FROM dual) AS source
ON (target.dirhash = source.dirhash AND target.name = source.name)
WHEN MATCHED THEN
UPDATE SET target.meta = source.meta
WHEN NOT MATCHED THEN
INSERT (dirhash, name, directory, meta)
VALUES (source.dirhash, source.name, source.directory, source.meta);"""

[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS `filemeta` (
# `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
# `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name',
# `directory` TEXT NOT NULL COMMENT 'full path to parent directory',
# `meta` LONGBLOB,
# PRIMARY KEY (`dirhash`, `name`)
# ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
enabled = false
# dsn will take priority over "hostname, port, username, password, database".
# [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
dsn = "root@tcp(localhost:3306)/seaweedfs?collation=utf8mb4_bin"
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""

[mysql2] # or memsql, tidb
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS `%s` (
`dirhash` BIGINT NOT NULL,
`name` VARCHAR(766) NOT NULL,
`directory` TEXT NOT NULL,
`meta` LONGBLOB,
PRIMARY KEY (`dirhash`, `name`)
) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""

[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
# name VARCHAR(65535),
# directory VARCHAR(65535),
# meta bytea,
# PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""

[postgres2]
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS "%s" (
dirhash BIGINT,
name VARCHAR(65535),
directory VARCHAR(65535),
meta bytea,
PRIMARY KEY (dirhash, name)
);
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""

[cassandra]
# CREATE TABLE filemeta (
# directory varchar,
# name varchar,
# meta blob,
# PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
    "localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""
# Gocql connection timeout, default: 600ms
connection_timeout_millisecond = 600

[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"

[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis2_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster2]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_lua]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_lua_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_lua_cluster]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis3] # beta
enabled = false
address = "localhost:6379"
password = ""
database = 0

[redis3_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster3] # beta
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false

[etcd]
enabled = false
servers = "localhost:2379"
username = ""
password = ""
key_prefix = "seaweedfs."
timeout = "3s"
# Set the CA certificate path
tls_ca_file = ""
# Set the client certificate path
tls_client_crt_file = ""
# Set the client private key path
tls_client_key_file = ""

[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
username = ""
password = ""
ssl = false
ssl_ca_file = ""
ssl_cert_file = ""
ssl_key_file = ""
insecure_skip_verify = false
option_pool_size = 0
database = "seaweedfs"

[elastic7]
enabled = false
servers = [
    "http://localhost1:9200",
    "http://localhost2:9200",
    "http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# Increasing this value is recommended; be sure the value in Elastic is greater than or equal to this one.
index.max_result_window = 10000

[arangodb] # in development, don't use it
enabled = false
db_name = "seaweedfs"
servers = ["http://localhost:8529"] # list of servers to connect to
# only basic auth supported for now
username = ""
password = ""
# skip tls cert validation
insecure_skip_verify = true

[ydb] # https://ydb.tech/
enabled = false
dsn = "grpc://localhost:2136?database=/local"
prefix = "seaweedfs"
useBucketPrefix = true # Fast Bucket Deletion
poolSizeLimit = 50
dialTimeOut = 10
# Authentication is configured with one of the following environment variables:
# YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS=<path/to/sa_key_file> — authenticate with a service account key file
# YDB_ANONYMOUS_CREDENTIALS="1" — anonymous access, useful for connecting to a testing YDB installation
# YDB_METADATA_CREDENTIALS="1" — use the metadata service to authenticate from a Yandex Cloud virtual machine or function
# YDB_ACCESS_TOKEN_CREDENTIALS=<access_token> — authenticate with a short-lived access token (for example, an IAM token)

##########################
##########################
# To add path-specific filer store:
#
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################

[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1

[tikv]
enabled = false
# If you have multiple PD addresses, separate them with ',':
# pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
pdaddrs = "localhost:2379"
# Concurrency for TiKV delete range
deleterange_concurrency = 1
# Enable 1PC
enable_1pc = false
# Set the CA certificate path
ca_path = ""
# Set the certificate path
cert_path = ""
# Set the private key path
key_path = ""
# The name list used to verify the CN name
verify_cn = ""