You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

350 lines
10 KiB

3 years ago
3 years ago
  1. # A sample TOML config file for SeaweedFS filer store
  2. # Used with "weed filer" or "weed server -filer"
  3. # Put this file in one of the following locations, in descending priority:
  4. # ./filer.toml
  5. # $HOME/.seaweedfs/filer.toml
  6. # /etc/seaweedfs/filer.toml
  7. ####################################################
  8. # Customizable filer server options
  9. ####################################################
  10. [filer.options]
  11. # with http DELETE, by default the filer would check whether a folder is empty.
  12. # recursive_delete will delete all sub folders and files, similar to "rm -Rf"
  13. recursive_delete = false
  14. ####################################################
  15. # The following are filer store options
  16. ####################################################
  17. [leveldb2]
  18. # local on disk, mostly for simple single-machine setup, fairly scalable
  19. # faster than previous leveldb, recommended.
  20. enabled = true
  21. dir = "./filerldb2" # directory to store level db files
  22. [leveldb3]
  23. # similar to leveldb2.
  24. # each bucket has its own meta store.
  25. enabled = false
  26. dir = "./filerldb3" # directory to store level db files
  27. [rocksdb]
  28. # local on disk, similar to leveldb
  29. # since it is using a C wrapper, you need to install rocksdb and build it by yourself
  30. enabled = false
  31. dir = "./filerrdb" # directory to store rocksdb files
  32. [sqlite]
  33. # local on disk, similar to leveldb
  34. enabled = false
  35. dbFile = "./filer.db" # sqlite db file
  36. [mysql] # or memsql, tidb
  37. # CREATE TABLE IF NOT EXISTS `filemeta` (
  38. # `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
  39. # `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name',
  40. # `directory` TEXT NOT NULL COMMENT 'full path to parent directory',
  41. # `meta` LONGBLOB,
  42. # PRIMARY KEY (`dirhash`, `name`)
  43. # ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
  44. enabled = false
  45. hostname = "localhost"
  46. port = 3306
  47. username = "root"
  48. password = ""
  49. database = "" # create or use an existing database
  50. connection_max_idle = 2
  51. connection_max_open = 100
  52. connection_max_lifetime_seconds = 0
  53. interpolateParams = false
  54. # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
  55. enableUpsert = true
  56. upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
  57. [mysql2] # or memsql, tidb
  58. enabled = false
  59. createTable = """
  60. CREATE TABLE IF NOT EXISTS `%s` (
  61. `dirhash` BIGINT NOT NULL,
  62. `name` VARCHAR(766) NOT NULL,
  63. `directory` TEXT NOT NULL,
  64. `meta` LONGBLOB,
  65. PRIMARY KEY (`dirhash`, `name`)
  66. ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
  67. """
  68. hostname = "localhost"
  69. port = 3306
  70. username = "root"
  71. password = ""
  72. database = "" # create or use an existing database
  73. connection_max_idle = 2
  74. connection_max_open = 100
  75. connection_max_lifetime_seconds = 0
  76. interpolateParams = false
  77. # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
  78. enableUpsert = true
  79. upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
  80. [postgres] # or cockroachdb, YugabyteDB
  81. # CREATE TABLE IF NOT EXISTS filemeta (
  82. # dirhash BIGINT,
  83. # name VARCHAR(65535),
  84. # directory VARCHAR(65535),
  85. # meta bytea,
  86. # PRIMARY KEY (dirhash, name)
  87. # );
  88. enabled = false
  89. hostname = "localhost"
  90. port = 5432
  91. username = "postgres"
  92. password = ""
  93. database = "postgres" # create or use an existing database
  94. schema = ""
  95. sslmode = "disable"
  96. connection_max_idle = 100
  97. connection_max_open = 100
  98. connection_max_lifetime_seconds = 0
  99. # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
  100. enableUpsert = true
  101. upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""
  102. [postgres2]
  103. enabled = false
  104. createTable = """
  105. CREATE TABLE IF NOT EXISTS "%s" (
  106. dirhash BIGINT,
  107. name VARCHAR(65535),
  108. directory VARCHAR(65535),
  109. meta bytea,
  110. PRIMARY KEY (dirhash, name)
  111. );
  112. """
  113. hostname = "localhost"
  114. port = 5432
  115. username = "postgres"
  116. password = ""
  117. database = "postgres" # create or use an existing database
  118. schema = ""
  119. sslmode = "disable"
  120. connection_max_idle = 100
  121. connection_max_open = 100
  122. connection_max_lifetime_seconds = 0
  123. # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
  124. enableUpsert = true
  125. upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""
  126. [cassandra]
  127. # CREATE TABLE filemeta (
  128. # directory varchar,
  129. # name varchar,
  130. # meta blob,
  131. # PRIMARY KEY (directory, name)
  132. # ) WITH CLUSTERING ORDER BY (name ASC);
  133. enabled = false
  134. keyspace = "seaweedfs"
  135. hosts = [
  136. "localhost:9042",
  137. ]
  138. username = ""
  139. password = ""
  140. # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
  141. superLargeDirectories = []
  142. # Name of the datacenter local to this filer, used as host selection fallback.
  143. localDC = ""
  144. # Gocql connection timeout, default: 600ms
  145. connection_timeout_millisecond = 600
  146. [hbase]
  147. enabled = false
  148. zkquorum = ""
  149. table = "seaweedfs"
  150. [redis2]
  151. enabled = false
  152. address = "localhost:6379"
  153. password = ""
  154. database = 0
  155. # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
  156. superLargeDirectories = []
  157. [redis2_sentinel]
  158. enabled = false
  159. addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
  160. masterName = "master"
  161. username = ""
  162. password = ""
  163. database = 0
  164. [redis_cluster2]
  165. enabled = false
  166. addresses = [
  167. "localhost:30001",
  168. "localhost:30002",
  169. "localhost:30003",
  170. "localhost:30004",
  171. "localhost:30005",
  172. "localhost:30006",
  173. ]
  174. password = ""
  175. # allows reads from slave servers or the master, but all writes still go to the master
  176. readOnly = false
  177. # automatically use the closest Redis server for reads
  178. routeByLatency = false
  179. # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
  180. superLargeDirectories = []
  181. [redis_lua]
  182. enabled = false
  183. address = "localhost:6379"
  184. password = ""
  185. database = 0
  186. # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
  187. superLargeDirectories = []
  188. [redis_lua_sentinel]
  189. enabled = false
  190. addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
  191. masterName = "master"
  192. username = ""
  193. password = ""
  194. database = 0
  195. [redis_lua_cluster]
  196. enabled = false
  197. addresses = [
  198. "localhost:30001",
  199. "localhost:30002",
  200. "localhost:30003",
  201. "localhost:30004",
  202. "localhost:30005",
  203. "localhost:30006",
  204. ]
  205. password = ""
  206. # allows reads from slave servers or the master, but all writes still go to the master
  207. readOnly = false
  208. # automatically use the closest Redis server for reads
  209. routeByLatency = false
  210. # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
  211. superLargeDirectories = []
  212. [redis3] # beta
  213. enabled = false
  214. address = "localhost:6379"
  215. password = ""
  216. database = 0
  217. [redis3_sentinel]
  218. enabled = false
  219. addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
  220. masterName = "master"
  221. username = ""
  222. password = ""
  223. database = 0
  224. [redis_cluster3] # beta
  225. enabled = false
  226. addresses = [
  227. "localhost:30001",
  228. "localhost:30002",
  229. "localhost:30003",
  230. "localhost:30004",
  231. "localhost:30005",
  232. "localhost:30006",
  233. ]
  234. password = ""
  235. # allows reads from slave servers or the master, but all writes still go to the master
  236. readOnly = false
  237. # automatically use the closest Redis server for reads
  238. routeByLatency = false
  239. [etcd]
  240. enabled = false
  241. servers = "localhost:2379"
  242. username = ""
  243. password = ""
  244. key_prefix = "seaweedfs."
  245. timeout = "3s"
  246. [mongodb]
  247. enabled = false
  248. uri = "mongodb://localhost:27017"
  249. option_pool_size = 0
  250. database = "seaweedfs"
  251. [elastic7]
  252. enabled = false
  253. servers = [
  254. "http://localhost1:9200",
  255. "http://localhost2:9200",
  256. "http://localhost3:9200",
  257. ]
  258. username = ""
  259. password = ""
  260. sniff_enabled = false
  261. healthcheck_enabled = false
  262. # increasing this value is recommended; make sure the value in Elastic is greater than or equal to the value here
  263. index.max_result_window = 10000
  264. [arangodb] # in development; don't use it
  265. enabled = false
  266. db_name = "seaweedfs"
  267. servers=["http://localhost:8529"] # list of servers to connect to
  268. # only basic auth supported for now
  269. username=""
  270. password=""
  271. # skip tls cert validation
  272. insecure_skip_verify = true
  273. [ydb] # https://ydb.tech/
  274. enabled = false
  275. dsn = "grpc://localhost:2136?database=/local"
  276. prefix = "seaweedfs"
  277. useBucketPrefix = true # Fast Bucket Deletion
  278. poolSizeLimit = 50
  279. dialTimeOut = 10
  280. # Authentication is configured with one of the following environment variables:
  281. # YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS=<path/to/sa_key_file> — authenticate with the service account key file at the given path
  282. # YDB_ANONYMOUS_CREDENTIALS="1" — authenticate with anonymous access (needed to connect to a test YDB installation)
  283. # YDB_METADATA_CREDENTIALS="1" — authenticate to YDB via the metadata service, from a Yandex Cloud virtual machine or a Yandex Cloud Function
  284. # YDB_ACCESS_TOKEN_CREDENTIALS=<access_token> — authenticate to YDB with a short-lived access token (for example, an IAM token)
  285. ##########################
  286. ##########################
  287. # To add path-specific filer store:
  288. #
  289. # 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
  290. # 2. Add a location configuration. E.g., location = "/tmp/"
  291. # 3. Copy and customize all other configurations.
  292. # Make sure they are not the same if using the same store type!
  293. # 4. Set enabled to true
  294. #
  295. # The following is just using redis as an example
  296. ##########################
  297. [redis2.tmp]
  298. enabled = false
  299. location = "/tmp/"
  300. address = "localhost:6379"
  301. password = ""
  302. database = 1
  303. [tikv]
  304. enabled = false
  305. # If you have multiple PD addresses, separate them with ',':
  306. # pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
  307. pdaddrs = "localhost:2379"
  308. # Concurrency for TiKV delete range
  309. deleterange_concurrency = 1
  310. # Enable 1PC
  311. enable_1pc = false
  312. # Set the CA certificate path
  313. ca_path=""
  314. # Set the certificate path
  315. cert_path=""
  316. # Set the private key path
  317. key_path=""
  318. # The name list used to verify the cn name
  319. verify_cn=""