# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file to one of the locations, with descending priority
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################

[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
#max_file_name_length = 255

####################################################
# The following are filer store options
####################################################
[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2" # directory to store level db files

[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files

[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files

[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file
[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS `filemeta` (
#   `dirhash`   BIGINT       NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
#   `name`      VARCHAR(766) NOT NULL COMMENT 'directory or file name',
#   `directory` TEXT         NOT NULL COMMENT 'full path to parent directory',
#   `meta`      LONGBLOB,
#   PRIMARY KEY (`dirhash`, `name`)
# ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
[mysql2] # or memsql, tidb
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS `%s` (
    `dirhash`   BIGINT       NOT NULL,
    `name`      VARCHAR(766) NOT NULL,
    `directory` TEXT         NOT NULL,
    `meta`      LONGBLOB,
    PRIMARY KEY (`dirhash`, `name`)
  ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""
[postgres2]
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS "%s" (
    dirhash   BIGINT,
    name      VARCHAR(65535),
    directory VARCHAR(65535),
    meta      bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""
[cassandra]
# CREATE TABLE filemeta (
#   directory varchar,
#   name varchar,
#   meta blob,
#   PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
    "localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""
# Gocql connection timeout, default: 600ms
connection_timeout_millisecond = 600

[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"
[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis2_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster2]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis_lua]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_lua_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_lua_cluster]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis3] # beta
enabled = false
address = "localhost:6379"
password = ""
database = 0

[redis3_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster3] # beta
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
[etcd]
enabled = false
servers = "localhost:2379"
username = ""
password = ""
key_prefix = "seaweedfs."
timeout = "3s"

[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"

[elastic7]
enabled = false
servers = [
    "http://localhost1:9200",
    "http://localhost2:9200",
    "http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increasing the value is recommended; be sure the value in Elastic is greater or equal here
index.max_result_window = 10000
[arangodb] # in development, don't use it
enabled = false
db_name = "seaweedfs"
servers = ["http://localhost:8529"] # list of servers to connect to
# only basic auth supported for now
username = ""
password = ""
# skip tls cert validation
insecure_skip_verify = true

[ydb] # https://ydb.tech/
enabled = false
dsn = "grpc://localhost:2136?database=/local"
prefix = "seaweedfs"
useBucketPrefix = true # Fast Bucket Deletion
poolSizeLimit = 50
dialTimeOut = 10
# Authenticate produced with one of next environment variables:
#   YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS=<path/to/sa_key_file> — used service account key file by path
#   YDB_ANONYMOUS_CREDENTIALS="1" — used for authenticate with anonymous access. Anonymous access needs for connect to testing YDB installation
#   YDB_METADATA_CREDENTIALS="1" — used metadata service for authenticate to YDB from yandex cloud virtual machine or from yandex function
#   YDB_ACCESS_TOKEN_CREDENTIALS=<access_token> — used for authenticate to YDB with short-life access token. For example, access token may be IAM token
##########################
##########################
# To add path-specific filer store:
#
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################

[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1
[tikv]
enabled = false
# If you have many pd address, use ',' to split them:
#   pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
pdaddrs = "localhost:2379"
# Concurrency for TiKV delete range
deleterange_concurrency = 1
# Enable 1PC
enable_1pc = false
# Set the CA certificate path
ca_path = ""
# Set the certificate path
cert_path = ""
# Set the private key path
key_path = ""
# The name list used to verify the cn name
verify_cn = ""