  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU LGPLv2.
  5. See the file COPYING.LIB
  6. */
  7. /* For pthread_rwlock_t */
  8. #ifndef _GNU_SOURCE
  9. #define _GNU_SOURCE
  10. #endif
  11. #include "crc32b.h"
  12. #include "fuse_node.h"
  13. #include "khash.h"
  14. #include "kvec.h"
  15. #include "node.h"
  16. #include "config.h"
  17. #include "fuse_dirents.h"
  18. #include "fuse_i.h"
  19. #include "fuse_kernel.h"
  20. #include "fuse_lowlevel.h"
  21. #include "fuse_misc.h"
  22. #include "fuse_opt.h"
  23. #include "fuse_pollhandle.h"
  24. #include "fuse_msgbuf.hpp"
  25. #include <assert.h>
  26. #include <dlfcn.h>
  27. #include <errno.h>
  28. #include <fcntl.h>
  29. #include <inttypes.h>
  30. #include <limits.h>
  31. #include <poll.h>
  32. #include <signal.h>
  33. #include <stdbool.h>
  34. #include <stddef.h>
  35. #include <stdint.h>
  36. #include <stdio.h>
  37. #include <stdlib.h>
  38. #include <string.h>
  39. #include <sys/file.h>
  40. #include <sys/mman.h>
  41. #include <sys/param.h>
  42. #include <sys/time.h>
  43. #include <sys/uio.h>
  44. #include <syslog.h>
  45. #include <time.h>
  46. #include <unistd.h>
  47. #ifdef HAVE_MALLOC_TRIM
  48. #include <malloc.h>
  49. #endif
  50. #define FUSE_UNKNOWN_INO UINT64_MAX
  51. #define OFFSET_MAX 0x7fffffffffffffffLL
  52. #define NODE_TABLE_MIN_SIZE 8192
  53. #define PARAM(inarg) ((void*)(((char*)(inarg)) + sizeof(*(inarg))))
  54. static int g_LOG_METRICS = 0;
  55. struct fuse_config
  56. {
  57. unsigned int uid;
  58. unsigned int gid;
  59. unsigned int umask;
  60. int remember;
  61. int debug;
  62. int nogc;
  63. int set_mode;
  64. int set_uid;
  65. int set_gid;
  66. int help;
  67. };
  68. struct fuse_fs
  69. {
  70. struct fuse_operations op;
  71. };
  72. struct lock_queue_element
  73. {
  74. struct lock_queue_element *next;
  75. pthread_cond_t cond;
  76. uint64_t nodeid1;
  77. const char *name1;
  78. char **path1;
  79. node_t **wnode1;
  80. uint64_t nodeid2;
  81. const char *name2;
  82. char **path2;
  83. node_t **wnode2;
  84. int err;
  85. bool done : 1;
  86. };
  87. struct node_table
  88. {
  89. node_t **array;
  90. size_t use;
  91. size_t size;
  92. size_t split;
  93. };
  94. struct list_head
  95. {
  96. struct list_head *next;
  97. struct list_head *prev;
  98. };
  99. typedef struct remembered_node_t remembered_node_t;
  100. struct remembered_node_t
  101. {
  102. node_t *node;
  103. time_t time;
  104. };
  105. typedef struct nodeid_gen_t nodeid_gen_t;
  106. struct nodeid_gen_t
  107. {
  108. uint64_t nodeid;
  109. uint64_t generation;
  110. };
  111. struct fuse
  112. {
  113. struct fuse_session *se;
  114. struct node_table name_table;
  115. struct node_table id_table;
  116. nodeid_gen_t nodeid_gen;
  117. unsigned int hidectr;
  118. pthread_mutex_t lock;
  119. struct fuse_config conf;
  120. struct fuse_fs *fs;
  121. struct lock_queue_element *lockq;
  122. pthread_t maintenance_thread;
  123. kvec_t(remembered_node_t) remembered_nodes;
  124. };
  125. struct lock
  126. {
  127. int type;
  128. off_t start;
  129. off_t end;
  130. pid_t pid;
  131. uint64_t owner;
  132. struct lock *next;
  133. };
  134. #define TREELOCK_WRITE -1
  135. #define TREELOCK_WAIT_OFFSET INT_MIN
  136. struct fuse_dh
  137. {
  138. pthread_mutex_t lock;
  139. uint64_t fh;
  140. fuse_dirents_t d;
  141. };
  142. struct fuse_context_i
  143. {
  144. struct fuse_context ctx;
  145. fuse_req_t req;
  146. };
  147. static pthread_key_t fuse_context_key;
  148. static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER;
  149. static int fuse_context_ref;
  150. /*
  151. Why was the nodeid:generation logic simplified?
  152. nodeid is uint64_t: max value of 18446744073709551615
  153. If nodes were created at a rate of 1048576 per second it would take
  154. over 500 thousand years to roll over. I'm fine with risking that.
  155. */
  156. static
  157. uint64_t
  158. generate_nodeid(nodeid_gen_t *ng_)
  159. {
  160. ng_->nodeid++;
  161. return ng_->nodeid;
  162. }
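/*
  A minimal editorial sanity check of the claim above, kept out of the
  build with #if 0 (printf comes from the <stdio.h> this file already
  includes):
*/
#if 0
static
void
nodeid_rollover_estimate(void)
{
  const double ids            = 18446744073709551616.0; /* 2^64 nodeids */
  const double ids_per_second = 1048576.0;              /* 2^20 per second */
  const double secs_per_year  = 31557600.0;             /* Julian year */
  /* prints roughly 557000 */
  printf("%.0f years to roll over\n",ids / ids_per_second / secs_per_year);
}
#endif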
  163. static
  164. char*
  165. filename_strdup(struct fuse *f_,
  166. const char *fn_)
  167. {
  168. return strdup(fn_);
  169. }
  170. static
  171. void
  172. filename_free(struct fuse *f_,
  173. char *fn_)
  174. {
  175. free(fn_);
  176. }
  177. static
  178. void*
  179. fuse_hdr_arg(const struct fuse_in_header *hdr_)
  180. {
  181. return (void*)&hdr_[1];
  182. }
  183. static
  184. void
  185. list_add(struct list_head *new_,
  186. struct list_head *prev_,
  187. struct list_head *next_)
  188. {
  189. next_->prev = new_;
  190. new_->next = next_;
  191. new_->prev = prev_;
  192. prev_->next = new_;
  193. }
  194. static
  195. inline
  196. void
  197. list_add_head(struct list_head *new_,
  198. struct list_head *head_)
  199. {
  200. list_add(new_,head_,head_->next);
  201. }
  202. static
  203. inline
  204. void
  205. list_add_tail(struct list_head *new_,
  206. struct list_head *head_)
  207. {
  208. list_add(new_,head_->prev,head_);
  209. }
  210. static
  211. inline
  212. void
  213. list_del(struct list_head *entry)
  214. {
  215. struct list_head *prev = entry->prev;
  216. struct list_head *next = entry->next;
  217. next->prev = prev;
  218. prev->next = next;
  219. }
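/*
  The id and name tables appear to use linear (incremental) hashing:
  the bucket array doubles in size but entries migrate one bucket at a
  time as `split` advances (rehash_id/rehash_name) and are folded back
  as it retreats (remerge_id/remerge_name). id_hash() therefore returns
  the old half-table index for buckets that have not yet been split and
  the full-size index for those that have.
*/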
  220. static
  221. size_t
  222. id_hash(struct fuse *f,
  223. uint64_t ino)
  224. {
  225. uint64_t hash = ((uint32_t)ino * 2654435761U) % f->id_table.size;
  226. uint64_t oldhash = hash % (f->id_table.size / 2);
  227. if(oldhash >= f->id_table.split)
  228. return oldhash;
  229. else
  230. return hash;
  231. }
  232. static
  233. node_t*
  234. get_node_nocheck(struct fuse *f,
  235. uint64_t nodeid)
  236. {
  237. size_t hash = id_hash(f,nodeid);
  238. node_t *node;
  239. for(node = f->id_table.array[hash]; node != NULL; node = node->id_next)
  240. if(node->nodeid == nodeid)
  241. return node;
  242. return NULL;
  243. }
  244. static
  245. node_t*
  246. get_node(struct fuse *f,
  247. const uint64_t nodeid)
  248. {
  249. node_t *node = get_node_nocheck(f,nodeid);
  250. if(!node)
  251. {
  252. fprintf(stderr,"fuse internal error: node %llu not found\n",
  253. (unsigned long long)nodeid);
  254. abort();
  255. }
  256. return node;
  257. }
  258. static
  259. void
  260. remove_remembered_node(struct fuse *f_,
  261. node_t *node_)
  262. {
  263. for(size_t i = 0; i < kv_size(f_->remembered_nodes); i++)
  264. {
  265. if(kv_A(f_->remembered_nodes,i).node != node_)
  266. continue;
  267. kv_delete(f_->remembered_nodes,i);
  268. break;
  269. }
  270. }
  271. static
  272. uint32_t
  273. stat_crc32b(const struct stat *st_)
  274. {
  275. uint32_t crc;
  276. crc = crc32b_start();
  277. crc = crc32b_continue(&st_->st_ino,sizeof(st_->st_ino),crc);
  278. crc = crc32b_continue(&st_->st_size,sizeof(st_->st_size),crc);
  279. crc = crc32b_continue(&st_->st_mtim,sizeof(st_->st_mtim),crc);
  280. crc = crc32b_finish(crc);
  281. return crc;
  282. }
  283. #ifndef CLOCK_MONOTONIC
  284. # define CLOCK_MONOTONIC CLOCK_REALTIME
  285. #endif
  286. static
  287. time_t
  288. current_time()
  289. {
  290. int rv;
  291. struct timespec now;
  292. static clockid_t clockid = CLOCK_MONOTONIC;
  293. rv = clock_gettime(clockid,&now);
  294. if((rv == -1) && (errno == EINVAL))
  295. {
  296. clockid = CLOCK_REALTIME;
  297. rv = clock_gettime(clockid,&now);
  298. }
  299. if(rv == -1)
  300. now.tv_sec = time(NULL);
  301. return now.tv_sec;
  302. }
  303. static
  304. void
  305. free_node(struct fuse *f_,
  306. node_t *node_)
  307. {
  308. filename_free(f_,node_->name);
  309. if(node_->hidden_fh)
  310. f_->fs->op.free_hide(node_->hidden_fh);
  311. node_free(node_);
  312. }
  313. static
  314. void
  315. node_table_reduce(struct node_table *t)
  316. {
  317. size_t newsize = t->size / 2;
  318. void *newarray;
  319. if(newsize < NODE_TABLE_MIN_SIZE)
  320. return;
  321. newarray = realloc(t->array,sizeof(node_t*) * newsize);
  322. if(newarray != NULL)
  323. t->array = (node_t**)newarray;
  324. t->size = newsize;
  325. t->split = t->size / 2;
  326. }
  327. static
  328. void
  329. remerge_id(struct fuse *f)
  330. {
  331. struct node_table *t = &f->id_table;
  332. int iter;
  333. if(t->split == 0)
  334. node_table_reduce(t);
  335. for(iter = 8; t->split > 0 && iter; iter--)
  336. {
  337. node_t **upper;
  338. t->split--;
  339. upper = &t->array[t->split + t->size / 2];
  340. if(*upper)
  341. {
  342. node_t **nodep;
  343. for(nodep = &t->array[t->split]; *nodep;
  344. nodep = &(*nodep)->id_next);
  345. *nodep = *upper;
  346. *upper = NULL;
  347. break;
  348. }
  349. }
  350. }
  351. static
  352. void
  353. unhash_id(struct fuse *f,
  354. node_t *node)
  355. {
  356. node_t **nodep = &f->id_table.array[id_hash(f,node->nodeid)];
  357. for(; *nodep != NULL; nodep = &(*nodep)->id_next)
  358. if(*nodep == node)
  359. {
  360. *nodep = node->id_next;
  361. f->id_table.use--;
  362. if(f->id_table.use < f->id_table.size / 4)
  363. remerge_id(f);
  364. return;
  365. }
  366. }
  367. static
  368. int
  369. node_table_resize(struct node_table *t)
  370. {
  371. size_t newsize = t->size * 2;
  372. void *newarray;
  373. newarray = realloc(t->array,sizeof(node_t*) * newsize);
  374. if(newarray == NULL)
  375. return -1;
  376. t->array = (node_t**)newarray;
  377. memset(t->array + t->size,0,t->size * sizeof(node_t*));
  378. t->size = newsize;
  379. t->split = 0;
  380. return 0;
  381. }
  382. static
  383. void
  384. rehash_id(struct fuse *f)
  385. {
  386. struct node_table *t = &f->id_table;
  387. node_t **nodep;
  388. node_t **next;
  389. size_t hash;
  390. if(t->split == t->size / 2)
  391. return;
  392. hash = t->split;
  393. t->split++;
  394. for(nodep = &t->array[hash]; *nodep != NULL; nodep = next)
  395. {
  396. node_t *node = *nodep;
  397. size_t newhash = id_hash(f,node->nodeid);
  398. if(newhash != hash)
  399. {
  400. next = nodep;
  401. *nodep = node->id_next;
  402. node->id_next = t->array[newhash];
  403. t->array[newhash] = node;
  404. }
  405. else
  406. {
  407. next = &node->id_next;
  408. }
  409. }
  410. if(t->split == t->size / 2)
  411. node_table_resize(t);
  412. }
  413. static
  414. void
  415. hash_id(struct fuse *f,
  416. node_t *node)
  417. {
  418. size_t hash;
  419. hash = id_hash(f,node->nodeid);
  420. node->id_next = f->id_table.array[hash];
  421. f->id_table.array[hash] = node;
  422. f->id_table.use++;
  423. if(f->id_table.use >= f->id_table.size / 2)
  424. rehash_id(f);
  425. }
  426. static
  427. size_t
  428. name_hash(struct fuse *f,
  429. uint64_t parent,
  430. const char *name)
  431. {
  432. uint64_t hash = parent;
  433. uint64_t oldhash;
  434. for(; *name; name++)
  435. hash = hash * 31 + (unsigned char)*name;
  436. hash %= f->name_table.size;
  437. oldhash = hash % (f->name_table.size / 2);
  438. if(oldhash >= f->name_table.split)
  439. return oldhash;
  440. else
  441. return hash;
  442. }
  443. static
  444. void
  445. unref_node(struct fuse *f,
  446. node_t *node);
  447. static
  448. void
  449. remerge_name(struct fuse *f)
  450. {
  451. int iter;
  452. struct node_table *t = &f->name_table;
  453. if(t->split == 0)
  454. node_table_reduce(t);
  455. for(iter = 8; t->split > 0 && iter; iter--)
  456. {
  457. node_t **upper;
  458. t->split--;
  459. upper = &t->array[t->split + t->size / 2];
  460. if(*upper)
  461. {
  462. node_t **nodep;
  463. for(nodep = &t->array[t->split]; *nodep; nodep = &(*nodep)->name_next);
  464. *nodep = *upper;
  465. *upper = NULL;
  466. break;
  467. }
  468. }
  469. }
  470. static
  471. void
  472. unhash_name(struct fuse *f,
  473. node_t *node)
  474. {
  475. if(node->name)
  476. {
  477. size_t hash = name_hash(f,node->parent->nodeid,node->name);
  478. node_t **nodep = &f->name_table.array[hash];
  479. for(; *nodep != NULL; nodep = &(*nodep)->name_next)
  480. if(*nodep == node)
  481. {
  482. *nodep = node->name_next;
  483. node->name_next = NULL;
  484. unref_node(f,node->parent);
  485. filename_free(f,node->name);
  486. node->name = NULL;
  487. node->parent = NULL;
  488. f->name_table.use--;
  489. if(f->name_table.use < f->name_table.size / 4)
  490. remerge_name(f);
  491. return;
  492. }
  493. fprintf(stderr,
  494. "fuse internal error: unable to unhash node: %llu\n",
  495. (unsigned long long)node->nodeid);
  496. abort();
  497. }
  498. }
  499. static
  500. void
  501. rehash_name(struct fuse *f)
  502. {
  503. struct node_table *t = &f->name_table;
  504. node_t **nodep;
  505. node_t **next;
  506. size_t hash;
  507. if(t->split == t->size / 2)
  508. return;
  509. hash = t->split;
  510. t->split++;
  511. for(nodep = &t->array[hash]; *nodep != NULL; nodep = next)
  512. {
  513. node_t *node = *nodep;
  514. size_t newhash = name_hash(f,node->parent->nodeid,node->name);
  515. if(newhash != hash)
  516. {
  517. next = nodep;
  518. *nodep = node->name_next;
  519. node->name_next = t->array[newhash];
  520. t->array[newhash] = node;
  521. }
  522. else
  523. {
  524. next = &node->name_next;
  525. }
  526. }
  527. if(t->split == t->size / 2)
  528. node_table_resize(t);
  529. }
  530. static
  531. int
  532. hash_name(struct fuse *f,
  533. node_t *node,
  534. uint64_t parentid,
  535. const char *name)
  536. {
  537. size_t hash = name_hash(f,parentid,name);
  538. node_t *parent = get_node(f,parentid);
  539. node->name = filename_strdup(f,name);
  540. if(node->name == NULL)
  541. return -1;
  542. parent->refctr++;
  543. node->parent = parent;
  544. node->name_next = f->name_table.array[hash];
  545. f->name_table.array[hash] = node;
  546. f->name_table.use++;
  547. if(f->name_table.use >= f->name_table.size / 2)
  548. rehash_name(f);
  549. return 0;
  550. }
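/*
  With the "remember" option enabled, nodes whose kernel lookup count
  falls back to 1 are recorded on f->remembered_nodes with a timestamp
  (see forget_node below) rather than being dropped immediately;
  presumably the maintenance thread prunes that list after the
  configured number of seconds.
*/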
  551. static
  552. inline
  553. int
  554. remember_nodes(struct fuse *f_)
  555. {
  556. return (f_->conf.remember > 0);
  557. }
  558. static
  559. void
  560. delete_node(struct fuse *f,
  561. node_t *node)
  562. {
  563. assert(node->treelock == 0);
  564. unhash_name(f,node);
  565. if(remember_nodes(f))
  566. remove_remembered_node(f,node);
  567. unhash_id(f,node);
  568. node_free(node);
  569. }
  570. static
  571. void
  572. unref_node(struct fuse *f,
  573. node_t *node)
  574. {
  575. assert(node->refctr > 0);
  576. node->refctr--;
  577. if(!node->refctr)
  578. delete_node(f,node);
  579. }
  580. static
  581. uint64_t
  582. rand64(void)
  583. {
  584. uint64_t rv;
  585. rv = rand();
  586. rv <<= 32;
  587. rv |= rand();
  588. return rv;
  589. }
  590. static
  591. node_t*
  592. lookup_node(struct fuse *f,
  593. uint64_t parent,
  594. const char *name)
  595. {
  596. size_t hash;
  597. node_t *node;
  598. hash = name_hash(f,parent,name);
  599. for(node = f->name_table.array[hash]; node != NULL; node = node->name_next)
  600. if(node->parent->nodeid == parent && strcmp(node->name,name) == 0)
  601. return node;
  602. return NULL;
  603. }
  604. static
  605. void
  606. inc_nlookup(node_t *node)
  607. {
  608. if(!node->nlookup)
  609. node->refctr++;
  610. node->nlookup++;
  611. }
  612. static
  613. node_t*
  614. find_node(struct fuse *f,
  615. uint64_t parent,
  616. const char *name)
  617. {
  618. node_t *node;
  619. pthread_mutex_lock(&f->lock);
  620. if(!name)
  621. node = get_node(f,parent);
  622. else
  623. node = lookup_node(f,parent,name);
  624. if(node == NULL)
  625. {
  626. node = node_alloc();
  627. if(node == NULL)
  628. goto out_err;
  629. node->nodeid = generate_nodeid(&f->nodeid_gen);
  630. if(f->conf.remember)
  631. inc_nlookup(node);
  632. if(hash_name(f,node,parent,name) == -1)
  633. {
  634. free_node(f,node);
  635. node = NULL;
  636. goto out_err;
  637. }
  638. hash_id(f,node);
  639. }
  640. else if((node->nlookup == 1) && remember_nodes(f))
  641. {
  642. remove_remembered_node(f,node);
  643. }
  644. inc_nlookup(node);
  645. out_err:
  646. pthread_mutex_unlock(&f->lock);
  647. return node;
  648. }
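/*
  add_name() prepends "/<name>" to a path being assembled right to left
  at the tail of *buf, doubling the buffer when it would overflow at
  the front. Illustrative sketch: starting from s at the trailing '\0',
  successive calls with "gcc", "bin", "local" and "usr" leave the
  buffer tail reading "/usr/local/bin/gcc".
*/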
  649. static
  650. char*
  651. add_name(char **buf,
  652. unsigned *bufsize,
  653. char *s,
  654. const char *name)
  655. {
  656. size_t len = strlen(name);
  657. if(s - len <= *buf)
  658. {
  659. unsigned pathlen = *bufsize - (s - *buf);
  660. unsigned newbufsize = *bufsize;
  661. char *newbuf;
  662. while(newbufsize < pathlen + len + 1)
  663. {
  664. if(newbufsize >= 0x80000000)
  665. newbufsize = 0xffffffff;
  666. else
  667. newbufsize *= 2;
  668. }
  669. newbuf = (char*)realloc(*buf,newbufsize);
  670. if(newbuf == NULL)
  671. return NULL;
  672. *buf = newbuf;
  673. s = newbuf + newbufsize - pathlen;
  674. memmove(s,newbuf + *bufsize - pathlen,pathlen);
  675. *bufsize = newbufsize;
  676. }
  677. s -= len;
  678. strncpy(s,name,len);
  679. s--;
  680. *s = '/';
  681. return s;
  682. }
  683. static
  684. void
  685. unlock_path(struct fuse *f,
  686. uint64_t nodeid,
  687. node_t *wnode,
  688. node_t *end)
  689. {
  690. node_t *node;
  691. if(wnode)
  692. {
  693. assert(wnode->treelock == TREELOCK_WRITE);
  694. wnode->treelock = 0;
  695. }
  696. for(node = get_node(f,nodeid); node != end && node->nodeid != FUSE_ROOT_ID; node = node->parent)
  697. {
  698. assert(node->treelock != 0);
  699. assert(node->treelock != TREELOCK_WAIT_OFFSET);
  700. assert(node->treelock != TREELOCK_WRITE);
  701. node->treelock--;
  702. if(node->treelock == TREELOCK_WAIT_OFFSET)
  703. node->treelock = 0;
  704. }
  705. }
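/*
  Tree locking as used by try_get_path()/unlock_path(): a node's
  treelock is a reader count while positive, TREELOCK_WRITE (-1) when
  the terminal node is write locked, and gets TREELOCK_WAIT_OFFSET
  added while a writer is waiting so existing readers drain without new
  ones arriving; once the count decays back to TREELOCK_WAIT_OFFSET it
  is reset to 0. Callers that cannot take the locks receive -EAGAIN and
  are queued (see the lock_queue code below).
*/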
  706. static
  707. int
  708. try_get_path(struct fuse *f,
  709. uint64_t nodeid,
  710. const char *name,
  711. char **path,
  712. node_t **wnodep,
  713. bool need_lock)
  714. {
  715. unsigned bufsize = 256;
  716. char *buf;
  717. char *s;
  718. node_t *node;
  719. node_t *wnode = NULL;
  720. int err;
  721. *path = NULL;
  722. err = -ENOMEM;
  723. buf = (char*)malloc(bufsize);
  724. if(buf == NULL)
  725. goto out_err;
  726. s = buf + bufsize - 1;
  727. *s = '\0';
  728. if(name != NULL)
  729. {
  730. s = add_name(&buf,&bufsize,s,name);
  731. err = -ENOMEM;
  732. if(s == NULL)
  733. goto out_free;
  734. }
  735. if(wnodep)
  736. {
  737. assert(need_lock);
  738. wnode = lookup_node(f,nodeid,name);
  739. if(wnode)
  740. {
  741. if(wnode->treelock != 0)
  742. {
  743. if(wnode->treelock > 0)
  744. wnode->treelock += TREELOCK_WAIT_OFFSET;
  745. err = -EAGAIN;
  746. goto out_free;
  747. }
  748. wnode->treelock = TREELOCK_WRITE;
  749. }
  750. }
  751. for(node = get_node(f,nodeid); node->nodeid != FUSE_ROOT_ID; node = node->parent)
  752. {
  753. err = -ESTALE;
  754. if(node->name == NULL || node->parent == NULL)
  755. goto out_unlock;
  756. err = -ENOMEM;
  757. s = add_name(&buf,&bufsize,s,node->name);
  758. if(s == NULL)
  759. goto out_unlock;
  760. if(need_lock)
  761. {
  762. err = -EAGAIN;
  763. if(node->treelock < 0)
  764. goto out_unlock;
  765. node->treelock++;
  766. }
  767. }
  768. if(s[0])
  769. memmove(buf,s,bufsize - (s - buf));
  770. else
  771. strcpy(buf,"/");
  772. *path = buf;
  773. if(wnodep)
  774. *wnodep = wnode;
  775. return 0;
  776. out_unlock:
  777. if(need_lock)
  778. unlock_path(f,nodeid,wnode,node);
  779. out_free:
  780. free(buf);
  781. out_err:
  782. return err;
  783. }
  784. static
  785. int
  786. try_get_path2(struct fuse *f,
  787. uint64_t nodeid1,
  788. const char *name1,
  789. uint64_t nodeid2,
  790. const char *name2,
  791. char **path1,
  792. char **path2,
  793. node_t **wnode1,
  794. node_t **wnode2)
  795. {
  796. int err;
  797. err = try_get_path(f,nodeid1,name1,path1,wnode1,true);
  798. if(!err)
  799. {
  800. err = try_get_path(f,nodeid2,name2,path2,wnode2,true);
  801. if(err)
  802. {
  803. node_t *wn1 = wnode1 ? *wnode1 : NULL;
  804. unlock_path(f,nodeid1,wn1,NULL);
  805. free(*path1);
  806. }
  807. }
  808. return err;
  809. }
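/*
  Path resolutions that fail with -EAGAIN because of tree locks are
  parked on f->lockq, one lock_queue_element per waiting request.
  Whenever locks are released, wake_up_queued() retries each queued
  request via queue_element_wakeup() and signals its condition variable
  once the paths resolve (or a hard error occurs).
*/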
  810. static
  811. void
  812. queue_element_wakeup(struct fuse *f,
  813. struct lock_queue_element *qe)
  814. {
  815. int err;
  816. if(!qe->path1)
  817. {
  818. /* Just waiting for it to be unlocked */
  819. if(get_node(f,qe->nodeid1)->treelock == 0)
  820. pthread_cond_signal(&qe->cond);
  821. return;
  822. }
  823. if(qe->done)
  824. return;
  825. if(!qe->path2)
  826. {
  827. err = try_get_path(f,
  828. qe->nodeid1,
  829. qe->name1,
  830. qe->path1,
  831. qe->wnode1,
  832. true);
  833. }
  834. else
  835. {
  836. err = try_get_path2(f,
  837. qe->nodeid1,
  838. qe->name1,
  839. qe->nodeid2,
  840. qe->name2,
  841. qe->path1,
  842. qe->path2,
  843. qe->wnode1,
  844. qe->wnode2);
  845. }
  846. if(err == -EAGAIN)
  847. return;
  848. qe->err = err;
  849. qe->done = true;
  850. pthread_cond_signal(&qe->cond);
  851. }
  852. static
  853. void
  854. wake_up_queued(struct fuse *f)
  855. {
  856. struct lock_queue_element *qe;
  857. for(qe = f->lockq; qe != NULL; qe = qe->next)
  858. queue_element_wakeup(f,qe);
  859. }
  860. static
  861. void
  862. queue_path(struct fuse *f,
  863. struct lock_queue_element *qe)
  864. {
  865. struct lock_queue_element **qp;
  866. qe->done = false;
  867. pthread_cond_init(&qe->cond,NULL);
  868. qe->next = NULL;
  869. for(qp = &f->lockq; *qp != NULL; qp = &(*qp)->next);
  870. *qp = qe;
  871. }
  872. static
  873. void
  874. dequeue_path(struct fuse *f,
  875. struct lock_queue_element *qe)
  876. {
  877. struct lock_queue_element **qp;
  878. pthread_cond_destroy(&qe->cond);
  879. for(qp = &f->lockq; *qp != qe; qp = &(*qp)->next);
  880. *qp = qe->next;
  881. }
  882. static
  883. int
  884. wait_path(struct fuse *f,
  885. struct lock_queue_element *qe)
  886. {
  887. queue_path(f,qe);
  888. do
  889. {
  890. pthread_cond_wait(&qe->cond,&f->lock);
  891. } while(!qe->done);
  892. dequeue_path(f,qe);
  893. return qe->err;
  894. }
  895. static
  896. int
  897. get_path_common(struct fuse *f,
  898. uint64_t nodeid,
  899. const char *name,
  900. char **path,
  901. node_t **wnode)
  902. {
  903. int err;
  904. pthread_mutex_lock(&f->lock);
  905. err = try_get_path(f,nodeid,name,path,wnode,true);
  906. if(err == -EAGAIN)
  907. {
  908. struct lock_queue_element qe = {0};
  909. qe.nodeid1 = nodeid;
  910. qe.name1 = name;
  911. qe.path1 = path;
  912. qe.wnode1 = wnode;
  913. err = wait_path(f,&qe);
  914. }
  915. pthread_mutex_unlock(&f->lock);
  916. return err;
  917. }
  918. static
  919. int
  920. get_path(struct fuse *f,
  921. uint64_t nodeid,
  922. char **path)
  923. {
  924. return get_path_common(f,nodeid,NULL,path,NULL);
  925. }
  926. static
  927. int
  928. get_path_name(struct fuse *f,
  929. uint64_t nodeid,
  930. const char *name,
  931. char **path)
  932. {
  933. return get_path_common(f,nodeid,name,path,NULL);
  934. }
  935. static
  936. int
  937. get_path_wrlock(struct fuse *f,
  938. uint64_t nodeid,
  939. const char *name,
  940. char **path,
  941. node_t **wnode)
  942. {
  943. return get_path_common(f,nodeid,name,path,wnode);
  944. }
  945. static
  946. int
  947. get_path2(struct fuse *f,
  948. uint64_t nodeid1,
  949. const char *name1,
  950. uint64_t nodeid2,
  951. const char *name2,
  952. char **path1,
  953. char **path2,
  954. node_t **wnode1,
  955. node_t **wnode2)
  956. {
  957. int err;
  958. pthread_mutex_lock(&f->lock);
  959. err = try_get_path2(f,nodeid1,name1,nodeid2,name2,
  960. path1,path2,wnode1,wnode2);
  961. if(err == -EAGAIN)
  962. {
  963. struct lock_queue_element qe = {0};
  964. qe.nodeid1 = nodeid1;
  965. qe.name1 = name1;
  966. qe.path1 = path1;
  967. qe.wnode1 = wnode1;
  968. qe.nodeid2 = nodeid2;
  969. qe.name2 = name2;
  970. qe.path2 = path2;
  971. qe.wnode2 = wnode2;
  972. err = wait_path(f,&qe);
  973. }
  974. pthread_mutex_unlock(&f->lock);
  975. return err;
  976. }
  977. static
  978. void
  979. free_path_wrlock(struct fuse *f,
  980. uint64_t nodeid,
  981. node_t *wnode,
  982. char *path)
  983. {
  984. pthread_mutex_lock(&f->lock);
  985. unlock_path(f,nodeid,wnode,NULL);
  986. if(f->lockq)
  987. wake_up_queued(f);
  988. pthread_mutex_unlock(&f->lock);
  989. free(path);
  990. }
  991. static
  992. void
  993. free_path(struct fuse *f,
  994. uint64_t nodeid,
  995. char *path)
  996. {
  997. if(path)
  998. free_path_wrlock(f,nodeid,NULL,path);
  999. }
  1000. static
  1001. void
  1002. free_path2(struct fuse *f,
  1003. uint64_t nodeid1,
  1004. uint64_t nodeid2,
  1005. node_t *wnode1,
  1006. node_t *wnode2,
  1007. char *path1,
  1008. char *path2)
  1009. {
  1010. pthread_mutex_lock(&f->lock);
  1011. unlock_path(f,nodeid1,wnode1,NULL);
  1012. unlock_path(f,nodeid2,wnode2,NULL);
  1013. wake_up_queued(f);
  1014. pthread_mutex_unlock(&f->lock);
  1015. free(path1);
  1016. free(path2);
  1017. }
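/*
  forget_node() is the kernel-driven counterpart to lookup: nlookup
  counts how many times the kernel has been told about a node, and a
  FORGET/BATCH_FORGET request subtracts that many references. When
  nlookup hits 0 the node is unreferenced and may be freed; when it
  drops to 1 with "remember" enabled the node is added to the
  remembered list with the current time.
*/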
  1018. static
  1019. void
  1020. forget_node(struct fuse *f,
  1021. const uint64_t nodeid,
  1022. const uint64_t nlookup)
  1023. {
  1024. node_t *node;
  1025. if(nodeid == FUSE_ROOT_ID)
  1026. return;
  1027. pthread_mutex_lock(&f->lock);
  1028. node = get_node(f,nodeid);
  1029. /*
  1030. * Node may still be locked due to interrupt idiocy in open,
  1031. * create and opendir
  1032. */
  1033. while(node->nlookup == nlookup && node->treelock)
  1034. {
  1035. struct lock_queue_element qe = {0};
  1036. qe.nodeid1 = nodeid;
  1037. queue_path(f,&qe);
  1038. do
  1039. {
  1040. pthread_cond_wait(&qe.cond,&f->lock);
  1041. }
  1042. while((node->nlookup == nlookup) && node->treelock);
  1043. dequeue_path(f,&qe);
  1044. }
  1045. assert(node->nlookup >= nlookup);
  1046. node->nlookup -= nlookup;
  1047. if(node->nlookup == 0)
  1048. {
  1049. unref_node(f,node);
  1050. }
  1051. else if((node->nlookup == 1) && remember_nodes(f))
  1052. {
  1053. remembered_node_t fn;
  1054. fn.node = node;
  1055. fn.time = current_time();
  1056. kv_push(remembered_node_t,f->remembered_nodes,fn);
  1057. }
  1058. pthread_mutex_unlock(&f->lock);
  1059. }
  1060. static
  1061. void
  1062. unlink_node(struct fuse *f,
  1063. node_t *node)
  1064. {
  1065. if(remember_nodes(f))
  1066. {
  1067. assert(node->nlookup > 1);
  1068. node->nlookup--;
  1069. }
  1070. unhash_name(f,node);
  1071. }
  1072. static
  1073. void
  1074. remove_node(struct fuse *f,
  1075. uint64_t dir,
  1076. const char *name)
  1077. {
  1078. node_t *node;
  1079. pthread_mutex_lock(&f->lock);
  1080. node = lookup_node(f,dir,name);
  1081. if(node != NULL)
  1082. unlink_node(f,node);
  1083. pthread_mutex_unlock(&f->lock);
  1084. }
  1085. static
  1086. int
  1087. rename_node(struct fuse *f,
  1088. uint64_t olddir,
  1089. const char *oldname,
  1090. uint64_t newdir,
  1091. const char *newname)
  1092. {
  1093. node_t *node;
  1094. node_t *newnode;
  1095. int err = 0;
  1096. pthread_mutex_lock(&f->lock);
  1097. node = lookup_node(f,olddir,oldname);
  1098. newnode = lookup_node(f,newdir,newname);
  1099. if(node == NULL)
  1100. goto out;
  1101. if(newnode != NULL)
  1102. unlink_node(f,newnode);
  1103. unhash_name(f,node);
  1104. if(hash_name(f,node,newdir,newname) == -1)
  1105. {
  1106. err = -ENOMEM;
  1107. goto out;
  1108. }
  1109. out:
  1110. pthread_mutex_unlock(&f->lock);
  1111. return err;
  1112. }
  1113. static
  1114. void
  1115. set_stat(struct fuse *f,
  1116. uint64_t nodeid,
  1117. struct stat *stbuf)
  1118. {
  1119. if(f->conf.set_mode)
  1120. stbuf->st_mode = (stbuf->st_mode & S_IFMT) | (0777 & ~f->conf.umask);
  1121. if(f->conf.set_uid)
  1122. stbuf->st_uid = f->conf.uid;
  1123. if(f->conf.set_gid)
  1124. stbuf->st_gid = f->conf.gid;
  1125. }
  1126. static
  1127. struct fuse*
  1128. req_fuse(fuse_req_t req)
  1129. {
  1130. return (struct fuse*)fuse_req_userdata(req);
  1131. }
  1132. static
  1133. int
  1134. node_open(const node_t *node_)
  1135. {
  1136. return ((node_ != NULL) && (node_->open_count > 0));
  1137. }
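/*
  The attribute cache check is checksum based: stat_crc32b() hashes the
  (st_ino, st_size, st_mtim) triple and update_stat() clears
  node->is_stat_cache_valid whenever that checksum changes between two
  getattr results, which in turn decides keep_cache in
  open_auto_cache().
*/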
  1138. static
  1139. void
  1140. update_stat(node_t *node_,
  1141. const struct stat *stnew_)
  1142. {
  1143. uint32_t crc32b;
  1144. crc32b = stat_crc32b(stnew_);
  1145. if(node_->is_stat_cache_valid && (crc32b != node_->stat_crc32b))
  1146. node_->is_stat_cache_valid = 0;
  1147. node_->stat_crc32b = crc32b;
  1148. }
  1149. static
  1150. int
  1151. set_path_info(struct fuse *f,
  1152. uint64_t nodeid,
  1153. const char *name,
  1154. struct fuse_entry_param *e)
  1155. {
  1156. node_t *node;
  1157. node = find_node(f,nodeid,name);
  1158. if(node == NULL)
  1159. return -ENOMEM;
  1160. e->ino = node->nodeid;
  1161. e->generation = ((e->ino == FUSE_ROOT_ID) ? 0 : f->nodeid_gen.generation);
  1162. pthread_mutex_lock(&f->lock);
  1163. update_stat(node,&e->attr);
  1164. pthread_mutex_unlock(&f->lock);
  1165. set_stat(f,e->ino,&e->attr);
  1166. return 0;
  1167. }
  1168. /*
  1169. lookup requests only come in for FUSE_ROOT_ID when a "parent of
  1170. child of root node" request is made. This can happen when using
  1171. EXPORT_SUPPORT=true and a file handle is used to keep a reference to
  1172. a node which has been forgotten. Mostly an NFS concern but not
  1173. exclusively. The root node always has a nodeid of 1 and a generation
  1174. of 0. To guarantee this, set_path_info() explicitly forces the root
  1175. id's generation to 0.
  1176. */
  1177. static
  1178. int
  1179. lookup_path(struct fuse *f,
  1180. uint64_t nodeid,
  1181. const char *name,
  1182. const char *path,
  1183. struct fuse_entry_param *e,
  1184. fuse_file_info_t *fi)
  1185. {
  1186. int rv;
  1187. memset(e,0,sizeof(struct fuse_entry_param));
  1188. rv = ((fi == NULL) ?
  1189. f->fs->op.getattr(path,&e->attr,&e->timeout) :
  1190. f->fs->op.fgetattr(fi,&e->attr,&e->timeout));
  1191. if(rv)
  1192. return rv;
  1193. return set_path_info(f,nodeid,name,e);
  1194. }
  1195. static
  1196. struct fuse_context_i*
  1197. fuse_get_context_internal(void)
  1198. {
  1199. struct fuse_context_i *c;
  1200. c = (struct fuse_context_i *)pthread_getspecific(fuse_context_key);
  1201. if(c == NULL)
  1202. {
  1203. c = (struct fuse_context_i*)calloc(1,sizeof(struct fuse_context_i));
  1204. if(c == NULL)
  1205. {
  1206. /* This is hard to deal with properly,so just
  1207. abort. If memory is so low that the
  1208. context cannot be allocated,there's not
  1209. much hope for the filesystem anyway */
  1210. fprintf(stderr,"fuse: failed to allocate thread specific data\n");
  1211. abort();
  1212. }
  1213. pthread_setspecific(fuse_context_key,c);
  1214. }
  1215. return c;
  1216. }
  1217. static
  1218. void
  1219. fuse_freecontext(void *data)
  1220. {
  1221. free(data);
  1222. }
  1223. static
  1224. int
  1225. fuse_create_context_key(void)
  1226. {
  1227. int err = 0;
  1228. pthread_mutex_lock(&fuse_context_lock);
  1229. if(!fuse_context_ref)
  1230. {
  1231. err = pthread_key_create(&fuse_context_key,fuse_freecontext);
  1232. if(err)
  1233. {
  1234. fprintf(stderr,"fuse: failed to create thread specific key: %s\n",
  1235. strerror(err));
  1236. pthread_mutex_unlock(&fuse_context_lock);
  1237. return -1;
  1238. }
  1239. }
  1240. fuse_context_ref++;
  1241. pthread_mutex_unlock(&fuse_context_lock);
  1242. return 0;
  1243. }
  1244. static
  1245. void
  1246. fuse_delete_context_key(void)
  1247. {
  1248. pthread_mutex_lock(&fuse_context_lock);
  1249. fuse_context_ref--;
  1250. if(!fuse_context_ref)
  1251. {
  1252. free(pthread_getspecific(fuse_context_key));
  1253. pthread_key_delete(fuse_context_key);
  1254. }
  1255. pthread_mutex_unlock(&fuse_context_lock);
  1256. }
  1257. static
  1258. struct fuse*
  1259. req_fuse_prepare(fuse_req_t req)
  1260. {
  1261. struct fuse_context_i *c = fuse_get_context_internal();
  1262. const struct fuse_ctx *ctx = fuse_req_ctx(req);
  1263. c->req = req;
  1264. c->ctx.fuse = req_fuse(req);
  1265. c->ctx.uid = ctx->uid;
  1266. c->ctx.gid = ctx->gid;
  1267. c->ctx.pid = ctx->pid;
  1268. c->ctx.umask = ctx->umask;
  1269. return c->ctx.fuse;
  1270. }
  1271. static
  1272. void
  1273. reply_entry(fuse_req_t req,
  1274. const struct fuse_entry_param *e,
  1275. int err)
  1276. {
  1277. if(!err)
  1278. {
  1279. struct fuse *f = req_fuse(req);
  1280. if(fuse_reply_entry(req,e) == -ENOENT)
  1281. {
  1282. /* Skip forget for negative result */
  1283. if(e->ino != 0)
  1284. forget_node(f,e->ino,1);
  1285. }
  1286. }
  1287. else
  1288. {
  1289. fuse_reply_err(req,err);
  1290. }
  1291. }
  1292. static
  1293. void
  1294. fuse_lib_init(void *data,
  1295. struct fuse_conn_info *conn)
  1296. {
  1297. struct fuse *f = (struct fuse *)data;
  1298. struct fuse_context_i *c = fuse_get_context_internal();
  1299. memset(c,0,sizeof(*c));
  1300. c->ctx.fuse = f;
  1301. f->fs->op.init(conn);
  1302. }
  1303. static
  1304. void
  1305. fuse_lib_destroy(void *data)
  1306. {
  1307. struct fuse *f = (struct fuse *)data;
  1308. struct fuse_context_i *c = fuse_get_context_internal();
  1309. memset(c,0,sizeof(*c));
  1310. c->ctx.fuse = f;
  1311. f->fs->op.destroy();
  1312. free(f->fs);
  1313. f->fs = NULL;
  1314. }
  1315. static
  1316. void
  1317. fuse_lib_lookup(fuse_req_t req,
  1318. struct fuse_in_header *hdr_)
  1319. {
  1320. int err;
  1321. uint64_t nodeid;
  1322. char *path;
  1323. const char *name;
  1324. struct fuse *f;
  1325. node_t *dot = NULL;
  1326. struct fuse_entry_param e = {0};
  1327. name = (const char*)fuse_hdr_arg(hdr_);
  1328. nodeid = hdr_->nodeid;
  1329. f = req_fuse_prepare(req);
  1330. if(name[0] == '.')
  1331. {
  1332. if(name[1] == '\0')
  1333. {
  1334. name = NULL;
  1335. pthread_mutex_lock(&f->lock);
  1336. dot = get_node_nocheck(f,nodeid);
  1337. if(dot == NULL)
  1338. {
  1339. pthread_mutex_unlock(&f->lock);
  1340. reply_entry(req,&e,-ESTALE);
  1341. return;
  1342. }
  1343. dot->refctr++;
  1344. pthread_mutex_unlock(&f->lock);
  1345. }
  1346. else if((name[1] == '.') && (name[2] == '\0'))
  1347. {
  1348. if(nodeid == 1)
  1349. {
  1350. reply_entry(req,&e,-ENOENT);
  1351. return;
  1352. }
  1353. name = NULL;
  1354. pthread_mutex_lock(&f->lock);
  1355. nodeid = get_node(f,nodeid)->parent->nodeid;
  1356. pthread_mutex_unlock(&f->lock);
  1357. }
  1358. }
  1359. err = get_path_name(f,nodeid,name,&path);
  1360. if(!err)
  1361. {
  1362. err = lookup_path(f,nodeid,name,path,&e,NULL);
  1363. if(err == -ENOENT)
  1364. {
  1365. e.ino = 0;
  1366. err = 0;
  1367. }
  1368. free_path(f,nodeid,path);
  1369. }
  1370. if(dot)
  1371. {
  1372. pthread_mutex_lock(&f->lock);
  1373. unref_node(f,dot);
  1374. pthread_mutex_unlock(&f->lock);
  1375. }
  1376. reply_entry(req,&e,err);
  1377. }
  1378. static
  1379. void
  1380. fuse_lib_forget(fuse_req_t req,
  1381. struct fuse_in_header *hdr_)
  1382. {
  1383. struct fuse *f;
  1384. struct fuse_forget_in *arg;
  1385. f = req_fuse(req);
  1386. arg = (fuse_forget_in*)fuse_hdr_arg(hdr_);
  1387. forget_node(f,hdr_->nodeid,arg->nlookup);
  1388. fuse_reply_none(req);
  1389. }
  1390. static
  1391. void
  1392. fuse_lib_forget_multi(fuse_req_t req,
  1393. struct fuse_in_header *hdr_)
  1394. {
  1395. struct fuse *f;
  1396. struct fuse_batch_forget_in *arg;
  1397. struct fuse_forget_one *entry;
  1398. f = req_fuse(req);
  1399. arg = (fuse_batch_forget_in*)fuse_hdr_arg(hdr_);
  1400. entry = (fuse_forget_one*)PARAM(arg);
  1401. for(uint32_t i = 0; i < arg->count; i++)
  1402. {
  1403. forget_node(f,
  1404. entry[i].nodeid,
  1405. entry[i].nlookup);
  1406. }
  1407. fuse_reply_none(req);
  1408. }
  1409. static
  1410. void
  1411. fuse_lib_getattr(fuse_req_t req,
  1412. struct fuse_in_header *hdr_)
  1413. {
  1414. int err;
  1415. char *path;
  1416. struct fuse *f;
  1417. struct stat buf;
  1418. node_t *node;
  1419. fuse_timeouts_t timeout;
  1420. fuse_file_info_t ffi = {0};
  1421. const struct fuse_getattr_in *arg;
  1422. arg = (fuse_getattr_in*)fuse_hdr_arg(hdr_);
  1423. f = req_fuse_prepare(req);
  1424. if(arg->getattr_flags & FUSE_GETATTR_FH)
  1425. {
  1426. ffi.fh = arg->fh;
  1427. }
  1428. else
  1429. {
  1430. pthread_mutex_lock(&f->lock);
  1431. node = get_node(f,hdr_->nodeid);
  1432. if(node->hidden_fh)
  1433. ffi.fh = node->hidden_fh;
  1434. pthread_mutex_unlock(&f->lock);
  1435. }
  1436. memset(&buf,0,sizeof(buf));
  1437. err = 0;
  1438. path = NULL;
  1439. if(ffi.fh == 0)
  1440. err = get_path(f,hdr_->nodeid,&path);
  1441. if(!err)
  1442. {
  1443. err = ((ffi.fh == 0) ?
  1444. f->fs->op.getattr(path,&buf,&timeout) :
  1445. f->fs->op.fgetattr(&ffi,&buf,&timeout));
  1446. free_path(f,hdr_->nodeid,path);
  1447. }
  1448. if(!err)
  1449. {
  1450. pthread_mutex_lock(&f->lock);
  1451. node = get_node(f,hdr_->nodeid);
  1452. update_stat(node,&buf);
  1453. pthread_mutex_unlock(&f->lock);
  1454. set_stat(f,hdr_->nodeid,&buf);
  1455. fuse_reply_attr(req,&buf,timeout.attr);
  1456. }
  1457. else
  1458. {
  1459. fuse_reply_err(req,err);
  1460. }
  1461. }
  1462. static
  1463. void
  1464. fuse_lib_setattr(fuse_req_t req,
  1465. struct fuse_in_header *hdr_)
  1466. {
  1467. struct fuse *f = req_fuse_prepare(req);
  1468. struct stat stbuf = {0};
  1469. char *path;
  1470. int err;
  1471. node_t *node;
  1472. fuse_timeouts_t timeout;
  1473. fuse_file_info_t *fi;
  1474. fuse_file_info_t ffi = {0};
  1475. struct fuse_setattr_in *arg;
  1476. arg = (fuse_setattr_in*)fuse_hdr_arg(hdr_);
  1477. fi = NULL;
  1478. if(arg->valid & FATTR_FH)
  1479. {
  1480. fi = &ffi;
  1481. fi->fh = arg->fh;
  1482. }
  1483. else
  1484. {
  1485. pthread_mutex_lock(&f->lock);
  1486. node = get_node(f,hdr_->nodeid);
  1487. if(node->hidden_fh)
  1488. {
  1489. fi = &ffi;
  1490. fi->fh = node->hidden_fh;
  1491. }
  1492. pthread_mutex_unlock(&f->lock);
  1493. }
  1494. err = 0;
  1495. path = NULL;
  1496. if(fi == NULL)
  1497. err = get_path(f,hdr_->nodeid,&path);
  1498. if(!err)
  1499. {
  1500. err = 0;
  1501. if(!err && (arg->valid & FATTR_MODE))
  1502. err = ((fi == NULL) ?
  1503. f->fs->op.chmod(path,arg->mode) :
  1504. f->fs->op.fchmod(fi,arg->mode));
  1505. if(!err && (arg->valid & (FATTR_UID | FATTR_GID)))
  1506. {
  1507. uid_t uid = ((arg->valid & FATTR_UID) ? arg->uid : (uid_t)-1);
  1508. gid_t gid = ((arg->valid & FATTR_GID) ? arg->gid : (gid_t)-1);
  1509. err = ((fi == NULL) ?
  1510. f->fs->op.chown(path,uid,gid) :
  1511. f->fs->op.fchown(fi,uid,gid));
  1512. }
  1513. if(!err && (arg->valid & FATTR_SIZE))
  1514. err = ((fi == NULL) ?
  1515. f->fs->op.truncate(path,arg->size) :
  1516. f->fs->op.ftruncate(fi,arg->size));
  1517. #ifdef HAVE_UTIMENSAT
  1518. if(!err && (arg->valid & (FATTR_ATIME | FATTR_MTIME)))
  1519. {
  1520. struct timespec tv[2];
  1521. tv[0].tv_sec = 0;
  1522. tv[1].tv_sec = 0;
  1523. tv[0].tv_nsec = UTIME_OMIT;
  1524. tv[1].tv_nsec = UTIME_OMIT;
  1525. if(arg->valid & FATTR_ATIME_NOW)
  1526. tv[0].tv_nsec = UTIME_NOW;
  1527. else if(arg->valid & FATTR_ATIME)
  1528. tv[0] = (struct timespec){ static_cast<time_t>(arg->atime), arg->atimensec };
  1529. if(arg->valid & FATTR_MTIME_NOW)
  1530. tv[1].tv_nsec = UTIME_NOW;
  1531. else if(arg->valid & FATTR_MTIME)
  1532. tv[1] = (struct timespec){ static_cast<time_t>(arg->mtime), arg->mtimensec };
  1533. err = ((fi == NULL) ?
  1534. f->fs->op.utimens(path,tv) :
  1535. f->fs->op.futimens(fi,tv));
  1536. }
  1537. else
  1538. #endif
  1539. if(!err && ((arg->valid & (FATTR_ATIME|FATTR_MTIME)) == (FATTR_ATIME|FATTR_MTIME)))
  1540. {
  1541. struct timespec tv[2];
  1542. tv[0].tv_sec = arg->atime;
  1543. tv[0].tv_nsec = arg->atimensec;
  1544. tv[1].tv_sec = arg->mtime;
  1545. tv[1].tv_nsec = arg->mtimensec;
  1546. err = ((fi == NULL) ?
  1547. f->fs->op.utimens(path,tv) :
  1548. f->fs->op.futimens(fi,tv));
  1549. }
  1550. if(!err)
  1551. err = ((fi == NULL) ?
  1552. f->fs->op.getattr(path,&stbuf,&timeout) :
  1553. f->fs->op.fgetattr(fi,&stbuf,&timeout));
  1554. free_path(f,hdr_->nodeid,path);
  1555. }
  1556. if(!err)
  1557. {
  1558. pthread_mutex_lock(&f->lock);
  1559. update_stat(get_node(f,hdr_->nodeid),&stbuf);
  1560. pthread_mutex_unlock(&f->lock);
  1561. set_stat(f,hdr_->nodeid,&stbuf);
  1562. fuse_reply_attr(req,&stbuf,timeout.attr);
  1563. }
  1564. else
  1565. {
  1566. fuse_reply_err(req,err);
  1567. }
  1568. }
  1569. static
  1570. void
  1571. fuse_lib_access(fuse_req_t req,
  1572. struct fuse_in_header *hdr_)
  1573. {
  1574. int err;
  1575. char *path;
  1576. struct fuse *f;
  1577. struct fuse_access_in *arg;
  1578. arg = (fuse_access_in*)fuse_hdr_arg(hdr_);
  1579. f = req_fuse_prepare(req);
  1580. err = get_path(f,hdr_->nodeid,&path);
  1581. if(!err)
  1582. {
  1583. err = f->fs->op.access(path,arg->mask);
  1584. free_path(f,hdr_->nodeid,path);
  1585. }
  1586. fuse_reply_err(req,err);
  1587. }
  1588. static
  1589. void
  1590. fuse_lib_readlink(fuse_req_t req,
  1591. struct fuse_in_header *hdr_)
  1592. {
  1593. int err;
  1594. char *path;
  1595. struct fuse *f;
  1596. char linkname[PATH_MAX + 1];
  1597. f = req_fuse_prepare(req);
  1598. err = get_path(f,hdr_->nodeid,&path);
  1599. if(!err)
  1600. {
  1601. err = f->fs->op.readlink(path,linkname,sizeof(linkname));
  1602. free_path(f,hdr_->nodeid,path);
  1603. }
  1604. if(!err)
  1605. {
  1606. linkname[PATH_MAX] = '\0';
  1607. fuse_reply_readlink(req,linkname);
  1608. }
  1609. else
  1610. {
  1611. fuse_reply_err(req,err);
  1612. }
  1613. }
  1614. static
  1615. void
  1616. fuse_lib_mknod(fuse_req_t req,
  1617. struct fuse_in_header *hdr_)
  1618. {
  1619. int err;
  1620. char *path;
  1621. struct fuse *f;
  1622. const char* name;
  1623. struct fuse_entry_param e;
  1624. struct fuse_mknod_in *arg;
  1625. arg = (fuse_mknod_in*)fuse_hdr_arg(hdr_);
  1626. name = (const char*)PARAM(arg);
  1627. if(req->f->conn.proto_minor >= 12)
  1628. req->ctx.umask = arg->umask;
  1629. else
  1630. name = (char*)arg + FUSE_COMPAT_MKNOD_IN_SIZE;
  1631. f = req_fuse_prepare(req);
  1632. err = get_path_name(f,hdr_->nodeid,name,&path);
  1633. if(!err)
  1634. {
  1635. err = -ENOSYS;
  1636. if(S_ISREG(arg->mode))
  1637. {
  1638. fuse_file_info_t fi;
  1639. memset(&fi,0,sizeof(fi));
  1640. fi.flags = O_CREAT | O_EXCL | O_WRONLY;
  1641. err = f->fs->op.create(path,arg->mode,&fi);
  1642. if(!err)
  1643. {
  1644. err = lookup_path(f,hdr_->nodeid,name,path,&e,&fi);
  1645. f->fs->op.release(&fi);
  1646. }
  1647. }
  1648. if(err == -ENOSYS)
  1649. {
  1650. err = f->fs->op.mknod(path,arg->mode,arg->rdev);
  1651. if(!err)
  1652. err = lookup_path(f,hdr_->nodeid,name,path,&e,NULL);
  1653. }
  1654. free_path(f,hdr_->nodeid,path);
  1655. }
  1656. reply_entry(req,&e,err);
  1657. }
  1658. static
  1659. void
  1660. fuse_lib_mkdir(fuse_req_t req,
  1661. struct fuse_in_header *hdr_)
  1662. {
  1663. int err;
  1664. char *path;
  1665. struct fuse *f;
  1666. const char *name;
  1667. struct fuse_entry_param e;
  1668. struct fuse_mkdir_in *arg;
  1669. arg = (fuse_mkdir_in*)fuse_hdr_arg(hdr_);
  1670. name = (const char*)PARAM(arg);
  1671. if(req->f->conn.proto_minor >= 12)
  1672. req->ctx.umask = arg->umask;
  1673. f = req_fuse_prepare(req);
  1674. err = get_path_name(f,hdr_->nodeid,name,&path);
  1675. if(!err)
  1676. {
  1677. err = f->fs->op.mkdir(path,arg->mode);
  1678. if(!err)
  1679. err = lookup_path(f,hdr_->nodeid,name,path,&e,NULL);
  1680. free_path(f,hdr_->nodeid,path);
  1681. }
  1682. reply_entry(req,&e,err);
  1683. }
  1684. static
  1685. void
  1686. fuse_lib_unlink(fuse_req_t req,
  1687. struct fuse_in_header *hdr_)
  1688. {
  1689. int err;
  1690. char *path;
  1691. struct fuse *f;
  1692. const char *name;
  1693. node_t *wnode;
  1694. name = (const char*)PARAM(hdr_);
  1695. f = req_fuse_prepare(req);
  1696. err = get_path_wrlock(f,hdr_->nodeid,name,&path,&wnode);
  1697. if(!err)
  1698. {
  1699. pthread_mutex_lock(&f->lock);
  1700. if(node_open(wnode))
  1701. err = f->fs->op.prepare_hide(path,&wnode->hidden_fh);
  1702. pthread_mutex_unlock(&f->lock);
  1703. err = f->fs->op.unlink(path);
  1704. if(!err)
  1705. remove_node(f,hdr_->nodeid,name);
  1706. free_path_wrlock(f,hdr_->nodeid,wnode,path);
  1707. }
  1708. fuse_reply_err(req,err);
  1709. }
  1710. static
  1711. void
  1712. fuse_lib_rmdir(fuse_req_t req,
  1713. struct fuse_in_header *hdr_)
  1714. {
  1715. int err;
  1716. char *path;
  1717. struct fuse *f;
  1718. const char *name;
  1719. node_t *wnode;
  1720. name = (const char*)PARAM(hdr_);
  1721. f = req_fuse_prepare(req);
  1722. err = get_path_wrlock(f,hdr_->nodeid,name,&path,&wnode);
  1723. if(!err)
  1724. {
  1725. err = f->fs->op.rmdir(path);
  1726. if(!err)
  1727. remove_node(f,hdr_->nodeid,name);
  1728. free_path_wrlock(f,hdr_->nodeid,wnode,path);
  1729. }
  1730. fuse_reply_err(req,err);
  1731. }
  1732. static
  1733. void
  1734. fuse_lib_symlink(fuse_req_t req_,
  1735. struct fuse_in_header *hdr_)
  1736. {
  1737. int rv;
  1738. char *path;
  1739. struct fuse *f;
  1740. const char *name;
  1741. const char *linkname;
  1742. struct fuse_entry_param e = {0};
  1743. name = (const char*)fuse_hdr_arg(hdr_);
  1744. linkname = (name + strlen(name) + 1);
  1745. f = req_fuse_prepare(req_);
  1746. rv = get_path_name(f,hdr_->nodeid,name,&path);
  1747. if(rv == 0)
  1748. {
  1749. rv = f->fs->op.symlink(linkname,path,&e.attr,&e.timeout);
  1750. if(rv == 0)
  1751. rv = set_path_info(f,hdr_->nodeid,name,&e);
  1752. free_path(f,hdr_->nodeid,path);
  1753. }
  1754. reply_entry(req_,&e,rv);
  1755. }
  1756. static
  1757. void
  1758. fuse_lib_rename(fuse_req_t req,
  1759. struct fuse_in_header *hdr_)
  1760. {
  1761. int err;
  1762. struct fuse *f;
  1763. char *oldpath;
  1764. char *newpath;
  1765. const char *oldname;
  1766. const char *newname;
  1767. node_t *wnode1;
  1768. node_t *wnode2;
  1769. struct fuse_rename_in *arg;
  1770. arg = (fuse_rename_in*)fuse_hdr_arg(hdr_);
  1771. oldname = (const char*)PARAM(arg);
  1772. newname = (oldname + strlen(oldname) + 1);
  1773. f = req_fuse_prepare(req);
  1774. err = get_path2(f,hdr_->nodeid,oldname,arg->newdir,newname,
  1775. &oldpath,&newpath,&wnode1,&wnode2);
  1776. if(!err)
  1777. {
  1778. pthread_mutex_lock(&f->lock);
  1779. if(node_open(wnode2))
  1780. err = f->fs->op.prepare_hide(newpath,&wnode2->hidden_fh);
  1781. pthread_mutex_unlock(&f->lock);
  1782. err = f->fs->op.rename(oldpath,newpath);
  1783. if(!err)
  1784. err = rename_node(f,hdr_->nodeid,oldname,arg->newdir,newname);
  1785. free_path2(f,hdr_->nodeid,arg->newdir,wnode1,wnode2,oldpath,newpath);
  1786. }
  1787. fuse_reply_err(req,err);
  1788. }
  1789. static
  1790. void
  1791. fuse_lib_link(fuse_req_t req,
  1792. struct fuse_in_header *hdr_)
  1793. {
  1794. int rv;
  1795. char *oldpath;
  1796. char *newpath;
  1797. struct fuse *f;
  1798. const char *newname;
  1799. struct fuse_link_in *arg;
  1800. struct fuse_entry_param e = {0};
  1801. arg = (fuse_link_in*)fuse_hdr_arg(hdr_);
  1802. newname = (const char*)PARAM(arg);
  1803. f = req_fuse_prepare(req);
  1804. rv = get_path2(f,
  1805. arg->oldnodeid,NULL,
  1806. hdr_->nodeid,newname,
  1807. &oldpath,&newpath,NULL,NULL);
  1808. if(!rv)
  1809. {
  1810. rv = f->fs->op.link(oldpath,newpath,&e.attr,&e.timeout);
  1811. if(rv == 0)
  1812. rv = set_path_info(f,hdr_->nodeid,newname,&e);
  1813. free_path2(f,arg->oldnodeid,hdr_->nodeid,NULL,NULL,oldpath,newpath);
  1814. }
  1815. reply_entry(req,&e,rv);
  1816. }
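/*
  hidden_fh supports unlink/rename of files that are still open: when
  the target node is open, prepare_hide() is asked for a handle that
  keeps the data reachable, and fuse_do_release() passes it to
  free_hide() once the last opener is gone (free_node() does the same
  for nodes freed with a hidden handle still outstanding).
*/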
  1817. static
  1818. void
  1819. fuse_do_release(struct fuse *f,
  1820. uint64_t ino,
  1821. fuse_file_info_t *fi)
  1822. {
  1823. uint64_t fh;
  1824. node_t *node;
  1825. fh = 0;
  1826. f->fs->op.release(fi);
  1827. pthread_mutex_lock(&f->lock);
  1828. {
  1829. node = get_node(f,ino);
  1830. assert(node->open_count > 0);
  1831. node->open_count--;
  1832. if(node->hidden_fh && (node->open_count == 0))
  1833. {
  1834. fh = node->hidden_fh;
  1835. node->hidden_fh = 0;
  1836. }
  1837. }
  1838. pthread_mutex_unlock(&f->lock);
  1839. if(fh)
  1840. f->fs->op.free_hide(fh);
  1841. }
  1842. static
  1843. void
  1844. fuse_lib_create(fuse_req_t req,
  1845. struct fuse_in_header *hdr_)
  1846. {
  1847. int err;
  1848. char *path;
  1849. struct fuse *f;
  1850. const char *name;
  1851. fuse_file_info_t ffi = {0};
  1852. struct fuse_entry_param e;
  1853. struct fuse_create_in *arg;
  1854. arg = (fuse_create_in*)fuse_hdr_arg(hdr_);
  1855. name = (const char*)PARAM(arg);
  1856. ffi.flags = arg->flags;
  1857. if(req->f->conn.proto_minor >= 12)
  1858. req->ctx.umask = arg->umask;
  1859. else
  1860. name = (char*)arg + sizeof(struct fuse_open_in);
  1861. f = req_fuse_prepare(req);
  1862. err = get_path_name(f,hdr_->nodeid,name,&path);
  1863. if(!err)
  1864. {
  1865. err = f->fs->op.create(path,arg->mode,&ffi);
  1866. if(!err)
  1867. {
  1868. err = lookup_path(f,hdr_->nodeid,name,path,&e,&ffi);
  1869. if(err)
  1870. {
  1871. f->fs->op.release(&ffi);
  1872. }
  1873. else if(!S_ISREG(e.attr.st_mode))
  1874. {
  1875. err = -EIO;
  1876. f->fs->op.release(&ffi);
  1877. forget_node(f,e.ino,1);
  1878. }
  1879. }
  1880. }
  1881. if(!err)
  1882. {
  1883. pthread_mutex_lock(&f->lock);
  1884. get_node(f,e.ino)->open_count++;
  1885. pthread_mutex_unlock(&f->lock);
  1886. if(fuse_reply_create(req,&e,&ffi) == -ENOENT)
  1887. {
  1888. /* The open syscall was interrupted,so it
  1889. must be cancelled */
  1890. fuse_do_release(f,e.ino,&ffi);
  1891. forget_node(f,e.ino,1);
  1892. }
  1893. }
  1894. else
  1895. {
  1896. fuse_reply_err(req,err);
  1897. }
  1898. free_path(f,hdr_->nodeid,path);
  1899. }
  1900. static
  1901. void
  1902. open_auto_cache(struct fuse *f,
  1903. uint64_t ino,
  1904. const char *path,
  1905. fuse_file_info_t *fi)
  1906. {
  1907. node_t *node;
  1908. fuse_timeouts_t timeout;
  1909. pthread_mutex_lock(&f->lock);
  1910. node = get_node(f,ino);
  1911. if(node->is_stat_cache_valid)
  1912. {
  1913. int err;
  1914. struct stat stbuf;
  1915. pthread_mutex_unlock(&f->lock);
  1916. err = f->fs->op.fgetattr(fi,&stbuf,&timeout);
  1917. pthread_mutex_lock(&f->lock);
  1918. if(!err)
  1919. update_stat(node,&stbuf);
  1920. else
  1921. node->is_stat_cache_valid = 0;
  1922. }
  1923. if(node->is_stat_cache_valid)
  1924. fi->keep_cache = 1;
  1925. node->is_stat_cache_valid = 1;
  1926. pthread_mutex_unlock(&f->lock);
  1927. }
  1928. static
  1929. void
  1930. fuse_lib_open(fuse_req_t req,
  1931. struct fuse_in_header *hdr_)
  1932. {
  1933. int err;
  1934. char *path;
  1935. struct fuse *f;
  1936. fuse_file_info_t ffi = {0};
  1937. struct fuse_open_in *arg;
  1938. arg = (fuse_open_in*)fuse_hdr_arg(hdr_);
  1939. ffi.flags = arg->flags;
  1940. f = req_fuse_prepare(req);
  1941. err = get_path(f,hdr_->nodeid,&path);
  1942. if(!err)
  1943. {
  1944. err = f->fs->op.open(path,&ffi);
  1945. if(!err)
  1946. {
  1947. if(ffi.auto_cache)
  1948. open_auto_cache(f,hdr_->nodeid,path,&ffi);
  1949. }
  1950. }
  1951. if(!err)
  1952. {
  1953. pthread_mutex_lock(&f->lock);
  1954. get_node(f,hdr_->nodeid)->open_count++;
  1955. pthread_mutex_unlock(&f->lock);
  1956. /* The open syscall was interrupted,so it must be cancelled */
  1957. if(fuse_reply_open(req,&ffi) == -ENOENT)
  1958. fuse_do_release(f,hdr_->nodeid,&ffi);
  1959. }
  1960. else
  1961. {
  1962. fuse_reply_err(req,err);
  1963. }
  1964. free_path(f,hdr_->nodeid,path);
  1965. }
  1966. static
  1967. void
  1968. fuse_lib_read(fuse_req_t req,
  1969. struct fuse_in_header *hdr_)
  1970. {
  1971. int res;
  1972. struct fuse *f;
  1973. fuse_file_info_t ffi = {0};
  1974. struct fuse_read_in *arg;
  1975. fuse_msgbuf_t *msgbuf;
  1976. arg = (fuse_read_in*)fuse_hdr_arg(hdr_);
  1977. ffi.fh = arg->fh;
  1978. if(req->f->conn.proto_minor >= 9)
  1979. {
  1980. ffi.flags = arg->flags;
  1981. ffi.lock_owner = arg->lock_owner;
  1982. }
  1983. f = req_fuse_prepare(req);
  1984. msgbuf = msgbuf_alloc_page_aligned();
  1985. res = f->fs->op.read(&ffi,msgbuf->mem,arg->size,arg->offset);
  1986. if(res >= 0)
  1987. fuse_reply_data(req,msgbuf->mem,res);
  1988. else
  1989. fuse_reply_err(req,res);
  1990. msgbuf_free(msgbuf);
  1991. }
  1992. static
  1993. void
  1994. fuse_lib_write(fuse_req_t req,
  1995. struct fuse_in_header *hdr_)
  1996. {
  1997. int res;
  1998. char *data;
  1999. struct fuse *f;
  2000. fuse_file_info_t ffi = {0};
  2001. struct fuse_write_in *arg;
  2002. arg = (fuse_write_in*)fuse_hdr_arg(hdr_);
  2003. ffi.fh = arg->fh;
  2004. ffi.writepage = !!(arg->write_flags & 1);
  2005. if(req->f->conn.proto_minor < 9)
  2006. {
  2007. data = ((char*)arg) + FUSE_COMPAT_WRITE_IN_SIZE;
  2008. }
  2009. else
  2010. {
  2011. ffi.flags = arg->flags;
  2012. ffi.lock_owner = arg->lock_owner;
  2013. data = (char*)PARAM(arg);
  2014. }
  2015. f = req_fuse_prepare(req);
  2016. res = f->fs->op.write(&ffi,data,arg->size,arg->offset);
  2017. free_path(f,hdr_->nodeid,NULL);
  2018. if(res >= 0)
  2019. fuse_reply_write(req,res);
  2020. else
  2021. fuse_reply_err(req,res);
  2022. }
  2023. static
  2024. void
  2025. fuse_lib_fsync(fuse_req_t req,
  2026. struct fuse_in_header *hdr_)
  2027. {
  2028. int err;
  2029. struct fuse *f;
  2030. struct fuse_fsync_in *arg;
  2031. fuse_file_info_t ffi = {0};
  2032. arg = (fuse_fsync_in*)fuse_hdr_arg(hdr_);
  2033. ffi.fh = arg->fh;
  2034. f = req_fuse_prepare(req);
  2035. err = f->fs->op.fsync(&ffi,
  2036. !!(arg->fsync_flags & 1));
  2037. fuse_reply_err(req,err);
  2038. }
  2039. static
  2040. struct fuse_dh*
  2041. get_dirhandle(const fuse_file_info_t *llfi,
  2042. fuse_file_info_t *fi)
  2043. {
  2044. struct fuse_dh *dh = (struct fuse_dh *)(uintptr_t)llfi->fh;
  2045. memset(fi,0,sizeof(fuse_file_info_t));
  2046. fi->fh = dh->fh;
  2047. return dh;
  2048. }
  2049. static
  2050. void
  2051. fuse_lib_opendir(fuse_req_t req,
  2052. struct fuse_in_header *hdr_)
  2053. {
  2054. int err;
  2055. char *path;
  2056. struct fuse_dh *dh;
  2057. fuse_file_info_t llffi = {0};
  2058. fuse_file_info_t ffi = {0};
  2059. struct fuse *f;
  2060. struct fuse_open_in *arg;
  2061. arg = (fuse_open_in*)fuse_hdr_arg(hdr_);
  2062. llffi.flags = arg->flags;
  2063. f = req_fuse_prepare(req);
  2064. dh = (struct fuse_dh *)calloc(1,sizeof(struct fuse_dh));
  2065. if(dh == NULL)
  2066. {
  2067. fuse_reply_err(req,ENOMEM);
  2068. return;
  2069. }
  2070. fuse_dirents_init(&dh->d);
  2071. fuse_mutex_init(&dh->lock);
  2072. llffi.fh = (uintptr_t)dh;
  2073. ffi.flags = llffi.flags;
  2074. err = get_path(f,hdr_->nodeid,&path);
  2075. if(!err)
  2076. {
  2077. err = f->fs->op.opendir(path,&ffi);
  2078. dh->fh = ffi.fh;
  2079. llffi.keep_cache = ffi.keep_cache;
  2080. llffi.cache_readdir = ffi.cache_readdir;
  2081. }
  2082. if(!err)
  2083. {
  2084. if(fuse_reply_open(req,&llffi) == -ENOENT)
  2085. {
  2086. /* The opendir syscall was interrupted,so it
  2087. must be cancelled */
  2088. f->fs->op.releasedir(&ffi);
  2089. pthread_mutex_destroy(&dh->lock);
  2090. free(dh);
  2091. }
  2092. }
  2093. else
  2094. {
  2095. fuse_reply_err(req,err);
  2096. pthread_mutex_destroy(&dh->lock);
  2097. free(dh);
  2098. }
  2099. free_path(f,hdr_->nodeid,path);
  2100. }
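/*
  Directory reads are served from the per-handle fuse_dirents_t buffer
  filled on the first read (offset 0): d->offs maps a readdir offset to
  a byte position in d->data, and readdir_buf_size()/readdir_buf()
  clamp and locate the slice returned for a given request.
*/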
  2101. static
  2102. size_t
  2103. readdir_buf_size(fuse_dirents_t *d_,
  2104. size_t size_,
  2105. off_t off_)
  2106. {
  2107. if(off_ >= kv_size(d_->offs))
  2108. return 0;
  2109. if((kv_A(d_->offs,off_) + size_) > kv_size(d_->data))
  2110. return (kv_size(d_->data) - kv_A(d_->offs,off_));
  2111. return size_;
  2112. }
  2113. static
  2114. char*
  2115. readdir_buf(fuse_dirents_t *d_,
  2116. off_t off_)
  2117. {
  2118. size_t i;
  2119. i = kv_A(d_->offs,off_);
  2120. return &kv_A(d_->data,i);
  2121. }
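/* The directory is read in full on the first request (offset 0) or when
   the cache is empty; later requests are served from the buffered dirents
   while holding dh->lock */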
  2122. static
  2123. void
  2124. fuse_lib_readdir(fuse_req_t req_,
  2125. struct fuse_in_header *hdr_)
  2126. {
  2127. int rv;
  2128. size_t size;
  2129. struct fuse *f;
  2130. fuse_dirents_t *d;
  2131. struct fuse_dh *dh;
  2132. fuse_file_info_t ffi = {0};
  2133. fuse_file_info_t llffi = {0};
  2134. struct fuse_read_in *arg;
  2135. arg = (fuse_read_in*)fuse_hdr_arg(hdr_);
  2136. size = arg->size;
  2137. llffi.fh = arg->fh;
  2138. f = req_fuse_prepare(req_);
  2139. dh = get_dirhandle(&llffi,&ffi);
  2140. d = &dh->d;
  2141. pthread_mutex_lock(&dh->lock);
  2142. rv = 0;
  2143. if((arg->offset == 0) || (kv_size(d->data) == 0))
  2144. rv = f->fs->op.readdir(&ffi,d);
  2145. if(rv)
  2146. {
  2147. fuse_reply_err(req_,rv);
  2148. goto out;
  2149. }
  2150. size = readdir_buf_size(d,size,arg->offset);
  2151. fuse_reply_buf(req_,
  2152. readdir_buf(d,arg->offset),
  2153. size);
  2154. out:
  2155. pthread_mutex_unlock(&dh->lock);
  2156. }
  2157. static
  2158. void
  2159. fuse_lib_readdir_plus(fuse_req_t req_,
  2160. struct fuse_in_header *hdr_)
  2161. {
  2162. int rv;
  2163. size_t size;
  2164. struct fuse *f;
  2165. fuse_dirents_t *d;
  2166. struct fuse_dh *dh;
  2167. fuse_file_info_t ffi = {0};
  2168. fuse_file_info_t llffi = {0};
  2169. struct fuse_read_in *arg;
  2170. arg = (fuse_read_in*)fuse_hdr_arg(hdr_);
  2171. size = arg->size;
  2172. llffi.fh = arg->fh;
  2173. f = req_fuse_prepare(req_);
  2174. dh = get_dirhandle(&llffi,&ffi);
  2175. d = &dh->d;
  2176. pthread_mutex_lock(&dh->lock);
  2177. rv = 0;
  2178. if((arg->offset == 0) || (kv_size(d->data) == 0))
  2179. rv = f->fs->op.readdir_plus(&ffi,d);
  2180. if(rv)
  2181. {
  2182. fuse_reply_err(req_,rv);
  2183. goto out;
  2184. }
  2185. size = readdir_buf_size(d,size,arg->offset);
  2186. fuse_reply_buf(req_,
  2187. readdir_buf(d,arg->offset),
  2188. size);
  2189. out:
  2190. pthread_mutex_unlock(&dh->lock);
  2191. }
  2192. static
  2193. void
  2194. fuse_lib_releasedir(fuse_req_t req_,
  2195. struct fuse_in_header *hdr_)
  2196. {
  2197. struct fuse *f;
  2198. struct fuse_dh *dh;
  2199. fuse_file_info_t ffi;
  2200. fuse_file_info_t llffi = {0};
  2201. struct fuse_release_in *arg;
  2202. arg = (fuse_release_in*)fuse_hdr_arg(hdr_);
  2203. llffi.fh = arg->fh;
  2204. llffi.flags = arg->flags;
  2205. f = req_fuse_prepare(req_);
  2206. dh = get_dirhandle(&llffi,&ffi);
  2207. f->fs->op.releasedir(&ffi);
/* Take and release dh->lock so any readdir still holding it finishes its
   last reply before the handle is destroyed */
  2209. pthread_mutex_lock(&dh->lock);
  2210. pthread_mutex_unlock(&dh->lock);
  2211. pthread_mutex_destroy(&dh->lock);
  2212. fuse_dirents_free(&dh->d);
  2213. free(dh);
  2214. fuse_reply_err(req_,0);
  2215. }
  2216. static
  2217. void
  2218. fuse_lib_fsyncdir(fuse_req_t req,
  2219. struct fuse_in_header *hdr_)
  2220. {
  2221. int err;
  2222. struct fuse *f;
  2223. fuse_file_info_t ffi;
  2224. fuse_file_info_t llffi = {0};
  2225. struct fuse_fsync_in *arg;
  2226. arg = (fuse_fsync_in*)fuse_hdr_arg(hdr_);
  2227. llffi.fh = arg->fh;
  2228. f = req_fuse_prepare(req);
  2229. get_dirhandle(&llffi,&ffi);
  2230. err = f->fs->op.fsyncdir(&ffi,
  2231. !!(arg->fsync_flags & FUSE_FSYNC_FDATASYNC));
  2232. fuse_reply_err(req,err);
  2233. }
  2234. static
  2235. void
  2236. fuse_lib_statfs(fuse_req_t req,
  2237. struct fuse_in_header *hdr_)
  2238. {
  2239. int err = 0;
  2240. char *path = NULL;
  2241. struct fuse *f;
  2242. struct statvfs buf = {0};
  2243. f = req_fuse_prepare(req);
  2244. if(hdr_->nodeid)
  2245. err = get_path(f,hdr_->nodeid,&path);
  2246. if(!err)
  2247. {
  2248. err = f->fs->op.statfs(path ? path : "/",&buf);
  2249. free_path(f,hdr_->nodeid,path);
  2250. }
  2251. if(!err)
  2252. fuse_reply_statfs(req,&buf);
  2253. else
  2254. fuse_reply_err(req,err);
  2255. }
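/* With the extended setxattr ABI the attribute name follows the full
   fuse_setxattr_in struct, otherwise it follows the legacy shorter header;
   the value is packed right after the NUL terminated name */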
  2256. static
  2257. void
  2258. fuse_lib_setxattr(fuse_req_t req,
  2259. struct fuse_in_header *hdr_)
  2260. {
  2261. int err;
  2262. char *path;
  2263. const char *name;
  2264. const char *value;
  2265. struct fuse *f;
  2266. struct fuse_setxattr_in *arg;
  2267. arg = (fuse_setxattr_in*)fuse_hdr_arg(hdr_);
  2268. if((req->f->conn.capable & FUSE_SETXATTR_EXT) && (req->f->conn.want & FUSE_SETXATTR_EXT))
  2269. name = (const char*)PARAM(arg);
  2270. else
  2271. name = (((char*)arg) + FUSE_COMPAT_SETXATTR_IN_SIZE);
  2272. value = (name + strlen(name) + 1);
  2273. f = req_fuse_prepare(req);
  2274. err = get_path(f,hdr_->nodeid,&path);
  2275. if(!err)
  2276. {
  2277. err = f->fs->op.setxattr(path,name,value,arg->size,arg->flags);
  2278. free_path(f,hdr_->nodeid,path);
  2279. }
  2280. fuse_reply_err(req,err);
  2281. }
  2282. static
  2283. int
  2284. common_getxattr(struct fuse *f,
  2285. fuse_req_t req,
  2286. uint64_t ino,
  2287. const char *name,
  2288. char *value,
  2289. size_t size)
  2290. {
  2291. int err;
  2292. char *path;
  2293. err = get_path(f,ino,&path);
  2294. if(!err)
  2295. {
  2296. err = f->fs->op.getxattr(path,name,value,size);
  2297. free_path(f,ino,path);
  2298. }
  2299. return err;
  2300. }
  2301. static
  2302. void
  2303. fuse_lib_getxattr(fuse_req_t req,
  2304. struct fuse_in_header *hdr_)
  2305. {
  2306. int res;
  2307. struct fuse *f;
  2308. const char* name;
  2309. struct fuse_getxattr_in *arg;
  2310. arg = (fuse_getxattr_in*)fuse_hdr_arg(hdr_);
  2311. name = (const char*)PARAM(arg);
  2312. f = req_fuse_prepare(req);
  2313. if(arg->size)
  2314. {
  2315. char *value = (char*)malloc(arg->size);
  2316. if(value == NULL)
  2317. {
  2318. fuse_reply_err(req,ENOMEM);
  2319. return;
  2320. }
  2321. res = common_getxattr(f,req,hdr_->nodeid,name,value,arg->size);
  2322. if(res > 0)
  2323. fuse_reply_buf(req,value,res);
  2324. else
  2325. fuse_reply_err(req,res);
  2326. free(value);
  2327. }
  2328. else
  2329. {
  2330. res = common_getxattr(f,req,hdr_->nodeid,name,NULL,0);
  2331. if(res >= 0)
  2332. fuse_reply_xattr(req,res);
  2333. else
  2334. fuse_reply_err(req,res);
  2335. }
  2336. }
  2337. static
  2338. int
  2339. common_listxattr(struct fuse *f,
  2340. fuse_req_t req,
  2341. uint64_t ino,
  2342. char *list,
  2343. size_t size)
  2344. {
  2345. char *path;
  2346. int err;
  2347. err = get_path(f,ino,&path);
  2348. if(!err)
  2349. {
  2350. err = f->fs->op.listxattr(path,list,size);
  2351. free_path(f,ino,path);
  2352. }
  2353. return err;
  2354. }
  2355. static
  2356. void
  2357. fuse_lib_listxattr(fuse_req_t req,
  2358. struct fuse_in_header *hdr_)
  2359. {
  2360. int res;
  2361. struct fuse *f;
  2362. struct fuse_getxattr_in *arg;
  2363. arg = (fuse_getxattr_in*)fuse_hdr_arg(hdr_);
  2364. f = req_fuse_prepare(req);
  2365. if(arg->size)
  2366. {
  2367. char *list = (char*)malloc(arg->size);
  2368. if(list == NULL)
  2369. {
  2370. fuse_reply_err(req,ENOMEM);
  2371. return;
  2372. }
  2373. res = common_listxattr(f,req,hdr_->nodeid,list,arg->size);
  2374. if(res > 0)
  2375. fuse_reply_buf(req,list,res);
  2376. else
  2377. fuse_reply_err(req,res);
  2378. free(list);
  2379. }
  2380. else
  2381. {
  2382. res = common_listxattr(f,req,hdr_->nodeid,NULL,0);
  2383. if(res >= 0)
  2384. fuse_reply_xattr(req,res);
  2385. else
  2386. fuse_reply_err(req,res);
  2387. }
  2388. }
  2389. static
  2390. void
  2391. fuse_lib_removexattr(fuse_req_t req,
  2392. const struct fuse_in_header *hdr_)
  2393. {
  2394. int err;
  2395. char *path;
  2396. const char *name;
  2397. struct fuse *f;
  2398. name = (const char*)fuse_hdr_arg(hdr_);
  2399. f = req_fuse_prepare(req);
  2400. err = get_path(f,hdr_->nodeid,&path);
  2401. if(!err)
  2402. {
  2403. err = f->fs->op.removexattr(path,name);
  2404. free_path(f,hdr_->nodeid,path);
  2405. }
  2406. fuse_reply_err(req,err);
  2407. }
  2408. static
  2409. void
  2410. fuse_lib_copy_file_range(fuse_req_t req_,
  2411. const struct fuse_in_header *hdr_)
  2412. {
  2413. ssize_t rv;
  2414. struct fuse *f;
  2415. fuse_file_info_t ffi_in = {0};
  2416. fuse_file_info_t ffi_out = {0};
  2417. const struct fuse_copy_file_range_in *arg;
  2418. arg = (fuse_copy_file_range_in*)fuse_hdr_arg(hdr_);
  2419. ffi_in.fh = arg->fh_in;
  2420. ffi_out.fh = arg->fh_out;
  2421. f = req_fuse_prepare(req_);
  2422. rv = f->fs->op.copy_file_range(&ffi_in,
  2423. arg->off_in,
  2424. &ffi_out,
  2425. arg->off_out,
  2426. arg->len,
  2427. arg->flags);
  2428. if(rv >= 0)
  2429. fuse_reply_write(req_,rv);
  2430. else
  2431. fuse_reply_err(req_,rv);
  2432. }
  2433. static
  2434. void
  2435. fuse_lib_setupmapping(fuse_req_t req_,
  2436. const struct fuse_in_header *hdr_)
  2437. {
  2438. fuse_reply_err(req_,ENOSYS);
  2439. }
  2440. static
  2441. void
  2442. fuse_lib_removemapping(fuse_req_t req_,
  2443. const struct fuse_in_header *hdr_)
  2444. {
  2445. fuse_reply_err(req_,ENOSYS);
  2446. }
  2447. static
  2448. void
  2449. fuse_lib_syncfs(fuse_req_t req_,
  2450. const struct fuse_in_header *hdr_)
  2451. {
  2452. fuse_reply_err(req_,ENOSYS);
  2453. }
// TODO: This is just a copy of fuse_lib_create. Needs to be rewritten
// so a nameless node can be set up.
// name is always '/'
// nodeid is the base directory
  2458. static
  2459. void
  2460. fuse_lib_tmpfile(fuse_req_t req_,
  2461. const struct fuse_in_header *hdr_)
  2462. {
  2463. int err;
  2464. char *path;
  2465. struct fuse *f;
  2466. const char *name;
  2467. fuse_file_info_t ffi = {0};
  2468. struct fuse_entry_param e;
  2469. struct fuse_create_in *arg;
  2470. arg = (fuse_create_in*)fuse_hdr_arg(hdr_);
  2471. name = (const char*)PARAM(arg);
  2472. ffi.flags = arg->flags;
  2473. if(req_->f->conn.proto_minor >= 12)
  2474. req_->ctx.umask = arg->umask;
  2475. else
  2476. name = (char*)arg + sizeof(struct fuse_open_in);
  2477. f = req_fuse_prepare(req_);
  2478. err = get_path_name(f,hdr_->nodeid,name,&path);
  2479. if(!err)
  2480. {
  2481. err = f->fs->op.tmpfile(path,arg->mode,&ffi);
  2482. if(!err)
  2483. {
  2484. err = lookup_path(f,hdr_->nodeid,name,path,&e,&ffi);
  2485. if(err)
  2486. {
  2487. f->fs->op.release(&ffi);
  2488. }
  2489. else if(!S_ISREG(e.attr.st_mode))
  2490. {
  2491. err = -EIO;
  2492. f->fs->op.release(&ffi);
  2493. forget_node(f,e.ino,1);
  2494. }
  2495. }
  2496. }
  2497. if(!err)
  2498. {
  2499. pthread_mutex_lock(&f->lock);
  2500. get_node(f,e.ino)->open_count++;
  2501. pthread_mutex_unlock(&f->lock);
  2502. if(fuse_reply_create(req_,&e,&ffi) == -ENOENT)
  2503. {
/* The open syscall was interrupted, so it
   must be cancelled */
  2506. fuse_do_release(f,e.ino,&ffi);
  2507. forget_node(f,e.ino,1);
  2508. }
  2509. }
  2510. else
  2511. {
  2512. fuse_reply_err(req_,err);
  2513. }
  2514. free_path(f,hdr_->nodeid,path);
  2515. }
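/* Find a POSIX lock held by a different owner that overlaps the requested
   range, where at least one of the two locks is a write lock */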
  2516. static
  2517. lock_t*
  2518. locks_conflict(node_t *node,
  2519. const lock_t *lock)
  2520. {
  2521. lock_t *l;
  2522. for(l = node->locks; l; l = l->next)
  2523. if(l->owner != lock->owner &&
  2524. lock->start <= l->end && l->start <= lock->end &&
  2525. (l->type == F_WRLCK || lock->type == F_WRLCK))
  2526. break;
  2527. return l;
  2528. }
  2529. static
  2530. void
  2531. delete_lock(lock_t **lockp)
  2532. {
  2533. lock_t *l = *lockp;
  2534. *lockp = l->next;
  2535. free(l);
  2536. }
  2537. static
  2538. void
  2539. insert_lock(lock_t **pos,
  2540. lock_t *lock)
  2541. {
  2542. lock->next = *pos;
  2543. *pos = lock;
  2544. }
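/* Insert a lock into the node's list, merging overlapping or adjacent
   ranges of the same type for this owner and trimming or splitting ranges
   of the opposite type. The nodes that may be needed are allocated up
   front so the update cannot fail halfway; F_UNLCK only punches holes */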
  2545. static
  2546. int
  2547. locks_insert(node_t *node,
  2548. lock_t *lock)
  2549. {
  2550. lock_t **lp;
  2551. lock_t *newl1 = NULL;
  2552. lock_t *newl2 = NULL;
  2553. if(lock->type != F_UNLCK || lock->start != 0 || lock->end != OFFSET_MAX)
  2554. {
  2555. newl1 = (lock_t*)malloc(sizeof(lock_t));
  2556. newl2 = (lock_t*)malloc(sizeof(lock_t));
  2557. if(!newl1 || !newl2)
  2558. {
  2559. free(newl1);
  2560. free(newl2);
  2561. return -ENOLCK;
  2562. }
  2563. }
  2564. for(lp = &node->locks; *lp;)
  2565. {
  2566. lock_t *l = *lp;
  2567. if(l->owner != lock->owner)
  2568. goto skip;
  2569. if(lock->type == l->type)
  2570. {
  2571. if(l->end < lock->start - 1)
  2572. goto skip;
  2573. if(lock->end < l->start - 1)
  2574. break;
  2575. if(l->start <= lock->start && lock->end <= l->end)
  2576. goto out;
  2577. if(l->start < lock->start)
  2578. lock->start = l->start;
  2579. if(lock->end < l->end)
  2580. lock->end = l->end;
  2581. goto delete_lock;
  2582. }
  2583. else
  2584. {
  2585. if(l->end < lock->start)
  2586. goto skip;
  2587. if(lock->end < l->start)
  2588. break;
  2589. if(lock->start <= l->start && l->end <= lock->end)
  2590. goto delete_lock;
  2591. if(l->end <= lock->end)
  2592. {
  2593. l->end = lock->start - 1;
  2594. goto skip;
  2595. }
  2596. if(lock->start <= l->start)
  2597. {
  2598. l->start = lock->end + 1;
  2599. break;
  2600. }
  2601. *newl2 = *l;
  2602. newl2->start = lock->end + 1;
  2603. l->end = lock->start - 1;
  2604. insert_lock(&l->next,newl2);
  2605. newl2 = NULL;
  2606. }
  2607. skip:
  2608. lp = &l->next;
  2609. continue;
  2610. delete_lock:
  2611. delete_lock(lp);
  2612. }
  2613. if(lock->type != F_UNLCK)
  2614. {
  2615. *newl1 = *lock;
  2616. insert_lock(lp,newl1);
  2617. newl1 = NULL;
  2618. }
  2619. out:
  2620. free(newl1);
  2621. free(newl2);
  2622. return 0;
  2623. }
  2624. static
  2625. void
  2626. flock_to_lock(struct flock *flock,
  2627. lock_t *lock)
  2628. {
  2629. memset(lock,0,sizeof(lock_t));
  2630. lock->type = flock->l_type;
  2631. lock->start = flock->l_start;
  2632. lock->end = flock->l_len ? flock->l_start + flock->l_len - 1 : OFFSET_MAX;
  2633. lock->pid = flock->l_pid;
  2634. }
  2635. static
  2636. void
  2637. lock_to_flock(lock_t *lock,
  2638. struct flock *flock)
  2639. {
  2640. flock->l_type = lock->type;
  2641. flock->l_start = lock->start;
  2642. flock->l_len = (lock->end == OFFSET_MAX) ? 0 : lock->end - lock->start + 1;
  2643. flock->l_pid = lock->pid;
  2644. }
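/* Shared by FLUSH and RELEASE: call op.flush() and release all POSIX
   locks held by this lock owner, recording the unlock in the local list */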
  2645. static
  2646. int
  2647. fuse_flush_common(struct fuse *f,
  2648. fuse_req_t req,
  2649. uint64_t ino,
  2650. fuse_file_info_t *fi)
  2651. {
  2652. struct flock lock;
  2653. lock_t l;
  2654. int err;
  2655. int errlock;
  2656. memset(&lock,0,sizeof(lock));
  2657. lock.l_type = F_UNLCK;
  2658. lock.l_whence = SEEK_SET;
  2659. err = f->fs->op.flush(fi);
  2660. errlock = f->fs->op.lock(fi,F_SETLK,&lock);
  2661. if(errlock != -ENOSYS)
  2662. {
  2663. flock_to_lock(&lock,&l);
  2664. l.owner = fi->lock_owner;
  2665. pthread_mutex_lock(&f->lock);
  2666. locks_insert(get_node(f,ino),&l);
  2667. pthread_mutex_unlock(&f->lock);
/* when op.lock() is implemented, FLUSH is still meaningful even if
   op.flush() is not, so don't report ENOSYS */
  2670. if(err == -ENOSYS)
  2671. err = 0;
  2672. }
  2673. return err;
  2674. }
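/* RELEASE: run the flush path first when the kernel requested it, then
   drop the open reference via fuse_do_release() */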
  2675. static
  2676. void
  2677. fuse_lib_release(fuse_req_t req,
  2678. struct fuse_in_header *hdr_)
  2679. {
  2680. int err = 0;
  2681. struct fuse *f;
  2682. fuse_file_info_t ffi = {0};
  2683. struct fuse_release_in *arg;
  2684. arg = (fuse_release_in*)fuse_hdr_arg(hdr_);
  2685. ffi.fh = arg->fh;
  2686. ffi.flags = arg->flags;
  2687. if(req->f->conn.proto_minor >= 8)
  2688. {
  2689. ffi.flush = !!(arg->release_flags & FUSE_RELEASE_FLUSH);
  2690. ffi.lock_owner = arg->lock_owner;
  2691. }
  2692. else
  2693. {
  2694. ffi.flock_release = 1;
  2695. ffi.lock_owner = arg->lock_owner;
  2696. }
  2697. f = req_fuse_prepare(req);
  2698. if(ffi.flush)
  2699. {
  2700. err = fuse_flush_common(f,req,hdr_->nodeid,&ffi);
  2701. if(err == -ENOSYS)
  2702. err = 0;
  2703. }
  2704. fuse_do_release(f,hdr_->nodeid,&ffi);
  2705. fuse_reply_err(req,err);
  2706. }
  2707. static
  2708. void
  2709. fuse_lib_flush(fuse_req_t req,
  2710. struct fuse_in_header *hdr_)
  2711. {
  2712. int err;
  2713. struct fuse *f;
  2714. fuse_file_info_t ffi = {0};
  2715. struct fuse_flush_in *arg;
  2716. arg = (fuse_flush_in*)fuse_hdr_arg(hdr_);
  2717. ffi.fh = arg->fh;
  2718. ffi.flush = 1;
  2719. if(req->f->conn.proto_minor >= 7)
  2720. ffi.lock_owner = arg->lock_owner;
  2721. f = req_fuse_prepare(req);
  2722. err = fuse_flush_common(f,req,hdr_->nodeid,&ffi);
  2723. fuse_reply_err(req,err);
  2724. }
  2725. static
  2726. int
  2727. fuse_lock_common(fuse_req_t req,
  2728. uint64_t ino,
  2729. fuse_file_info_t *fi,
  2730. struct flock *lock,
  2731. int cmd)
  2732. {
  2733. int err;
  2734. struct fuse *f = req_fuse_prepare(req);
  2735. err = f->fs->op.lock(fi,cmd,lock);
  2736. return err;
  2737. }
  2738. static
  2739. void
  2740. convert_fuse_file_lock(const struct fuse_file_lock *fl,
  2741. struct flock *flock)
  2742. {
  2743. memset(flock, 0, sizeof(struct flock));
  2744. flock->l_type = fl->type;
  2745. flock->l_whence = SEEK_SET;
  2746. flock->l_start = fl->start;
  2747. if (fl->end == OFFSET_MAX)
  2748. flock->l_len = 0;
  2749. else
  2750. flock->l_len = fl->end - fl->start + 1;
  2751. flock->l_pid = fl->pid;
  2752. }
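/* GETLK: check the locally recorded locks first; only when no local
   conflict exists is the filesystem's own lock() op consulted */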
  2753. static
  2754. void
  2755. fuse_lib_getlk(fuse_req_t req,
  2756. const struct fuse_in_header *hdr_)
  2757. {
  2758. int err;
  2759. struct fuse *f;
  2760. lock_t lk;
  2761. struct flock flk;
  2762. lock_t *conflict;
  2763. fuse_file_info_t ffi = {0};
  2764. const struct fuse_lk_in *arg;
  2765. arg = (fuse_lk_in*)fuse_hdr_arg(hdr_);
  2766. ffi.fh = arg->fh;
  2767. ffi.lock_owner = arg->owner;
  2768. convert_fuse_file_lock(&arg->lk,&flk);
  2769. f = req_fuse(req);
  2770. flock_to_lock(&flk,&lk);
  2771. lk.owner = ffi.lock_owner;
  2772. pthread_mutex_lock(&f->lock);
  2773. conflict = locks_conflict(get_node(f,hdr_->nodeid),&lk);
  2774. if(conflict)
  2775. lock_to_flock(conflict,&flk);
  2776. pthread_mutex_unlock(&f->lock);
  2777. if(!conflict)
  2778. err = fuse_lock_common(req,hdr_->nodeid,&ffi,&flk,F_GETLK);
  2779. else
  2780. err = 0;
  2781. if(!err)
  2782. fuse_reply_lock(req,&flk);
  2783. else
  2784. fuse_reply_err(req,err);
  2785. }
  2786. static
  2787. void
  2788. fuse_lib_setlk(fuse_req_t req,
  2789. uint64_t ino,
  2790. fuse_file_info_t *fi,
  2791. struct flock *lock,
  2792. int sleep)
  2793. {
  2794. int err = fuse_lock_common(req,ino,fi,lock,
  2795. sleep ? F_SETLKW : F_SETLK);
  2796. if(!err)
  2797. {
  2798. struct fuse *f = req_fuse(req);
  2799. lock_t l;
  2800. flock_to_lock(lock,&l);
  2801. l.owner = fi->lock_owner;
  2802. pthread_mutex_lock(&f->lock);
  2803. locks_insert(get_node(f,ino),&l);
  2804. pthread_mutex_unlock(&f->lock);
  2805. }
  2806. fuse_reply_err(req,err);
  2807. }
  2808. static
  2809. void
  2810. fuse_lib_flock(fuse_req_t req,
  2811. uint64_t ino,
  2812. fuse_file_info_t *fi,
  2813. int op)
  2814. {
  2815. int err;
  2816. struct fuse *f = req_fuse_prepare(req);
  2817. err = f->fs->op.flock(fi,op);
  2818. fuse_reply_err(req,err);
  2819. }
  2820. static
  2821. void
  2822. fuse_lib_bmap(fuse_req_t req,
  2823. const struct fuse_in_header *hdr_)
  2824. {
  2825. int err;
  2826. char *path;
  2827. struct fuse *f;
  2828. uint64_t block;
  2829. const struct fuse_bmap_in *arg;
  2830. arg = (fuse_bmap_in*)fuse_hdr_arg(hdr_);
  2831. block = arg->block;
  2832. f = req_fuse_prepare(req);
  2833. err = get_path(f,hdr_->nodeid,&path);
  2834. if(!err)
  2835. {
  2836. err = f->fs->op.bmap(path,arg->blocksize,&block);
  2837. free_path(f,hdr_->nodeid,path);
  2838. }
  2839. if(!err)
  2840. fuse_reply_bmap(req,block);
  2841. else
  2842. fuse_reply_err(req,err);
  2843. }
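/* Only restricted ioctls are handled: unrestricted ones get EPERM and
   directory ioctls require FUSE_CAP_IOCTL_DIR. When output is expected
   the input bytes are copied into a freshly allocated buffer which is
   handed to the filesystem and then returned to the kernel */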
  2844. static
  2845. void
  2846. fuse_lib_ioctl(fuse_req_t req,
  2847. const struct fuse_in_header *hdr_)
  2848. {
  2849. int err;
  2850. char *out_buf = NULL;
  2851. struct fuse *f = req_fuse_prepare(req);
  2852. fuse_file_info_t ffi;
  2853. fuse_file_info_t llffi = {0};
  2854. const void *in_buf;
  2855. uint32_t out_size;
  2856. const struct fuse_ioctl_in *arg;
  2857. arg = (fuse_ioctl_in*)fuse_hdr_arg(hdr_);
  2858. if((arg->flags & FUSE_IOCTL_DIR) && !(req->f->conn.want & FUSE_CAP_IOCTL_DIR))
  2859. {
  2860. fuse_reply_err(req,ENOTTY);
  2861. return;
  2862. }
  2863. if((sizeof(void*) == 4) &&
  2864. (req->f->conn.proto_minor >= 16) &&
  2865. !(arg->flags & FUSE_IOCTL_32BIT))
  2866. {
  2867. req->ioctl_64bit = 1;
  2868. }
  2869. llffi.fh = arg->fh;
  2870. out_size = arg->out_size;
  2871. in_buf = (arg->in_size ? PARAM(arg) : NULL);
  2872. err = -EPERM;
  2873. if(arg->flags & FUSE_IOCTL_UNRESTRICTED)
  2874. goto err;
  2875. if(arg->flags & FUSE_IOCTL_DIR)
  2876. get_dirhandle(&llffi,&ffi);
  2877. else
  2878. ffi = llffi;
  2879. if(out_size)
  2880. {
  2881. err = -ENOMEM;
  2882. out_buf = (char*)malloc(out_size);
  2883. if(!out_buf)
  2884. goto err;
  2885. }
  2886. assert(!arg->in_size || !out_size || arg->in_size == out_size);
  2887. if(out_buf)
  2888. memcpy(out_buf,in_buf,arg->in_size);
  2889. err = f->fs->op.ioctl(&ffi,
  2890. arg->cmd,
  2891. (void*)(uintptr_t)arg->arg,
  2892. arg->flags,
  2893. out_buf ?: (void *)in_buf,
  2894. &out_size);
  2895. if(err < 0)
  2896. goto err;
  2897. fuse_reply_ioctl(req,err,out_buf,out_size);
  2898. goto out;
  2899. err:
  2900. fuse_reply_err(req,err);
  2901. out:
  2902. free(out_buf);
  2903. }
  2904. static
  2905. void
  2906. fuse_lib_poll(fuse_req_t req,
  2907. const struct fuse_in_header *hdr_)
  2908. {
  2909. int err;
  2910. struct fuse *f = req_fuse_prepare(req);
  2911. unsigned revents = 0;
  2912. fuse_file_info_t ffi = {0};
  2913. fuse_pollhandle_t *ph = NULL;
  2914. const struct fuse_poll_in *arg;
  2915. arg = (fuse_poll_in*)fuse_hdr_arg(hdr_);
  2916. ffi.fh = arg->fh;
  2917. if(arg->flags & FUSE_POLL_SCHEDULE_NOTIFY)
  2918. {
  2919. ph = (fuse_pollhandle_t*)malloc(sizeof(fuse_pollhandle_t));
  2920. if(ph == NULL)
  2921. {
  2922. fuse_reply_err(req,ENOMEM);
  2923. return;
  2924. }
  2925. ph->kh = arg->kh;
  2926. ph->ch = req->ch;
  2927. ph->f = req->f;
  2928. }
  2929. err = f->fs->op.poll(&ffi,ph,&revents);
  2930. if(!err)
  2931. fuse_reply_poll(req,revents);
  2932. else
  2933. fuse_reply_err(req,err);
  2934. }
  2935. static
  2936. void
  2937. fuse_lib_fallocate(fuse_req_t req,
  2938. const struct fuse_in_header *hdr_)
  2939. {
  2940. int err;
  2941. struct fuse *f;
  2942. fuse_file_info_t ffi = {0};
  2943. const struct fuse_fallocate_in *arg;
  2944. arg = (fuse_fallocate_in*)fuse_hdr_arg(hdr_);
  2945. ffi.fh = arg->fh;
  2946. f = req_fuse_prepare(req);
  2947. err = f->fs->op.fallocate(&ffi,
  2948. arg->mode,
  2949. arg->offset,
  2950. arg->length);
  2951. fuse_reply_err(req,err);
  2952. }
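/* Order remembered nodes by the time they were forgotten so pruning can
   stop at the first entry that has not yet expired */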
  2953. static
  2954. int
  2955. remembered_node_cmp(const void *a_,
  2956. const void *b_)
  2957. {
  2958. const remembered_node_t *a = (const remembered_node_t*)a_;
  2959. const remembered_node_t *b = (const remembered_node_t*)b_;
  2960. return (a->time - b->time);
  2961. }
  2962. static
  2963. void
  2964. remembered_nodes_sort(struct fuse *f_)
  2965. {
  2966. pthread_mutex_lock(&f_->lock);
  2967. qsort(&kv_first(f_->remembered_nodes),
  2968. kv_size(f_->remembered_nodes),
  2969. sizeof(remembered_node_t),
  2970. remembered_node_cmp);
  2971. pthread_mutex_unlock(&f_->lock);
  2972. }
  2973. #define MAX_PRUNE 100
  2974. #define MAX_CHECK 1000
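/* Prune up to MAX_PRUNE expired entries from the time-sorted remembered
   node list, examining at most MAX_CHECK per pass. *offset_ carries the
   scan position between calls and is set to -1 once the scan is done.
   Nodes still referenced elsewhere are skipped */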
  2975. int
  2976. fuse_prune_some_remembered_nodes(struct fuse *f_,
  2977. int *offset_)
  2978. {
  2979. time_t now;
  2980. int pruned;
  2981. int checked;
  2982. pthread_mutex_lock(&f_->lock);
  2983. pruned = 0;
  2984. checked = 0;
  2985. now = current_time();
  2986. while(*offset_ < kv_size(f_->remembered_nodes))
  2987. {
  2988. time_t age;
  2989. remembered_node_t *fn = &kv_A(f_->remembered_nodes,*offset_);
  2990. if(pruned >= MAX_PRUNE)
  2991. break;
  2992. if(checked >= MAX_CHECK)
  2993. break;
  2994. checked++;
  2995. age = (now - fn->time);
  2996. if(f_->conf.remember > age)
  2997. break;
  2998. assert(fn->node->nlookup == 1);
  2999. /* Don't forget active directories */
  3000. if(fn->node->refctr > 1)
  3001. {
  3002. (*offset_)++;
  3003. continue;
  3004. }
  3005. fn->node->nlookup = 0;
  3006. unref_node(f_,fn->node);
  3007. kv_delete(f_->remembered_nodes,*offset_);
  3008. pruned++;
  3009. }
  3010. pthread_mutex_unlock(&f_->lock);
  3011. if((pruned < MAX_PRUNE) && (checked < MAX_CHECK))
  3012. *offset_ = -1;
  3013. return pruned;
  3014. }
  3015. #undef MAX_PRUNE
  3016. #undef MAX_CHECK
  3017. static
  3018. void
  3019. sleep_100ms(void)
  3020. {
  3021. const struct timespec ms100 = {0,100 * 1000000};
  3022. nanosleep(&ms100,NULL);
  3023. }
  3024. void
  3025. fuse_prune_remembered_nodes(struct fuse *f_)
  3026. {
  3027. int offset;
  3028. int pruned;
  3029. offset = 0;
  3030. pruned = 0;
  3031. for(;;)
  3032. {
  3033. pruned += fuse_prune_some_remembered_nodes(f_,&offset);
  3034. if(offset >= 0)
  3035. {
  3036. sleep_100ms();
  3037. continue;
  3038. }
  3039. break;
  3040. }
  3041. if(pruned > 0)
  3042. remembered_nodes_sort(f_);
  3043. }
  3044. static struct fuse_lowlevel_ops fuse_path_ops =
  3045. {
  3046. .access = fuse_lib_access,
  3047. .bmap = fuse_lib_bmap,
  3048. .copy_file_range = fuse_lib_copy_file_range,
  3049. .create = fuse_lib_create,
  3050. .destroy = fuse_lib_destroy,
  3051. .fallocate = fuse_lib_fallocate,
  3052. .flock = fuse_lib_flock,
  3053. .flush = fuse_lib_flush,
  3054. .forget = fuse_lib_forget,
  3055. .forget_multi = fuse_lib_forget_multi,
  3056. .fsync = fuse_lib_fsync,
  3057. .fsyncdir = fuse_lib_fsyncdir,
  3058. .getattr = fuse_lib_getattr,
  3059. .getlk = fuse_lib_getlk,
  3060. .getxattr = fuse_lib_getxattr,
  3061. .init = fuse_lib_init,
  3062. .ioctl = fuse_lib_ioctl,
  3063. .link = fuse_lib_link,
  3064. .listxattr = fuse_lib_listxattr,
  3065. .lookup = fuse_lib_lookup,
  3066. .mkdir = fuse_lib_mkdir,
  3067. .mknod = fuse_lib_mknod,
  3068. .open = fuse_lib_open,
  3069. .opendir = fuse_lib_opendir,
  3070. .poll = fuse_lib_poll,
  3071. .read = fuse_lib_read,
  3072. .readdir = fuse_lib_readdir,
  3073. .readdir_plus = fuse_lib_readdir_plus,
  3074. .readlink = fuse_lib_readlink,
  3075. .release = fuse_lib_release,
  3076. .releasedir = fuse_lib_releasedir,
  3077. .removemapping = fuse_lib_removemapping,
  3078. .removexattr = fuse_lib_removexattr,
  3079. .rename = fuse_lib_rename,
  3080. .retrieve_reply = NULL,
  3081. .rmdir = fuse_lib_rmdir,
  3082. .setattr = fuse_lib_setattr,
  3083. .setlk = fuse_lib_setlk,
  3084. .setupmapping = fuse_lib_setupmapping,
  3085. .setxattr = fuse_lib_setxattr,
  3086. .statfs = fuse_lib_statfs,
  3087. .symlink = fuse_lib_symlink,
  3088. .syncfs = fuse_lib_syncfs,
  3089. .tmpfile = fuse_lib_tmpfile,
  3090. .unlink = fuse_lib_unlink,
  3091. .write = fuse_lib_write,
  3092. };
  3093. int
  3094. fuse_notify_poll(fuse_pollhandle_t *ph)
  3095. {
  3096. return fuse_lowlevel_notify_poll(ph);
  3097. }
  3098. int
  3099. fuse_exited(struct fuse *f)
  3100. {
  3101. return fuse_session_exited(f->se);
  3102. }
  3103. struct fuse_session*
  3104. fuse_get_session(struct fuse *f)
  3105. {
  3106. return f->se;
  3107. }
  3108. void
  3109. fuse_exit(struct fuse *f)
  3110. {
  3111. f->se->exited = 1;
  3112. }
  3113. struct fuse_context*
  3114. fuse_get_context(void)
  3115. {
  3116. return &fuse_get_context_internal()->ctx;
  3117. }
  3118. enum {
  3119. KEY_HELP,
  3120. };
  3121. #define FUSE_LIB_OPT(t,p,v) { t,offsetof(struct fuse_config,p),v }
  3122. static const struct fuse_opt fuse_lib_opts[] =
  3123. {
  3124. FUSE_OPT_KEY("-h", KEY_HELP),
  3125. FUSE_OPT_KEY("--help", KEY_HELP),
  3126. FUSE_OPT_KEY("debug", FUSE_OPT_KEY_KEEP),
  3127. FUSE_OPT_KEY("-d", FUSE_OPT_KEY_KEEP),
  3128. FUSE_LIB_OPT("debug", debug,1),
  3129. FUSE_LIB_OPT("-d", debug,1),
  3130. FUSE_LIB_OPT("nogc", nogc,1),
  3131. FUSE_LIB_OPT("umask=", set_mode,1),
  3132. FUSE_LIB_OPT("umask=%o", umask,0),
  3133. FUSE_LIB_OPT("uid=", set_uid,1),
  3134. FUSE_LIB_OPT("uid=%d", uid,0),
  3135. FUSE_LIB_OPT("gid=", set_gid,1),
  3136. FUSE_LIB_OPT("gid=%d", gid,0),
  3137. FUSE_LIB_OPT("noforget", remember,-1),
  3138. FUSE_LIB_OPT("remember=%u", remember,0),
  3139. FUSE_OPT_END
  3140. };
  3141. static void fuse_lib_help(void)
  3142. {
  3143. fprintf(stderr,
  3144. " -o umask=M set file permissions (octal)\n"
  3145. " -o uid=N set file owner\n"
  3146. " -o gid=N set file group\n"
  3147. " -o noforget never forget cached inodes\n"
  3148. " -o remember=T remember cached inodes for T seconds (0s)\n"
  3149. " -o threads=NUM number of worker threads. 0 = autodetect.\n"
  3150. " Negative values autodetect then divide by\n"
  3151. " absolute value. default = 0\n"
  3152. "\n");
  3153. }
  3154. static
  3155. int
  3156. fuse_lib_opt_proc(void *data,
  3157. const char *arg,
  3158. int key,
  3159. struct fuse_args *outargs)
  3160. {
  3161. (void)arg; (void)outargs;
  3162. if(key == KEY_HELP)
  3163. {
  3164. struct fuse_config *conf = (struct fuse_config *)data;
  3165. fuse_lib_help();
  3166. conf->help = 1;
  3167. }
  3168. return 1;
  3169. }
  3170. int
  3171. fuse_is_lib_option(const char *opt)
  3172. {
  3173. return fuse_lowlevel_is_lib_option(opt) || fuse_opt_match(fuse_lib_opts,opt);
  3174. }
  3175. struct fuse_fs*
  3176. fuse_fs_new(const struct fuse_operations *op,
  3177. size_t op_size)
  3178. {
  3179. struct fuse_fs *fs;
  3180. if(sizeof(struct fuse_operations) < op_size)
  3181. {
fprintf(stderr,"fuse: warning: library too old, some operations may not work\n");
  3183. op_size = sizeof(struct fuse_operations);
  3184. }
  3185. fs = (struct fuse_fs *)calloc(1,sizeof(struct fuse_fs));
  3186. if(!fs)
  3187. {
  3188. fprintf(stderr,"fuse: failed to allocate fuse_fs object\n");
  3189. return NULL;
  3190. }
  3191. if(op)
  3192. memcpy(&fs->op,op,op_size);
  3193. return fs;
  3194. }
  3195. static
  3196. int
  3197. node_table_init(struct node_table *t)
  3198. {
  3199. t->size = NODE_TABLE_MIN_SIZE;
  3200. t->array = (node_t **)calloc(1,sizeof(node_t *) * t->size);
  3201. if(t->array == NULL)
  3202. {
  3203. fprintf(stderr,"fuse: memory allocation failed\n");
  3204. return -1;
  3205. }
  3206. t->use = 0;
  3207. t->split = 0;
  3208. return 0;
  3209. }
  3210. static
  3211. struct fuse*
  3212. fuse_get_fuse_obj()
  3213. {
  3214. static struct fuse f = {0};
  3215. return &f;
  3216. }
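/* Write a timestamped snapshot of the node tables, the node memory pool
   and msgbuf usage to the given stream */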
  3217. static
  3218. void
  3219. metrics_log_nodes_info(struct fuse *f_,
  3220. FILE *file_)
  3221. {
  3222. char buf[1024];
  3223. char time_str[64];
  3224. struct tm tm;
  3225. struct timeval tv;
  3226. uint64_t sizeof_node;
  3227. float node_usage_ratio;
  3228. uint64_t node_slab_count;
  3229. uint64_t node_avail_objs;
  3230. uint64_t node_total_alloc_mem;
  3231. gettimeofday(&tv,NULL);
  3232. localtime_r(&tv.tv_sec,&tm);
  3233. strftime(time_str,sizeof(time_str),"%Y-%m-%dT%H:%M:%S.000%z",&tm);
  3234. sizeof_node = sizeof(node_t);
  3235. lfmp_t *lfmp;
  3236. lfmp = node_lfmp();
  3237. lfmp_lock(lfmp);
  3238. node_slab_count = fmp_slab_count(&lfmp->fmp);
  3239. node_usage_ratio = fmp_slab_usage_ratio(&lfmp->fmp);
  3240. node_avail_objs = fmp_avail_objs(&lfmp->fmp);
  3241. node_total_alloc_mem = fmp_total_allocated_memory(&lfmp->fmp);
  3242. lfmp_unlock(lfmp);
  3243. snprintf(buf,sizeof(buf),
  3244. "time: %s\n"
  3245. "sizeof(node): %" PRIu64 "\n"
  3246. "node id_table size: %" PRIu64 "\n"
  3247. "node id_table usage: %" PRIu64 "\n"
  3248. "node id_table total allocated memory: %" PRIu64 "\n"
  3249. "node name_table size: %" PRIu64 "\n"
  3250. "node name_table usage: %" PRIu64 "\n"
  3251. "node name_table total allocated memory: %" PRIu64 "\n"
  3252. "node memory pool slab count: %" PRIu64 "\n"
  3253. "node memory pool usage ratio: %f\n"
  3254. "node memory pool avail objs: %" PRIu64 "\n"
  3255. "node memory pool total allocated memory: %" PRIu64 "\n"
  3256. "msgbuf bufsize: %" PRIu64 "\n"
  3257. "msgbuf allocation count: %" PRIu64 "\n"
  3258. "msgbuf available count: %" PRIu64 "\n"
  3259. "msgbuf total allocated memory: %" PRIu64 "\n"
  3260. "\n"
  3261. ,
  3262. time_str,
  3263. sizeof_node,
  3264. (uint64_t)f_->id_table.size,
  3265. (uint64_t)f_->id_table.use,
  3266. (uint64_t)(f_->id_table.size * sizeof(node_t*)),
  3267. (uint64_t)f_->name_table.size,
  3268. (uint64_t)f_->name_table.use,
  3269. (uint64_t)(f_->name_table.size * sizeof(node_t*)),
  3270. node_slab_count,
  3271. node_usage_ratio,
  3272. node_avail_objs,
  3273. node_total_alloc_mem,
  3274. msgbuf_get_bufsize(),
  3275. msgbuf_alloc_count(),
  3276. msgbuf_avail_count(),
  3277. msgbuf_alloc_count() * msgbuf_get_bufsize()
  3278. );
  3279. fputs(buf,file_);
  3280. }
  3281. static
  3282. void
  3283. metrics_log_nodes_info_to_tmp_dir(struct fuse *f_)
  3284. {
  3285. int rv;
  3286. FILE *file;
  3287. char filepath[256];
  3288. struct stat st;
  3289. char const *mode = "a";
  3290. off_t const max_size = (1024 * 1024);
  3291. sprintf(filepath,"/tmp/mergerfs.%d.info",getpid());
  3292. rv = lstat(filepath,&st);
  3293. if((rv == 0) && (st.st_size > max_size))
  3294. mode = "w";
  3295. file = fopen(filepath,mode);
  3296. if(file == NULL)
  3297. return;
  3298. metrics_log_nodes_info(f_,file);
  3299. fclose(file);
  3300. }
  3301. static
  3302. void
  3303. fuse_malloc_trim(void)
  3304. {
  3305. #ifdef HAVE_MALLOC_TRIM
  3306. malloc_trim(1024 * 1024);
  3307. #endif
  3308. }
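/* Ask the kernel to drop the dentry of every node that sits directly
   under the root */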
  3309. void
  3310. fuse_invalidate_all_nodes()
  3311. {
  3312. struct fuse *f = fuse_get_fuse_obj();
  3313. syslog(LOG_INFO,"invalidating file entries");
  3314. pthread_mutex_lock(&f->lock);
  3315. for(size_t i = 0; i < f->id_table.size; i++)
  3316. {
  3317. node_t *node;
  3318. for(node = f->id_table.array[i]; node != NULL; node = node->id_next)
  3319. {
  3320. if(node->nodeid == FUSE_ROOT_ID)
  3321. continue;
  3322. if(node->parent->nodeid != FUSE_ROOT_ID)
  3323. continue;
  3324. fuse_lowlevel_notify_inval_entry(f->se->ch,
  3325. node->parent->nodeid,
  3326. node->name,
  3327. strlen(node->name));
  3328. }
  3329. }
  3330. pthread_mutex_unlock(&f->lock);
  3331. }
  3332. void
  3333. fuse_gc()
  3334. {
  3335. syslog(LOG_INFO,"running thorough garbage collection");
  3336. node_gc();
  3337. msgbuf_gc();
  3338. fuse_malloc_trim();
  3339. }
  3340. void
  3341. fuse_gc1()
  3342. {
  3343. syslog(LOG_INFO,"running basic garbage collection");
  3344. node_gc1();
  3345. msgbuf_gc_10percent();
  3346. fuse_malloc_trim();
  3347. }
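/* Maintenance thread: every minute prune expired remembered nodes, run a
   light garbage collection every 15th iteration and, when enabled, dump
   metrics to /tmp */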
  3348. static
  3349. void*
  3350. fuse_maintenance_loop(void *fuse_)
  3351. {
  3352. int loops;
  3353. int sleep_time;
  3354. struct fuse *f = (struct fuse*)fuse_;
  3355. pthread_setname_np(pthread_self(),"fuse.maint");
  3356. loops = 0;
  3357. sleep_time = 60;
  3358. while(1)
  3359. {
  3360. if(remember_nodes(f))
  3361. fuse_prune_remembered_nodes(f);
  3362. if((loops % 15) == 0)
  3363. fuse_gc1();
  3364. if(g_LOG_METRICS)
  3365. metrics_log_nodes_info_to_tmp_dir(f);
  3366. loops++;
  3367. sleep(sleep_time);
  3368. }
  3369. return NULL;
  3370. }
  3371. int
  3372. fuse_start_maintenance_thread(struct fuse *f_)
  3373. {
  3374. return fuse_start_thread(&f_->maintenance_thread,fuse_maintenance_loop,f_);
  3375. }
  3376. void
  3377. fuse_stop_maintenance_thread(struct fuse *f_)
  3378. {
  3379. pthread_mutex_lock(&f_->lock);
  3380. pthread_cancel(f_->maintenance_thread);
  3381. pthread_mutex_unlock(&f_->lock);
  3382. pthread_join(f_->maintenance_thread,NULL);
  3383. }
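/* Build the struct fuse instance: parse library options, create the
   lowlevel session wired to fuse_path_ops, initialize the id/name tables
   and install the root node */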
  3384. struct fuse*
  3385. fuse_new_common(struct fuse_chan *ch,
  3386. struct fuse_args *args,
  3387. const struct fuse_operations *op,
  3388. size_t op_size)
  3389. {
  3390. struct fuse *f;
  3391. node_t *root;
  3392. struct fuse_fs *fs;
  3393. struct fuse_lowlevel_ops llop = fuse_path_ops;
  3394. if(fuse_create_context_key() == -1)
  3395. goto out;
  3396. f = fuse_get_fuse_obj();
  3397. if(f == NULL)
  3398. {
  3399. fprintf(stderr,"fuse: failed to allocate fuse object\n");
  3400. goto out_delete_context_key;
  3401. }
  3402. fs = fuse_fs_new(op,op_size);
  3403. if(!fs)
  3404. goto out_free;
  3405. f->fs = fs;
/* Ugly, but necessary: if the filesystem has no lock() op, disable the
   lowlevel getlk/setlk handlers */
  3407. if(!fs->op.lock)
  3408. {
  3409. llop.getlk = NULL;
  3410. llop.setlk = NULL;
  3411. }
  3412. if(fuse_opt_parse(args,&f->conf,fuse_lib_opts,fuse_lib_opt_proc) == -1)
  3413. goto out_free_fs;
  3414. g_LOG_METRICS = f->conf.debug;
  3415. f->se = fuse_lowlevel_new_common(args,&llop,sizeof(llop),f);
  3416. if(f->se == NULL)
  3417. goto out_free_fs;
  3418. fuse_session_add_chan(f->se,ch);
/* Seed the PRNG used below for the initial generation number */
  3420. srand(time(NULL));
  3421. f->nodeid_gen.nodeid = FUSE_ROOT_ID;
  3422. f->nodeid_gen.generation = rand64();
  3423. if(node_table_init(&f->name_table) == -1)
  3424. goto out_free_session;
  3425. if(node_table_init(&f->id_table) == -1)
  3426. goto out_free_name_table;
  3427. fuse_mutex_init(&f->lock);
  3428. kv_init(f->remembered_nodes);
  3429. root = node_alloc();
  3430. if(root == NULL)
  3431. {
  3432. fprintf(stderr,"fuse: memory allocation failed\n");
  3433. goto out_free_id_table;
  3434. }
  3435. root->name = filename_strdup(f,"/");
  3436. root->parent = NULL;
  3437. root->nodeid = FUSE_ROOT_ID;
  3438. inc_nlookup(root);
  3439. hash_id(f,root);
  3440. return f;
  3441. out_free_id_table:
  3442. free(f->id_table.array);
  3443. out_free_name_table:
  3444. free(f->name_table.array);
  3445. out_free_session:
  3446. fuse_session_destroy(f->se);
  3447. out_free_fs:
  3448. /* Horrible compatibility hack to stop the destructor from being
  3449. called on the filesystem without init being called first */
  3450. fs->op.destroy = NULL;
  3451. free(f->fs);
  3452. out_free:
// free(f); /* not freed: f is the static object returned by fuse_get_fuse_obj() */
  3454. out_delete_context_key:
  3455. fuse_delete_context_key();
  3456. out:
  3457. return NULL;
  3458. }
  3459. struct fuse*
  3460. fuse_new(struct fuse_chan *ch,
  3461. struct fuse_args *args,
  3462. const struct fuse_operations *op,
  3463. size_t op_size)
  3464. {
  3465. return fuse_new_common(ch,args,op,op_size);
  3466. }
  3467. void
  3468. fuse_destroy(struct fuse *f)
  3469. {
  3470. size_t i;
  3471. if(f->fs)
  3472. {
  3473. struct fuse_context_i *c = fuse_get_context_internal();
  3474. memset(c,0,sizeof(*c));
  3475. c->ctx.fuse = f;
  3476. for(i = 0; i < f->id_table.size; i++)
  3477. {
  3478. node_t *node;
  3479. for(node = f->id_table.array[i]; node != NULL; node = node->id_next)
  3480. {
  3481. if(!node->hidden_fh)
  3482. continue;
  3483. f->fs->op.free_hide(node->hidden_fh);
  3484. node->hidden_fh = 0;
  3485. }
  3486. }
  3487. }
  3488. for(i = 0; i < f->id_table.size; i++)
  3489. {
  3490. node_t *node;
  3491. node_t *next;
  3492. for(node = f->id_table.array[i]; node != NULL; node = next)
  3493. {
  3494. next = node->id_next;
  3495. free_node(f,node);
  3496. f->id_table.use--;
  3497. }
  3498. }
  3499. free(f->id_table.array);
  3500. free(f->name_table.array);
  3501. pthread_mutex_destroy(&f->lock);
  3502. fuse_session_destroy(f->se);
  3503. kv_destroy(f->remembered_nodes);
  3504. fuse_delete_context_key();
  3505. }
  3506. void
  3507. fuse_log_metrics_set(int log_)
  3508. {
  3509. g_LOG_METRICS = log_;
  3510. }
  3511. int
  3512. fuse_log_metrics_get(void)
  3513. {
  3514. return g_LOG_METRICS;
  3515. }