You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

4300 lines
86 KiB

  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU LGPLv2.
  5. See the file COPYING.LIB
  6. */
  7. /* For pthread_rwlock_t */
  8. #define _GNU_SOURCE
  9. #include "config.h"
  10. #include "fuse_i.h"
  11. #include "fuse_lowlevel.h"
  12. #include "fuse_opt.h"
  13. #include "fuse_misc.h"
  14. #include "fuse_kernel.h"
  15. #include "fuse_dirents.h"
  16. #include <assert.h>
  17. #include <dlfcn.h>
  18. #include <errno.h>
  19. #include <fcntl.h>
  20. #include <limits.h>
  21. #include <poll.h>
  22. #include <signal.h>
  23. #include <stdbool.h>
  24. #include <stddef.h>
  25. #include <stdint.h>
  26. #include <stdio.h>
  27. #include <stdlib.h>
  28. #include <string.h>
  29. #include <sys/file.h>
  30. #include <sys/mman.h>
  31. #include <sys/param.h>
  32. #include <sys/time.h>
  33. #include <sys/uio.h>
  34. #include <time.h>
  35. #include <unistd.h>
  36. #define FUSE_NODE_SLAB 1
  37. #ifndef MAP_ANONYMOUS
  38. #undef FUSE_NODE_SLAB
  39. #endif
  40. #define FUSE_UNKNOWN_INO UINT64_MAX
  41. #define OFFSET_MAX 0x7fffffffffffffffLL
  42. #define NODE_TABLE_MIN_SIZE 8192
/* Mount-time configuration options for this filesystem instance. */
struct fuse_config
{
  unsigned int uid;    /* st_uid override value when set_uid is enabled */
  unsigned int gid;    /* st_gid override value when set_gid is enabled */
  unsigned int umask;  /* mask applied to st_mode when set_mode is enabled */
  int remember;        /* >0 enables node LRU tracking (see lru_enabled()) */
  int debug;
  int use_ino;         /* keep filesystem-provided st_ino instead of nodeid */
  int set_mode;
  int set_uid;
  int set_gid;
  int help;
  int threads;
};
/* Wrapper holding the user-supplied filesystem operation table. */
struct fuse_fs
{
  struct fuse_operations op;
};
/*
 * One waiter in the tree-lock wait queue (f->lockq).  Single-path
 * requests fill only the *1 members; two-path requests (rename/link)
 * fill both.  path1/path2 receive the resolved paths on success and
 * err/done carry the result back to the sleeping thread.
 */
struct lock_queue_element
{
  struct lock_queue_element *next;
  pthread_cond_t cond;           /* signalled by queue_element_wakeup() */
  fuse_ino_t nodeid1;
  const char *name1;
  char **path1;
  struct node **wnode1;
  fuse_ino_t nodeid2;
  const char *name2;
  char **path2;
  struct node **wnode2;
  int err;
  bool first_locked : 1;         /* path1 locks currently held */
  bool second_locked : 1;        /* path2 locks currently held */
  bool done : 1;                 /* request finished; err is valid */
};
/*
 * Incrementally-resized hash table of node chains (linear hashing):
 * 'split' marks how many buckets of the lower half have already been
 * redistributed into the doubled table.
 */
struct node_table
{
  struct node **array;
  size_t use;    /* number of nodes stored */
  size_t size;   /* current bucket count */
  size_t split;  /* progress of the incremental rehash */
};
/* Recover a pointer to the enclosing struct from a pointer to one of
   its members (same idiom as the Linux kernel's container_of). */
#define container_of(ptr,type,member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
#define list_entry(ptr,type,member) \
container_of(ptr,type,member)
/* Circular doubly-linked list link (kernel-style intrusive list). */
struct list_head
{
  struct list_head *next;
  struct list_head *prev;
};
/* Header at the start of each mmap'd page of node slots. */
struct node_slab
{
  struct list_head list; /* must be the first member */
  struct list_head freelist; /* unused node slots in this page */
  int used;                  /* number of slots handed out */
};
/* Top-level state for one mounted filesystem. */
struct fuse
{
  struct fuse_session *se;
  struct node_table name_table;     /* (parent,name) -> node */
  struct node_table id_table;       /* nodeid -> node */
  struct list_head lru_table;       /* forgotten nodes, oldest first */
  fuse_ino_t ctr;                   /* last allocated nodeid */
  uint64_t generation;              /* bumped each time ctr wraps */
  unsigned int hidectr;
  pthread_mutex_t lock;             /* guards tables, queue and node fields */
  struct fuse_config conf;
  struct fuse_fs *fs;
  struct lock_queue_element *lockq; /* waiters for contended tree locks */
  int pagesize;
  struct list_head partial_slabs;   /* slabs with free node slots */
  struct list_head full_slabs;      /* slabs with every slot in use */
  pthread_t prune_thread;
};
/* One byte-range lock record in a node's singly-linked lock list. */
struct lock
{
  int type;
  off_t start;
  off_t end;
  pid_t pid;
  uint64_t owner;
  struct lock *next;
};
/*
 * In-memory inode.  Every node is kept in two hash tables: by
 * (parent,name) in f->name_table and by nodeid in f->id_table.
 */
struct node
{
  struct node *name_next;  /* chain in the name-table bucket */
  struct node *id_next;    /* chain in the id-table bucket */
  fuse_ino_t nodeid;
  uint64_t generation;
  int refctr;              /* internal refs (child names, lookups) */
  struct node *parent;
  char *name;              /* points at inline_name for short names */
  uint64_t nlookup;        /* kernel lookup count (FORGET decrements) */
  int open_count;
  struct lock *locks;      /* byte-range lock list */
  uint64_t hidden_fh;      /* fs handle for hidden (unlinked, open) file */
  char is_hidden;
  int treelock;            /* >0: reader count; TREELOCK_WRITE: writer */
  ino_t ino;
  off_t size;
  struct timespec mtim;
  char stat_cache_valid;
  char inline_name[32];    /* small-name storage, avoids a malloc */
};
  149. #define TREELOCK_WRITE -1
  150. #define TREELOCK_WAIT_OFFSET INT_MIN
/* Node with LRU bookkeeping; allocated instead of a plain node when
   lru_enabled().  'node' must stay the first member so the two types
   can be cast into each other (see node_lru()). */
struct node_lru
{
  struct node node;
  struct list_head lru;            /* position in f->lru_table */
  struct timespec forget_time;     /* when the last lookup went away */
};
/* Directory handle: filesystem fh plus the buffered dirent stream. */
struct fuse_dh
{
  pthread_mutex_t lock;
  uint64_t fh;
  fuse_dirents_t d;
};
/* Per-thread context: public fuse_context plus the originating request. */
struct fuse_context_i
{
  struct fuse_context ctx;
  fuse_req_t req;
};
  168. static pthread_key_t fuse_context_key;
  169. static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER;
  170. static int fuse_context_ref;
  171. static
  172. void
  173. init_list_head(struct list_head *list)
  174. {
  175. list->next = list;
  176. list->prev = list;
  177. }
  178. static
  179. int
  180. list_empty(const struct list_head *head)
  181. {
  182. return head->next == head;
  183. }
  184. static
  185. void
  186. list_add(struct list_head *new,
  187. struct list_head *prev,
  188. struct list_head *next)
  189. {
  190. next->prev = new;
  191. new->next = next;
  192. new->prev = prev;
  193. prev->next = new;
  194. }
  195. static
  196. inline
  197. void
  198. list_add_head(struct list_head *new,
  199. struct list_head *head)
  200. {
  201. list_add(new,head,head->next);
  202. }
  203. static
  204. inline
  205. void
  206. list_add_tail(struct list_head *new,
  207. struct list_head *head)
  208. {
  209. list_add(new,head->prev,head);
  210. }
  211. static
  212. inline
  213. void
  214. list_del(struct list_head *entry)
  215. {
  216. struct list_head *prev = entry->prev;
  217. struct list_head *next = entry->next;
  218. next->prev = prev;
  219. prev->next = next;
  220. }
  221. static
  222. inline
  223. int
  224. lru_enabled(struct fuse *f)
  225. {
  226. return f->conf.remember > 0;
  227. }
/* Re-type a node pointer as node_lru; valid because 'node' is the
   first member of struct node_lru (only call when lru_enabled()). */
static
struct
node_lru*
node_lru(struct node *node)
{
  return (struct node_lru*)node;
}
  235. static
  236. size_t
  237. get_node_size(struct fuse *f)
  238. {
  239. if(lru_enabled(f))
  240. return sizeof(struct node_lru);
  241. else
  242. return sizeof(struct node);
  243. }
  244. #ifdef FUSE_NODE_SLAB
/* The slab header's list link is its first member, so the link pointer
   can simply be re-typed as the slab. */
static
struct node_slab*
list_to_slab(struct list_head *head)
{
  return (struct node_slab *)head;
}
  251. static
  252. struct node_slab*
  253. node_to_slab(struct fuse *f,
  254. struct node *node)
  255. {
  256. return (struct node_slab *)(((uintptr_t)node) & ~((uintptr_t)f->pagesize - 1));
  257. }
/*
 * Map one anonymous page and carve it into node-sized slots.  The slab
 * header sits at the front of the page; the slots are packed against
 * the end so node_to_slab() can recover the header by masking any slot
 * address down to the page boundary.  The new slab starts on the
 * partial list.  Returns 0 on success, -1 if mmap failed.
 */
static
int
alloc_slab(struct fuse *f)
{
  void *mem;
  struct node_slab *slab;
  char *start;
  size_t num;
  size_t i;
  size_t node_size = get_node_size(f);

  mem = mmap(NULL,f->pagesize,PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS,-1,0);
  if(mem == MAP_FAILED)
    return -1;

  slab = mem;
  init_list_head(&slab->freelist);
  slab->used = 0;
  num = (f->pagesize - sizeof(struct node_slab)) / node_size;
  /* Align the slot region flush with the end of the page. */
  start = (char *)mem + f->pagesize - num * node_size;
  for(i = 0; i < num; i++)
    {
      struct list_head *n;
      n = (struct list_head *)(start + i * node_size);
      /* Free slots are threaded through the slab's freelist. */
      list_add_tail(n,&slab->freelist);
    }
  list_add_tail(&slab->list,&f->partial_slabs);
  return 0;
}
/*
 * Pop a free slot from the first partial slab, creating a new slab if
 * none has room.  A slab whose last free slot is taken migrates to the
 * full list.  Returns NULL when a new slab cannot be mapped.
 *
 * NOTE(review): the memset clears only sizeof(struct node), not
 * get_node_size(f) - when LRU is enabled the node_lru tail is
 * initialized separately by the caller (see find_node).
 */
static
struct node*
alloc_node(struct fuse *f)
{
  struct node_slab *slab;
  struct list_head *node;

  if(list_empty(&f->partial_slabs))
    {
      int res = alloc_slab(f);
      if(res != 0)
        return NULL;
    }
  slab = list_to_slab(f->partial_slabs.next);
  slab->used++;
  node = slab->freelist.next;
  list_del(node);
  if(list_empty(&slab->freelist))
    {
      /* No slots left: move the slab to the full list. */
      list_del(&slab->list);
      list_add_tail(&slab->list,&f->full_slabs);
    }
  memset(node,0,sizeof(struct node));
  return (struct node *)node;
}
  310. static
  311. void
  312. free_slab(struct fuse *f,
  313. struct node_slab *slab)
  314. {
  315. int res;
  316. list_del(&slab->list);
  317. res = munmap(slab,f->pagesize);
  318. if(res == -1)
  319. fprintf(stderr,"fuse warning: munmap(%p) failed\n",slab);
  320. }
/*
 * Return a node's slot to its slab.  A full slab that regains a slot
 * moves back to the partial list; a slab whose last slot is released
 * is unmapped entirely.
 */
static
void
free_node_mem(struct fuse *f,
              struct node *node)
{
  struct node_slab *slab = node_to_slab(f,node);
  struct list_head *n = (struct list_head *)node;

  slab->used--;
  if(slab->used)
    {
      if(list_empty(&slab->freelist))
        {
          /* Was full: it has a free slot again. */
          list_del(&slab->list);
          list_add_tail(&slab->list,&f->partial_slabs);
        }
      list_add_head(n,&slab->freelist);
    }
  else
    {
      free_slab(f,slab);
    }
}
  343. #else
  344. static
  345. struct node*
  346. alloc_node(struct fuse *f)
  347. {
  348. return (struct node *)calloc(1,get_node_size(f));
  349. }
  350. static
  351. void
  352. free_node_mem(struct fuse *f,
  353. struct node *node)
  354. {
  355. (void)f;
  356. free(node);
  357. }
  358. #endif
/*
 * Bucket index for a nodeid.  Linear hashing: buckets below 'split'
 * have already been redistributed into the doubled table, the rest
 * still hash into the half-size table.  2654435761 is Knuth's
 * multiplicative hash constant (~2^32/phi).
 */
static
size_t
id_hash(struct fuse *f,
        fuse_ino_t ino)
{
  uint64_t hash = ((uint32_t)ino * 2654435761U) % f->id_table.size;
  uint64_t oldhash = hash % (f->id_table.size / 2);

  if(oldhash >= f->id_table.split)
    return oldhash;
  else
    return hash;
}
  371. static
  372. struct node*
  373. get_node_nocheck(struct fuse *f,
  374. fuse_ino_t nodeid)
  375. {
  376. size_t hash = id_hash(f,nodeid);
  377. struct node *node;
  378. for(node = f->id_table.array[hash]; node != NULL; node = node->id_next)
  379. if(node->nodeid == nodeid)
  380. return node;
  381. return NULL;
  382. }
  383. static
  384. struct node*
  385. get_node(struct fuse *f,
  386. const fuse_ino_t nodeid)
  387. {
  388. struct node *node = get_node_nocheck(f,nodeid);
  389. if(!node)
  390. {
  391. fprintf(stderr,"fuse internal error: node %llu not found\n",
  392. (unsigned long long)nodeid);
  393. abort();
  394. }
  395. return node;
  396. }
  397. static void curr_time(struct timespec *now);
  398. static double diff_timespec(const struct timespec *t1,
  399. const struct timespec *t2);
  400. static
  401. void
  402. remove_node_lru(struct node *node)
  403. {
  404. struct node_lru *lnode = node_lru(node);
  405. list_del(&lnode->lru);
  406. init_list_head(&lnode->lru);
  407. }
/* Move the node to the LRU tail (most recently forgotten) and stamp
   the moment its last kernel lookup went away. */
static
void
set_forget_time(struct fuse *f,
                struct node *node)
{
  struct node_lru *lnode = node_lru(node);

  list_del(&lnode->lru);
  list_add_tail(&lnode->lru,&f->lru_table);
  curr_time(&lnode->forget_time);
}
/* Release everything a node owns, then the node itself. */
static
void
free_node(struct fuse *f_,
          struct node *node_)
{
  /* Only heap-allocated names are freed; short names live inline. */
  if(node_->name != node_->inline_name)
    free(node_->name);
  /* A hidden (unlinked-but-open) file still holds a filesystem handle. */
  if(node_->is_hidden)
    fuse_fs_free_hide(f_->fs,node_->hidden_fh);
  free_node_mem(f_,node_);
}
/*
 * Halve the table (down to NODE_TABLE_MIN_SIZE).  A failed shrinking
 * realloc is tolerated: the old, larger array remains valid, so size
 * and split are updated either way.
 */
static
void
node_table_reduce(struct node_table *t)
{
  size_t newsize = t->size / 2;
  void *newarray;

  if(newsize < NODE_TABLE_MIN_SIZE)
    return;

  newarray = realloc(t->array,sizeof(struct node *)* newsize);
  if(newarray != NULL)
    t->array = newarray;

  t->size = newsize;
  t->split = t->size / 2;
}
/*
 * Undo linear-hash expansion of the id table one bucket at a time:
 * fold up to eight upper buckets back into their lower twins, stopping
 * early once a non-empty one has been merged.  When 'split' has
 * reached zero the table itself is halved.
 */
static
void
remerge_id(struct fuse *f)
{
  struct node_table *t = &f->id_table;
  int iter;

  if(t->split == 0)
    node_table_reduce(t);

  for(iter = 8; t->split > 0 && iter; iter--)
    {
      struct node **upper;

      t->split--;
      upper = &t->array[t->split + t->size / 2];
      if(*upper)
        {
          struct node **nodep;

          /* Append the upper chain to the end of the lower chain. */
          for(nodep = &t->array[t->split]; *nodep;
              nodep = &(*nodep)->id_next);
          *nodep = *upper;
          *upper = NULL;
          break;
        }
    }
}
/* Remove the node from its id-table chain; shrink the table when it
   gets sparse (load below 1/4). */
static
void
unhash_id(struct fuse *f,
          struct node *node)
{
  struct node **nodep = &f->id_table.array[id_hash(f,node->nodeid)];

  for(; *nodep != NULL; nodep = &(*nodep)->id_next)
    if(*nodep == node)
      {
        *nodep = node->id_next;
        f->id_table.use--;

        if(f->id_table.use < f->id_table.size / 4)
          remerge_id(f);
        return;
      }
}
  483. static
  484. int
  485. node_table_resize(struct node_table *t)
  486. {
  487. size_t newsize = t->size * 2;
  488. void *newarray;
  489. newarray = realloc(t->array,sizeof(struct node *)* newsize);
  490. if(newarray == NULL)
  491. return -1;
  492. t->array = newarray;
  493. memset(t->array + t->size,0,t->size * sizeof(struct node *));
  494. t->size = newsize;
  495. t->split = 0;
  496. return 0;
  497. }
/*
 * Advance linear-hash expansion of the id table by one bucket:
 * redistribute the chain at 'split' between its old position and its
 * new upper-half position.  Once every lower bucket has been processed
 * the array is doubled and splitting restarts.
 */
static
void
rehash_id(struct fuse *f)
{
  struct node_table *t = &f->id_table;
  struct node **nodep;
  struct node **next;
  size_t hash;

  if(t->split == t->size / 2)
    return;

  hash = t->split;
  t->split++;
  for(nodep = &t->array[hash]; *nodep != NULL; nodep = next)
    {
      struct node *node = *nodep;
      size_t newhash = id_hash(f,node->nodeid);

      if(newhash != hash)
        {
          /* Moves to the upper half: unlink here, push there. */
          next = nodep;
          *nodep = node->id_next;
          node->id_next = t->array[newhash];
          t->array[newhash] = node;
        }
      else
        {
          next = &node->id_next;
        }
    }
  if(t->split == t->size / 2)
    node_table_resize(t);
}
  529. static
  530. void
  531. hash_id(struct fuse *f,
  532. struct node *node)
  533. {
  534. size_t hash;
  535. hash = id_hash(f,node->nodeid);
  536. node->id_next = f->id_table.array[hash];
  537. f->id_table.array[hash] = node;
  538. f->id_table.use++;
  539. if(f->id_table.use >= f->id_table.size / 2)
  540. rehash_id(f);
  541. }
/*
 * Bucket index for (parent,name): a 31-based polynomial string hash
 * seeded with the parent id, mapped through the same linear-hashing
 * split scheme as id_hash().
 */
static
size_t
name_hash(struct fuse *f,
          fuse_ino_t parent,
          const char *name)
{
  uint64_t hash = parent;
  uint64_t oldhash;

  for(; *name; name++)
    hash = hash * 31 + (unsigned char)*name;

  hash %= f->name_table.size;
  oldhash = hash % (f->name_table.size / 2);
  if(oldhash >= f->name_table.split)
    return oldhash;
  else
    return hash;
}
  559. static
  560. void
  561. unref_node(struct fuse *f,
  562. struct node *node);
/*
 * Undo linear-hash expansion of the name table; same scheme as
 * remerge_id(): fold up to eight upper buckets back down, stop after
 * the first non-empty merge, halve the table once fully merged.
 */
static
void
remerge_name(struct fuse *f)
{
  int iter;
  struct node_table *t = &f->name_table;

  if(t->split == 0)
    node_table_reduce(t);

  for(iter = 8; t->split > 0 && iter; iter--)
    {
      struct node **upper;

      t->split--;
      upper = &t->array[t->split + t->size / 2];
      if(*upper)
        {
          struct node **nodep;

          /* Append the upper chain to the end of the lower chain. */
          for(nodep = &t->array[t->split]; *nodep; nodep = &(*nodep)->name_next);
          *nodep = *upper;
          *upper = NULL;
          break;
        }
    }
}
/*
 * Remove the node's (parent,name) entry: unlink from the name table,
 * release the name storage, and drop the reference held on the parent
 * (which may cascade into deleting the parent).  A node that has a
 * name but is missing from its bucket indicates corrupted state and
 * aborts.  No-op for anonymous nodes.
 */
static
void
unhash_name(struct fuse *f,
            struct node *node)
{
  if(node->name)
    {
      size_t hash = name_hash(f,node->parent->nodeid,node->name);
      struct node **nodep = &f->name_table.array[hash];

      for(; *nodep != NULL; nodep = &(*nodep)->name_next)
        if(*nodep == node)
          {
            *nodep = node->name_next;
            node->name_next = NULL;
            unref_node(f,node->parent);
            /* Only heap names are freed; short names live inline. */
            if(node->name != node->inline_name)
              free(node->name);
            node->name = NULL;
            node->parent = NULL;
            f->name_table.use--;

            if(f->name_table.use < f->name_table.size / 4)
              remerge_name(f);
            return;
          }
      fprintf(stderr,
              "fuse internal error: unable to unhash node: %llu\n",
              (unsigned long long)node->nodeid);
      abort();
    }
}
/*
 * Advance linear-hash expansion of the name table by one bucket;
 * mirrors rehash_id() but keys on (parent,name).
 */
static
void
rehash_name(struct fuse *f)
{
  struct node_table *t = &f->name_table;
  struct node **nodep;
  struct node **next;
  size_t hash;

  if(t->split == t->size / 2)
    return;

  hash = t->split;
  t->split++;
  for(nodep = &t->array[hash]; *nodep != NULL; nodep = next)
    {
      struct node *node = *nodep;
      size_t newhash = name_hash(f,node->parent->nodeid,node->name);

      if(newhash != hash)
        {
          /* Moves to the upper half: unlink here, push there. */
          next = nodep;
          *nodep = node->name_next;
          node->name_next = t->array[newhash];
          t->array[newhash] = node;
        }
      else
        {
          next = &node->name_next;
        }
    }
  if(t->split == t->size / 2)
    node_table_resize(t);
}
/*
 * Link 'node' under 'parentid' with 'name'.  Names shorter than the
 * inline buffer are stored in-place to avoid a malloc; longer names
 * are strdup'd.  Takes a reference on the parent node.  Returns -1
 * only when strdup fails (node is left unhashed, parent untouched).
 */
static
int
hash_name(struct fuse *f,
          struct node *node,
          fuse_ino_t parentid,
          const char *name)
{
  size_t hash = name_hash(f,parentid,name);
  struct node *parent = get_node(f,parentid);

  if(strlen(name) < sizeof(node->inline_name))
    {
      strcpy(node->inline_name,name);
      node->name = node->inline_name;
    }
  else
    {
      node->name = strdup(name);
      if(node->name == NULL)
        return -1;
    }

  parent->refctr ++;
  node->parent = parent;
  node->name_next = f->name_table.array[hash];
  f->name_table.array[hash] = node;
  f->name_table.use++;

  if(f->name_table.use >= f->name_table.size / 2)
    rehash_name(f);

  return 0;
}
/* Fully destroy a node with no remaining references; it must not hold
   any tree lock. */
static
void
delete_node(struct fuse *f,
            struct node *node)
{
  assert(node->treelock == 0);
  unhash_name(f,node);
  if(lru_enabled(f))
    remove_node_lru(node);
  unhash_id(f,node);
  free_node(f,node);
}
  688. static
  689. void
  690. unref_node(struct fuse *f,
  691. struct node *node)
  692. {
  693. assert(node->refctr > 0);
  694. node->refctr--;
  695. if(!node->refctr)
  696. delete_node(f,node);
  697. }
  698. static
  699. uint64_t
  700. rand64(void)
  701. {
  702. uint64_t rv;
  703. rv = rand();
  704. rv <<= 32;
  705. rv |= rand();
  706. return rv;
  707. }
  708. static
  709. fuse_ino_t
  710. next_id(struct fuse *f)
  711. {
  712. do
  713. {
  714. f->ctr = ((f->ctr + 1) & UINT64_MAX);
  715. if(f->ctr == 0)
  716. f->generation++;
  717. } while((f->ctr == 0) ||
  718. (f->ctr == FUSE_UNKNOWN_INO) ||
  719. (get_node_nocheck(f,f->ctr) != NULL));
  720. return f->ctr;
  721. }
  722. static
  723. struct node*
  724. lookup_node(struct fuse *f,
  725. fuse_ino_t parent,
  726. const char *name)
  727. {
  728. size_t hash;
  729. struct node *node;
  730. hash = name_hash(f,parent,name);
  731. for(node = f->name_table.array[hash]; node != NULL; node = node->name_next)
  732. if(node->parent->nodeid == parent && strcmp(node->name,name) == 0)
  733. return node;
  734. return NULL;
  735. }
  736. static
  737. void
  738. inc_nlookup(struct node *node)
  739. {
  740. if(!node->nlookup)
  741. node->refctr++;
  742. node->nlookup++;
  743. }
/*
 * Look up or create the node for (parent,name); with name == NULL,
 * return the node for 'parent' itself.  Increments the kernel lookup
 * count on success.  Returns NULL on allocation failure.
 */
static
struct node*
find_node(struct fuse *f,
          fuse_ino_t parent,
          const char *name)
{
  struct node *node;

  pthread_mutex_lock(&f->lock);
  if(!name)
    node = get_node(f,parent);
  else
    node = lookup_node(f,parent,name);
  if(node == NULL)
    {
      node = alloc_node(f);
      if(node == NULL)
        goto out_err;

      node->nodeid = next_id(f);
      node->generation = f->generation;
      /* "remember" keeps an extra lookup so the node survives until
         pruned/forgotten. */
      if(f->conf.remember)
        inc_nlookup(node);

      if(hash_name(f,node,parent,name) == -1)
        {
          free_node(f,node);
          node = NULL;
          goto out_err;
        }
      hash_id(f,node);
      if(lru_enabled(f))
        {
          /* The lru link starts detached (self-pointing). */
          struct node_lru *lnode = node_lru(node);
          init_list_head(&lnode->lru);
        }
    }
  else if(lru_enabled(f) && node->nlookup == 1)
    {
      /* Rediscovered a "forgotten" node: take it off the prune list. */
      remove_node_lru(node);
    }
  inc_nlookup(node);
 out_err:
  pthread_mutex_unlock(&f->lock);
  return node;
}
/*
 * Prepend "/name" to a path being built backwards inside *buf ('s'
 * points at the current start of the partial path, which ends at the
 * buffer's end).  When there is no room the buffer is grown by
 * doubling (capped at 0xffffffff) and the partial path is shifted to
 * the new end.  Returns the new start pointer, or NULL on allocation
 * failure (caller frees *buf).
 */
static
char*
add_name(char **buf,
         unsigned *bufsize,
         char *s,
         const char *name)
{
  size_t len = strlen(name);

  if(s - len <= *buf)
    {
      unsigned pathlen = *bufsize - (s - *buf);
      unsigned newbufsize = *bufsize;
      char *newbuf;

      while(newbufsize < pathlen + len + 1)
        {
          if(newbufsize >= 0x80000000)
            newbufsize = 0xffffffff;
          else
            newbufsize *= 2;
        }

      newbuf = realloc(*buf,newbufsize);
      if(newbuf == NULL)
        return NULL;

      *buf = newbuf;
      s = newbuf + newbufsize - pathlen;
      memmove(s,newbuf + *bufsize - pathlen,pathlen);
      *bufsize = newbufsize;
    }
  s -= len;
  /* Intentionally no terminator: the byte after the copied name is
     already part of the previously built path. */
  strncpy(s,name,len);
  s--;
  *s = '/';

  return s;
}
/*
 * Release tree locks taken by try_get_path(): the write lock held on
 * 'wnode' (if any) and one read reference on every ancestor from
 * 'nodeid' up to - but not including - 'end' or the root.  A node
 * whose count was offset by TREELOCK_WAIT_OFFSET (a writer is waiting)
 * is reset to 0 when its last reader leaves.
 */
static
void
unlock_path(struct fuse *f,
            fuse_ino_t nodeid,
            struct node *wnode,
            struct node *end)
{
  struct node *node;

  if(wnode)
    {
      assert(wnode->treelock == TREELOCK_WRITE);
      wnode->treelock = 0;
    }

  for(node = get_node(f,nodeid); node != end && node->nodeid != FUSE_ROOT_ID; node = node->parent)
    {
      assert(node->treelock != 0);
      assert(node->treelock != TREELOCK_WAIT_OFFSET);
      assert(node->treelock != TREELOCK_WRITE);
      node->treelock--;
      if(node->treelock == TREELOCK_WAIT_OFFSET)
        node->treelock = 0;
    }
}
/*
 * Build the full path for nodeid (plus optional trailing 'name') by
 * walking parent links to the root while assembling the string
 * backwards in a growable buffer.  With need_lock, takes one read
 * reference on each traversed ancestor; with wnodep, additionally
 * write-locks the target node (and parks a TREELOCK_WAIT_OFFSET marker
 * on it when readers are present, so new readers back off).
 *
 * Returns 0 with *path (and *wnodep) set, -EAGAIN when the tree lock
 * is contended (caller queues and retries), -ENOMEM or -ENOENT on
 * failure.  All partially taken locks are released on error.
 */
static
int
try_get_path(struct fuse *f,
             fuse_ino_t nodeid,
             const char *name,
             char **path,
             struct node **wnodep,
             bool need_lock)
{
  unsigned bufsize = 256;
  char *buf;
  char *s;
  struct node *node;
  struct node *wnode = NULL;
  int err;

  *path = NULL;

  err = -ENOMEM;
  buf = malloc(bufsize);
  if(buf == NULL)
    goto out_err;

  s = buf + bufsize - 1;
  *s = '\0';

  if(name != NULL)
    {
      s = add_name(&buf,&bufsize,s,name);
      err = -ENOMEM;
      if(s == NULL)
        goto out_free;
    }

  if(wnodep)
    {
      assert(need_lock);
      wnode = lookup_node(f,nodeid,name);
      if(wnode)
        {
          if(wnode->treelock != 0)
            {
              /* Readers present: flag a waiting writer and retry. */
              if(wnode->treelock > 0)
                wnode->treelock += TREELOCK_WAIT_OFFSET;
              err = -EAGAIN;
              goto out_free;
            }
          wnode->treelock = TREELOCK_WRITE;
        }
    }

  for(node = get_node(f,nodeid); node->nodeid != FUSE_ROOT_ID; node = node->parent)
    {
      err = -ENOENT;
      if(node->name == NULL || node->parent == NULL)
        goto out_unlock;

      err = -ENOMEM;
      s = add_name(&buf,&bufsize,s,node->name);
      if(s == NULL)
        goto out_unlock;

      if(need_lock)
        {
          err = -EAGAIN;
          /* Negative count == writer holds or awaits this node. */
          if(node->treelock < 0)
            goto out_unlock;

          node->treelock++;
        }
    }

  /* Shift the assembled path to the front; an empty path is root. */
  if(s[0])
    memmove(buf,s,bufsize - (s - buf));
  else
    strcpy(buf,"/");

  *path = buf;
  if(wnodep)
    *wnodep = wnode;

  return 0;

 out_unlock:
  if(need_lock)
    unlock_path(f,nodeid,wnode,node);
 out_free:
  free(buf);

 out_err:
  return err;
}
/* Drop whatever locks a queued request managed to acquire so other
   waiters can make progress. */
static
void
queue_element_unlock(struct fuse *f,
                     struct lock_queue_element *qe)
{
  struct node *wnode;

  if(qe->first_locked)
    {
      wnode = qe->wnode1 ? *qe->wnode1 : NULL;
      unlock_path(f,qe->nodeid1,wnode,NULL);
      qe->first_locked = false;
    }

  if(qe->second_locked)
    {
      wnode = qe->wnode2 ? *qe->wnode2 : NULL;
      unlock_path(f,qe->nodeid2,wnode,NULL);
      qe->second_locked = false;
    }
}
/*
 * Try to complete one queued path request.  Outcomes: all needed locks
 * acquired (done, err = 0); hard failure (done, err < 0); or still
 * contended, in which case only the queue head may keep partial locks
 * and everyone else releases and retries later.  An element with no
 * path1 is merely waiting for the node's tree lock to clear (see
 * forget_node()).  Caller holds f->lock.
 */
static
void
queue_element_wakeup(struct fuse *f,
                     struct lock_queue_element *qe)
{
  int err;
  bool first = (qe == f->lockq);

  if(!qe->path1)
    {
      /* Just waiting for it to be unlocked */
      if(get_node(f,qe->nodeid1)->treelock == 0)
        pthread_cond_signal(&qe->cond);

      return;
    }

  if(!qe->first_locked)
    {
      err = try_get_path(f,qe->nodeid1,qe->name1,qe->path1,qe->wnode1,true);
      if(!err)
        qe->first_locked = true;
      else if(err != -EAGAIN)
        goto err_unlock;
    }

  if(!qe->second_locked && qe->path2)
    {
      err = try_get_path(f,qe->nodeid2,qe->name2,qe->path2,qe->wnode2,true);
      if(!err)
        qe->second_locked = true;
      else if(err != -EAGAIN)
        goto err_unlock;
    }

  if(qe->first_locked && (qe->second_locked || !qe->path2))
    {
      err = 0;
      goto done;
    }

  /*
   * Only let the first element be partially locked otherwise there could
   * be a deadlock.
   *
   * But do allow the first element to be partially locked to prevent
   * starvation.
   */
  if(!first)
    queue_element_unlock(f,qe);

  /* keep trying */
  return;

 err_unlock:
  queue_element_unlock(f,qe);
 done:
  qe->err = err;
  qe->done = true;
  pthread_cond_signal(&qe->cond);
}
  994. static
  995. void
  996. wake_up_queued(struct fuse *f)
  997. {
  998. struct lock_queue_element *qe;
  999. for(qe = f->lockq; qe != NULL; qe = qe->next)
  1000. queue_element_wakeup(f,qe);
  1001. }
  1002. static
  1003. void
  1004. queue_path(struct fuse *f,
  1005. struct lock_queue_element *qe)
  1006. {
  1007. struct lock_queue_element **qp;
  1008. qe->done = false;
  1009. qe->first_locked = false;
  1010. qe->second_locked = false;
  1011. pthread_cond_init(&qe->cond,NULL);
  1012. qe->next = NULL;
  1013. for(qp = &f->lockq; *qp != NULL; qp = &(*qp)->next);
  1014. *qp = qe;
  1015. }
  1016. static
  1017. void
  1018. dequeue_path(struct fuse *f,
  1019. struct lock_queue_element *qe)
  1020. {
  1021. struct lock_queue_element **qp;
  1022. pthread_cond_destroy(&qe->cond);
  1023. for(qp = &f->lockq; *qp != qe; qp = &(*qp)->next);
  1024. *qp = qe->next;
  1025. }
  1026. static
  1027. int
  1028. wait_path(struct fuse *f,
  1029. struct lock_queue_element *qe)
  1030. {
  1031. queue_path(f,qe);
  1032. do
  1033. {
  1034. pthread_cond_wait(&qe->cond,&f->lock);
  1035. } while(!qe->done);
  1036. dequeue_path(f,qe);
  1037. return qe->err;
  1038. }
/*
 * Resolve nodeid (+ optional name) to a path, read-locking the
 * ancestor chain and, when 'wnode' is given, write-locking the target.
 * On contention (-EAGAIN) the request parks on the wait queue until
 * the locks can be taken.  The caller releases via free_path*().
 */
static
int
get_path_common(struct fuse *f,
                fuse_ino_t nodeid,
                const char *name,
                char **path,
                struct node **wnode)
{
  int err;

  pthread_mutex_lock(&f->lock);
  err = try_get_path(f,nodeid,name,path,wnode,true);
  if(err == -EAGAIN)
    {
      struct lock_queue_element qe = {0};

      qe.nodeid1 = nodeid;
      qe.name1   = name;
      qe.path1   = path;
      qe.wnode1  = wnode;

      err = wait_path(f,&qe);
    }
  pthread_mutex_unlock(&f->lock);

  return err;
}
/* Resolve a node to its path, read-locking the ancestor chain. */
static
int
get_path(struct fuse *f,
         fuse_ino_t nodeid,
         char **path)
{
  return get_path_common(f,nodeid,NULL,path,NULL);
}
/* Resolve (nodeid,name) to a path, read-locking the ancestor chain. */
static
int
get_path_name(struct fuse *f,
              fuse_ino_t nodeid,
              const char *name,
              char **path)
{
  return get_path_common(f,nodeid,name,path,NULL);
}
/* Resolve (nodeid,name) to a path and write-lock the target node
   (returned via *wnode) in addition to read-locking the ancestors. */
static
int
get_path_wrlock(struct fuse *f,
                fuse_ino_t nodeid,
                const char *name,
                char **path,
                struct node **wnode)
{
  return get_path_common(f,nodeid,name,path,wnode);
}
/*
 * Lock and resolve two paths at once (rename/link).  If the second
 * acquisition fails, the first path's locks and buffer are released so
 * the operation is all-or-nothing.
 */
static
int
try_get_path2(struct fuse *f,
              fuse_ino_t nodeid1,
              const char *name1,
              fuse_ino_t nodeid2,
              const char *name2,
              char **path1,
              char **path2,
              struct node **wnode1,
              struct node **wnode2)
{
  int err;

  /* FIXME: locking two paths needs deadlock checking */
  err = try_get_path(f,nodeid1,name1,path1,wnode1,true);
  if(!err)
    {
      err = try_get_path(f,nodeid2,name2,path2,wnode2,true);
      if(err)
        {
          struct node *wn1 = wnode1 ? *wnode1 : NULL;

          unlock_path(f,nodeid1,wn1,NULL);
          free(*path1);
        }
    }

  return err;
}
/*
 * Two-path variant of get_path_common() for rename/link: resolve and
 * lock both paths, parking on the wait queue when either is contended.
 */
static
int
get_path2(struct fuse *f,
          fuse_ino_t nodeid1,
          const char *name1,
          fuse_ino_t nodeid2,
          const char *name2,
          char **path1,
          char **path2,
          struct node **wnode1,
          struct node **wnode2)
{
  int err;

  pthread_mutex_lock(&f->lock);
  err = try_get_path2(f,nodeid1,name1,nodeid2,name2,
                      path1,path2,wnode1,wnode2);
  if(err == -EAGAIN)
    {
      struct lock_queue_element qe = {0};

      qe.nodeid1 = nodeid1;
      qe.name1   = name1;
      qe.path1   = path1;
      qe.wnode1  = wnode1;
      qe.nodeid2 = nodeid2;
      qe.name2   = name2;
      qe.path2   = path2;
      qe.wnode2  = wnode2;

      err = wait_path(f,&qe);
    }
  pthread_mutex_unlock(&f->lock);

  return err;
}
/* Release the tree locks taken when 'path' was resolved, wake any
   queued waiters, and free the path string. */
static
void
free_path_wrlock(struct fuse *f,
                 fuse_ino_t nodeid,
                 struct node *wnode,
                 char *path)
{
  pthread_mutex_lock(&f->lock);
  unlock_path(f,nodeid,wnode,NULL);
  if(f->lockq)
    wake_up_queued(f);
  pthread_mutex_unlock(&f->lock);
  free(path);
}
/* Release a path from get_path()/get_path_name(); a NULL path means
   resolution failed and nothing is held. */
static
void
free_path(struct fuse *f,
          fuse_ino_t nodeid,
          char *path)
{
  if(path)
    free_path_wrlock(f,nodeid,NULL,path);
}
/* Release both paths from get_path2() and wake queued waiters. */
static
void
free_path2(struct fuse *f,
           fuse_ino_t nodeid1,
           fuse_ino_t nodeid2,
           struct node *wnode1,
           struct node *wnode2,
           char *path1,
           char *path2)
{
  pthread_mutex_lock(&f->lock);
  unlock_path(f,nodeid1,wnode1,NULL);
  unlock_path(f,nodeid2,wnode2,NULL);
  wake_up_queued(f);
  pthread_mutex_unlock(&f->lock);
  free(path1);
  free(path2);
}
/*
 * Handle a kernel FORGET: subtract 'nlookup' lookup references from
 * the node.  First waits for any tree lock on the node to clear.  The
 * final lookup either releases the node outright or, with LRU active,
 * stamps its forget time for later pruning.  The root is never
 * forgotten.
 */
static
void
forget_node(struct fuse *f,
            const fuse_ino_t nodeid,
            const uint64_t nlookup)
{
  struct node *node;

  if(nodeid == FUSE_ROOT_ID)
    return;

  pthread_mutex_lock(&f->lock);
  node = get_node(f,nodeid);

  /*
   * Node may still be locked due to interrupt idiocy in open,
   * create and opendir
   */
  while(node->nlookup == nlookup && node->treelock)
    {
      struct lock_queue_element qe = {0};

      qe.nodeid1 = nodeid;

      queue_path(f,&qe);
      do
        {
          pthread_cond_wait(&qe.cond,&f->lock);
        }
      while((node->nlookup == nlookup) && node->treelock);
      dequeue_path(f,&qe);
    }

  assert(node->nlookup >= nlookup);
  node->nlookup -= nlookup;
  if(!node->nlookup)
    unref_node(f,node);
  else if(lru_enabled(f) && node->nlookup == 1)
    set_forget_time(f,node);
  pthread_mutex_unlock(&f->lock);
}
/* Remove the name->node mapping after an unlink.  With "remember"
   active the kernel still holds lookup references, so only the one
   this unlink consumes is dropped. */
static
void
unlink_node(struct fuse *f,
            struct node *node)
{
  if(f->conf.remember)
    {
      assert(node->nlookup > 1);
      node->nlookup--;
    }
  unhash_name(f,node);
}
  1236. static
  1237. void
  1238. remove_node(struct fuse *f,
  1239. fuse_ino_t dir,
  1240. const char *name)
  1241. {
  1242. struct node *node;
  1243. pthread_mutex_lock(&f->lock);
  1244. node = lookup_node(f,dir,name);
  1245. if(node != NULL)
  1246. unlink_node(f,node);
  1247. pthread_mutex_unlock(&f->lock);
  1248. }
/*
 * Move the node at (olddir,oldname) to (newdir,newname) in the name
 * table, unlinking any node already at the destination.  Returns
 * -ENOMEM if hashing the new name fails (the node is left unhashed);
 * a missing source is silently a no-op (returns 0).
 */
static
int
rename_node(struct fuse *f,
            fuse_ino_t olddir,
            const char *oldname,
            fuse_ino_t newdir,
            const char *newname)
{
  struct node *node;
  struct node *newnode;
  int err = 0;

  pthread_mutex_lock(&f->lock);
  node = lookup_node(f,olddir,oldname);
  newnode = lookup_node(f,newdir,newname);
  if(node == NULL)
    goto out;

  if(newnode != NULL)
    unlink_node(f,newnode);

  unhash_name(f,node);
  if(hash_name(f,node,newdir,newname) == -1)
    {
      err = -ENOMEM;
      goto out;
    }

 out:
  pthread_mutex_unlock(&f->lock);
  return err;
}
/* Apply mount-wide stat overrides (the use_ino / umask / uid / gid
   mount options) to attributes about to be returned to the kernel. */
static
void
set_stat(struct fuse *f,
         fuse_ino_t nodeid,
         struct stat *stbuf)
{
  /* Without use_ino the library's own node id is the inode number. */
  if(!f->conf.use_ino)
    stbuf->st_ino = nodeid;
  if(f->conf.set_mode)
    stbuf->st_mode = (stbuf->st_mode & S_IFMT) | (0777 & ~f->conf.umask);
  if(f->conf.set_uid)
    stbuf->st_uid = f->conf.uid;
  if(f->conf.set_gid)
    stbuf->st_gid = f->conf.gid;
}
/* The struct fuse handle is stashed as the low-level request's
   userdata at session creation time. */
static
struct fuse*
req_fuse(fuse_req_t req)
{
  return (struct fuse*)fuse_req_userdata(req);
}
/* Dispatch to the filesystem's getattr handler. */
int
fuse_fs_getattr(struct fuse_fs *fs,
                const char *path,
                struct stat *buf,
                fuse_timeouts_t *timeout)
{
  return fs->op.getattr(path,buf,timeout);
}
/* Dispatch to the filesystem's fgetattr handler (attr by open handle). */
int
fuse_fs_fgetattr(struct fuse_fs *fs,
                 struct stat *buf,
                 fuse_file_info_t *fi,
                 fuse_timeouts_t *timeout)
{
  return fs->op.fgetattr(fi,buf,timeout);
}
/* Dispatch to the filesystem's rename handler. */
int
fuse_fs_rename(struct fuse_fs *fs,
               const char *oldpath,
               const char *newpath)
{
  return fs->op.rename(oldpath,newpath);
}
/* Ask the filesystem to keep `path_` reachable via a handle (stored in
   *fh_) so an open-but-unlinked file stays usable. */
int
fuse_fs_prepare_hide(struct fuse_fs *fs_,
                     const char *path_,
                     uint64_t *fh_)
{
  return fs_->op.prepare_hide(path_,fh_);
}
/* Release a handle previously produced by fuse_fs_prepare_hide(). */
int
fuse_fs_free_hide(struct fuse_fs *fs_,
                  uint64_t fh_)
{
  return fs_->op.free_hide(fh_);
}
/* Dispatch to the filesystem's unlink handler. */
int
fuse_fs_unlink(struct fuse_fs *fs,
               const char *path)
{
  return fs->op.unlink(path);
}
/* Dispatch to the filesystem's rmdir handler. */
int
fuse_fs_rmdir(struct fuse_fs *fs,
              const char *path)
{
  return fs->op.rmdir(path);
}
/* Dispatch to the filesystem's symlink handler; on success the handler
   fills in the new link's attributes and cache timeouts. */
int
fuse_fs_symlink(struct fuse_fs *fs_,
                const char *linkname_,
                const char *path_,
                struct stat *st_,
                fuse_timeouts_t *timeouts_)
{
  return fs_->op.symlink(linkname_,path_,st_,timeouts_);
}
/* Dispatch to the filesystem's link handler; on success the handler
   fills in the link's attributes and cache timeouts. */
int
fuse_fs_link(struct fuse_fs *fs,
             const char *oldpath,
             const char *newpath,
             struct stat *st_,
             fuse_timeouts_t *timeouts_)
{
  return fs->op.link(oldpath,newpath,st_,timeouts_);
}
/* Dispatch to the filesystem's release handler (close of open handle). */
int
fuse_fs_release(struct fuse_fs *fs,
                fuse_file_info_t *fi)
{
  return fs->op.release(fi);
}
/* Dispatch to the filesystem's opendir handler. */
int
fuse_fs_opendir(struct fuse_fs *fs,
                const char *path,
                fuse_file_info_t *fi)
{
  return fs->op.opendir(path,fi);
}
/* Dispatch to the filesystem's open handler. */
int
fuse_fs_open(struct fuse_fs *fs,
             const char *path,
             fuse_file_info_t *fi)
{
  return fs->op.open(path,fi);
}
/* Free a buffer vector returned by the read_buf path: each element's
   memory, then the vector itself.  NULL is a no-op.
   NOTE(review): buf[i].mem is freed unconditionally; upstream libfuse
   skips elements flagged FUSE_BUF_IS_FD.  Safe only if fd-backed
   elements never reach here (or carry mem == NULL) -- confirm against
   the read_buf producers. */
static
void
fuse_free_buf(struct fuse_bufvec *buf)
{
  if(buf != NULL)
    {
      size_t i;
      for(i = 0; i < buf->count; i++)
        free(buf->buf[i].mem);
      free(buf);
    }
}
  1396. int
  1397. fuse_fs_read_buf(struct fuse_fs *fs,
  1398. struct fuse_bufvec **bufp,
  1399. size_t size,
  1400. off_t off,
  1401. fuse_file_info_t *fi)
  1402. {
  1403. int res;
  1404. res = fs->op.read_buf(fi,bufp,size,off);
  1405. if(res < 0)
  1406. return res;
  1407. return 0;
  1408. }
  1409. int
  1410. fuse_fs_read(struct fuse_fs *fs,
  1411. char *mem,
  1412. size_t size,
  1413. off_t off,
  1414. fuse_file_info_t *fi)
  1415. {
  1416. int res;
  1417. struct fuse_bufvec *buf = NULL;
  1418. res = fuse_fs_read_buf(fs,&buf,size,off,fi);
  1419. if(res == 0)
  1420. {
  1421. struct fuse_bufvec dst = FUSE_BUFVEC_INIT(size);
  1422. dst.buf[0].mem = mem;
  1423. res = fuse_buf_copy(&dst,buf,0);
  1424. }
  1425. fuse_free_buf(buf);
  1426. return res;
  1427. }
/* Dispatch to the filesystem's write_buf handler. */
int
fuse_fs_write_buf(struct fuse_fs *fs,
                  struct fuse_bufvec *buf,
                  off_t off,
                  fuse_file_info_t *fi)
{
  return fs->op.write_buf(fi,buf,off);
}
/* Dispatch to the filesystem's fsync handler. */
int
fuse_fs_fsync(struct fuse_fs *fs,
              int datasync,
              fuse_file_info_t *fi)
{
  return fs->op.fsync(fi,datasync);
}
/* Dispatch to the filesystem's fsyncdir handler. */
int
fuse_fs_fsyncdir(struct fuse_fs *fs,
                 int datasync,
                 fuse_file_info_t *fi)
{
  return fs->op.fsyncdir(fi,datasync);
}
/* Dispatch to the filesystem's flush handler. */
int
fuse_fs_flush(struct fuse_fs *fs,
              fuse_file_info_t *fi)
{
  return fs->op.flush(fi);
}
/* Dispatch to the filesystem's statfs handler. */
int
fuse_fs_statfs(struct fuse_fs *fs,
               const char *path,
               struct statvfs *buf)
{
  return fs->op.statfs(path,buf);
}
/* Dispatch to the filesystem's releasedir handler. */
int
fuse_fs_releasedir(struct fuse_fs *fs,
                   fuse_file_info_t *fi)
{
  return fs->op.releasedir(fi);
}
/* Dispatch to the filesystem's readdir handler. */
int
fuse_fs_readdir(struct fuse_fs *fs,
                fuse_file_info_t *fi,
                fuse_dirents_t *buf)
{
  return fs->op.readdir(fi,buf);
}
/* Dispatch to the filesystem's readdir_plus handler (entries + attrs). */
int
fuse_fs_readdir_plus(struct fuse_fs *fs_,
                     fuse_file_info_t *ffi_,
                     fuse_dirents_t *buf_)
{
  return fs_->op.readdir_plus(ffi_,buf_);
}
/* Dispatch to the filesystem's create handler (atomic create + open). */
int
fuse_fs_create(struct fuse_fs *fs,
               const char *path,
               mode_t mode,
               fuse_file_info_t *fi)
{
  return fs->op.create(path,mode,fi);
}
/* Dispatch to the filesystem's POSIX lock handler. */
int
fuse_fs_lock(struct fuse_fs *fs,
             fuse_file_info_t *fi,
             int cmd,
             struct flock *lock)
{
  return fs->op.lock(fi,cmd,lock);
}
/* Dispatch to the filesystem's BSD flock handler. */
int
fuse_fs_flock(struct fuse_fs *fs,
              fuse_file_info_t *fi,
              int op)
{
  return fs->op.flock(fi,op);
}
/* Dispatch to the filesystem's chown handler. */
int
fuse_fs_chown(struct fuse_fs *fs,
              const char *path,
              uid_t uid,
              gid_t gid)
{
  return fs->op.chown(path,uid,gid);
}
/* Dispatch to the filesystem's fchown handler (chown by open handle). */
int
fuse_fs_fchown(struct fuse_fs *fs_,
               const fuse_file_info_t *ffi_,
               const uid_t uid_,
               const gid_t gid_)
{
  return fs_->op.fchown(ffi_,uid_,gid_);
}
/* Dispatch to the filesystem's truncate handler. */
int
fuse_fs_truncate(struct fuse_fs *fs,
                 const char *path,
                 off_t size)
{
  return fs->op.truncate(path,size);
}
/* Dispatch to the filesystem's ftruncate handler (truncate by handle). */
int
fuse_fs_ftruncate(struct fuse_fs *fs,
                  off_t size,
                  fuse_file_info_t *fi)
{
  return fs->op.ftruncate(fi,size);
}
/* Dispatch to the filesystem's utimens handler (tv[0]=atime, tv[1]=mtime). */
int
fuse_fs_utimens(struct fuse_fs *fs,
                const char *path,
                const struct timespec tv[2])
{
  return fs->op.utimens(path,tv);
}
/* Dispatch to the filesystem's futimens handler (utimens by handle). */
int
fuse_fs_futimens(struct fuse_fs *fs_,
                 const fuse_file_info_t *ffi_,
                 const struct timespec tv_[2])
{
  return fs_->op.futimens(ffi_,tv_);
}
/* Dispatch to the filesystem's access handler. */
int
fuse_fs_access(struct fuse_fs *fs,
               const char *path,
               int mask)
{
  return fs->op.access(path,mask);
}
/* Dispatch to the filesystem's readlink handler. */
int
fuse_fs_readlink(struct fuse_fs *fs,
                 const char *path,
                 char *buf,
                 size_t len)
{
  return fs->op.readlink(path,buf,len);
}
/* Dispatch to the filesystem's mknod handler. */
int
fuse_fs_mknod(struct fuse_fs *fs,
              const char *path,
              mode_t mode,
              dev_t rdev)
{
  return fs->op.mknod(path,mode,rdev);
}
/* Dispatch to the filesystem's mkdir handler. */
int
fuse_fs_mkdir(struct fuse_fs *fs,
              const char *path,
              mode_t mode)
{
  return fs->op.mkdir(path,mode);
}
/* Dispatch to the filesystem's setxattr handler. */
int
fuse_fs_setxattr(struct fuse_fs *fs,
                 const char *path,
                 const char *name,
                 const char *value,
                 size_t size,
                 int flags)
{
  return fs->op.setxattr(path,name,value,size,flags);
}
/* Dispatch to the filesystem's getxattr handler. */
int
fuse_fs_getxattr(struct fuse_fs *fs,
                 const char *path,
                 const char *name,
                 char *value,
                 size_t size)
{
  return fs->op.getxattr(path,name,value,size);
}
/* Dispatch to the filesystem's listxattr handler. */
int
fuse_fs_listxattr(struct fuse_fs *fs,
                  const char *path,
                  char *list,
                  size_t size)
{
  return fs->op.listxattr(path,list,size);
}
/* Dispatch to the filesystem's bmap handler (logical block mapping). */
int
fuse_fs_bmap(struct fuse_fs *fs,
             const char *path,
             size_t blocksize,
             uint64_t *idx)
{
  return fs->op.bmap(path,blocksize,idx);
}
/* Dispatch to the filesystem's removexattr handler. */
int
fuse_fs_removexattr(struct fuse_fs *fs,
                    const char *path,
                    const char *name)
{
  return fs->op.removexattr(path,name);
}
/* Dispatch to the filesystem's ioctl handler. */
int
fuse_fs_ioctl(struct fuse_fs *fs,
              unsigned long cmd,
              void *arg,
              fuse_file_info_t *fi,
              unsigned int flags,
              void *data,
              uint32_t *out_size)
{
  return fs->op.ioctl(fi,cmd,arg,flags,data,out_size);
}
/* Dispatch to the filesystem's poll handler. */
int
fuse_fs_poll(struct fuse_fs *fs,
             fuse_file_info_t *fi,
             fuse_pollhandle_t *ph,
             unsigned *reventsp)
{
  return fs->op.poll(fi,ph,reventsp);
}
/* Dispatch to the filesystem's fallocate handler. */
int
fuse_fs_fallocate(struct fuse_fs *fs,
                  int mode,
                  off_t offset,
                  off_t length,
                  fuse_file_info_t *fi)
{
  return fs->op.fallocate(fi,mode,offset,length);
}
/* Dispatch to the filesystem's copy_file_range handler. */
ssize_t
fuse_fs_copy_file_range(struct fuse_fs *fs_,
                        fuse_file_info_t *ffi_in_,
                        off_t off_in_,
                        fuse_file_info_t *ffi_out_,
                        off_t off_out_,
                        size_t len_,
                        int flags_)
{
  return fs_->op.copy_file_range(ffi_in_,
                                 off_in_,
                                 ffi_out_,
                                 off_out_,
                                 len_,
                                 flags_);
}
  1666. static
  1667. int
  1668. node_open(const struct node *node_)
  1669. {
  1670. return ((node_ != NULL) && (node_->open_count > 0));
  1671. }
  1672. #ifndef CLOCK_MONOTONIC
  1673. #define CLOCK_MONOTONIC CLOCK_REALTIME
  1674. #endif
  1675. static
  1676. void
  1677. curr_time(struct timespec *now)
  1678. {
  1679. static clockid_t clockid = CLOCK_MONOTONIC;
  1680. int res = clock_gettime(clockid,now);
  1681. if(res == -1 && errno == EINVAL)
  1682. {
  1683. clockid = CLOCK_REALTIME;
  1684. res = clock_gettime(clockid,now);
  1685. }
  1686. if(res == -1)
  1687. {
  1688. perror("fuse: clock_gettime");
  1689. abort();
  1690. }
  1691. }
  1692. static
  1693. void
  1694. update_stat(struct node *node_,
  1695. const struct stat *stnew_)
  1696. {
  1697. if((node_->stat_cache_valid) &&
  1698. ((node_->mtim.tv_sec != stnew_->st_mtim.tv_sec) ||
  1699. (node_->mtim.tv_nsec != stnew_->st_mtim.tv_nsec) ||
  1700. (node_->ino != stnew_->st_ino) ||
  1701. (node_->size != stnew_->st_size)))
  1702. node_->stat_cache_valid = 0;
  1703. node_->ino = stnew_->st_ino;
  1704. node_->size = stnew_->st_size;
  1705. node_->mtim = stnew_->st_mtim;
  1706. }
  1707. static
  1708. int
  1709. set_path_info(struct fuse *f,
  1710. fuse_ino_t nodeid,
  1711. const char *name,
  1712. struct fuse_entry_param *e)
  1713. {
  1714. struct node *node;
  1715. node = find_node(f,nodeid,name);
  1716. if(node == NULL)
  1717. return -ENOMEM;
  1718. e->ino = node->nodeid;
  1719. e->generation = node->generation;
  1720. pthread_mutex_lock(&f->lock);
  1721. update_stat(node,&e->attr);
  1722. pthread_mutex_unlock(&f->lock);
  1723. set_stat(f,e->ino,&e->attr);
  1724. return 0;
  1725. }
  1726. static
  1727. int
  1728. lookup_path(struct fuse *f,
  1729. fuse_ino_t nodeid,
  1730. const char *name,
  1731. const char *path,
  1732. struct fuse_entry_param *e,
  1733. fuse_file_info_t *fi)
  1734. {
  1735. int rv;
  1736. memset(e,0,sizeof(struct fuse_entry_param));
  1737. rv = ((fi == NULL) ?
  1738. fuse_fs_getattr(f->fs,path,&e->attr,&e->timeout) :
  1739. fuse_fs_fgetattr(f->fs,&e->attr,fi,&e->timeout));
  1740. if(rv)
  1741. return rv;
  1742. return set_path_info(f,nodeid,name,e);
  1743. }
/* Return this thread's fuse context, allocating it on first use.
   Allocation failure aborts the process (see comment below). */
static
struct fuse_context_i*
fuse_get_context_internal(void)
{
  struct fuse_context_i *c;

  c = (struct fuse_context_i *)pthread_getspecific(fuse_context_key);
  if(c == NULL)
    {
      c = (struct fuse_context_i*)calloc(1,sizeof(struct fuse_context_i));
      if(c == NULL)
        {
          /* This is hard to deal with properly, so just
             abort. If memory is so low that the
             context cannot be allocated, there's not
             much hope for the filesystem anyway */
          fprintf(stderr,"fuse: failed to allocate thread specific data\n");
          abort();
        }
      pthread_setspecific(fuse_context_key,c);
    }

  return c;
}
/* Destructor for the per-thread context; installed via
   pthread_key_create() in fuse_create_context_key(). */
static
void
fuse_freecontext(void *data)
{
  free(data);
}
/* Create the process-wide TSD key for per-thread contexts, reference
   counted so multiple fuse instances share one key.  Returns 0 on
   success, -1 if the key cannot be created. */
static
int
fuse_create_context_key(void)
{
  int err = 0;

  pthread_mutex_lock(&fuse_context_lock);
  /* Only the first instance actually creates the key. */
  if(!fuse_context_ref)
    {
      err = pthread_key_create(&fuse_context_key,fuse_freecontext);
      if(err)
        {
          fprintf(stderr,"fuse: failed to create thread specific key: %s\n",
                  strerror(err));
          pthread_mutex_unlock(&fuse_context_lock);
          return -1;
        }
    }
  fuse_context_ref++;
  pthread_mutex_unlock(&fuse_context_lock);

  return 0;
}
/* Drop one reference on the shared TSD key; the last reference frees
   the calling thread's context and deletes the key. */
static
void
fuse_delete_context_key(void)
{
  pthread_mutex_lock(&fuse_context_lock);
  fuse_context_ref--;
  if(!fuse_context_ref)
    {
      /* Key destructors don't run on pthread_key_delete, so free the
         current thread's context explicitly. */
      free(pthread_getspecific(fuse_context_key));
      pthread_key_delete(fuse_context_key);
    }
  pthread_mutex_unlock(&fuse_context_lock);
}
  1806. static
  1807. struct fuse*
  1808. req_fuse_prepare(fuse_req_t req)
  1809. {
  1810. struct fuse_context_i *c = fuse_get_context_internal();
  1811. const struct fuse_ctx *ctx = fuse_req_ctx(req);
  1812. c->req = req;
  1813. c->ctx.fuse = req_fuse(req);
  1814. c->ctx.uid = ctx->uid;
  1815. c->ctx.gid = ctx->gid;
  1816. c->ctx.pid = ctx->pid;
  1817. c->ctx.umask = ctx->umask;
  1818. return c->ctx.fuse;
  1819. }
/* Send an error (or success when err == 0) reply.  Internal code
   carries negative errno values; fuse_reply_err() wants them
   non-negated, hence the sign flip. */
static
inline
void
reply_err(fuse_req_t req,
          int err)
{
  /* fuse_reply_err() uses non-negated errno values */
  fuse_reply_err(req,-err);
}
  1829. static
  1830. void
  1831. reply_entry(fuse_req_t req,
  1832. const struct fuse_entry_param *e,
  1833. int err)
  1834. {
  1835. if(!err)
  1836. {
  1837. struct fuse *f = req_fuse(req);
  1838. if(fuse_reply_entry(req,e) == -ENOENT)
  1839. {
  1840. /* Skip forget for negative result */
  1841. if(e->ino != 0)
  1842. forget_node(f,e->ino,1);
  1843. }
  1844. }
  1845. else
  1846. {
  1847. reply_err(req,err);
  1848. }
  1849. }
/* Dispatch to the filesystem's init handler. */
void
fuse_fs_init(struct fuse_fs *fs,
             struct fuse_conn_info *conn)
{
  fs->op.init(conn);
}
/* Low-level INIT hook: reset this thread's context, advertise export
   support to the kernel and forward to the filesystem's init. */
static
void
fuse_lib_init(void *data,
              struct fuse_conn_info *conn)
{
  struct fuse *f = (struct fuse *)data;
  struct fuse_context_i *c = fuse_get_context_internal();

  memset(c,0,sizeof(*c));
  c->ctx.fuse = f;
  conn->want |= FUSE_CAP_EXPORT_SUPPORT;
  fuse_fs_init(f->fs,conn);
}
/* Dispatch to the filesystem's destroy handler, then free the fs
   wrapper itself (ownership transfers here). */
void
fuse_fs_destroy(struct fuse_fs *fs)
{
  fs->op.destroy();
  free(fs);
}
/* Low-level DESTROY hook: reset this thread's context and tear down
   the filesystem; f->fs is dead after this. */
static
void
fuse_lib_destroy(void *data)
{
  struct fuse *f = (struct fuse *)data;
  struct fuse_context_i *c = fuse_get_context_internal();

  memset(c,0,sizeof(*c));
  c->ctx.fuse = f;
  fuse_fs_destroy(f->fs);
  f->fs = NULL;
}
/* LOOKUP handler.  "." and ".." are special-cased: the name is
   replaced by NULL and the lookup targets the directory itself (".")
   or its parent ("..").  For "." the node is pinned via refctr while
   the lock is dropped.  ENOENT is converted into a negative entry
   (ino == 0) so the kernel can cache the miss. */
static
void
fuse_lib_lookup(fuse_req_t req,
                fuse_ino_t parent,
                const char *name)
{
  struct fuse *f = req_fuse_prepare(req);
  struct fuse_entry_param e;
  char *path;
  int err;
  struct node *dot = NULL;

  if(name[0] == '.')
    {
      int len = strlen(name);

      if(len == 1 || (name[1] == '.' && len == 2))
        {
          pthread_mutex_lock(&f->lock);
          if(len == 1)
            {
              dot = get_node_nocheck(f,parent);
              if(dot == NULL)
                {
                  pthread_mutex_unlock(&f->lock);
                  reply_entry(req,&e,-ESTALE);
                  return;
                }
              /* Pin "." so it can't be freed while we work unlocked. */
              dot->refctr++;
            }
          else
            {
              /* ".." -> redirect the lookup at the parent's parent. */
              parent = get_node(f,parent)->parent->nodeid;
            }
          pthread_mutex_unlock(&f->lock);
          name = NULL;
        }
    }

  err = get_path_name(f,parent,name,&path);
  if(!err)
    {
      err = lookup_path(f,parent,name,path,&e,NULL);
      if(err == -ENOENT)
        {
          /* Negative entry: ino 0 tells the kernel "does not exist". */
          e.ino = 0;
          err = 0;
        }
      free_path(f,parent,path);
    }

  if(dot)
    {
      pthread_mutex_lock(&f->lock);
      unref_node(f,dot);
      pthread_mutex_unlock(&f->lock);
    }

  reply_entry(req,&e,err);
}
/* Shared worker for FORGET and BATCH_FORGET. */
static
void
do_forget(struct fuse *f,
          const fuse_ino_t ino,
          const uint64_t nlookup)
{
  forget_node(f,ino,nlookup);
}
/* FORGET handler; FORGET takes no reply payload, so fuse_reply_none. */
static
void
fuse_lib_forget(fuse_req_t req,
                const fuse_ino_t ino,
                const uint64_t nlookup)
{
  do_forget(req_fuse(req),ino,nlookup);
  fuse_reply_none(req);
}
  1957. static
  1958. void
  1959. fuse_lib_forget_multi(fuse_req_t req,
  1960. size_t count,
  1961. struct fuse_forget_data *forgets)
  1962. {
  1963. struct fuse *f = req_fuse(req);
  1964. size_t i;
  1965. for(i = 0; i < count; i++)
  1966. do_forget(f,forgets[i].ino,forgets[i].nlookup);
  1967. fuse_reply_none(req);
  1968. }
/* GETATTR handler.  When no file handle is supplied but the node is a
   hidden (unlinked-while-open) file, substitute the stashed hidden
   handle so attributes remain reachable.  On success the node's stat
   cache is refreshed before replying. */
static
void
fuse_lib_getattr(fuse_req_t req,
                 fuse_ino_t ino,
                 fuse_file_info_t *fi)
{
  int err;
  char *path;
  struct fuse *f;
  struct stat buf;
  struct node *node;
  fuse_timeouts_t timeout;
  fuse_file_info_t ffi = {0};

  f = req_fuse_prepare(req);

  if(fi == NULL)
    {
      pthread_mutex_lock(&f->lock);
      node = get_node(f,ino);
      if(node->is_hidden)
        {
          /* Unlinked but open: fall back to the hidden handle. */
          fi = &ffi;
          fi->fh = node->hidden_fh;
        }
      pthread_mutex_unlock(&f->lock);
    }

  memset(&buf,0,sizeof(buf));

  err = 0;
  path = NULL;
  /* Path resolution is only needed for the path-based getattr. */
  if(fi == NULL)
    err = get_path(f,ino,&path);

  if(!err)
    {
      err = ((fi == NULL) ?
             fuse_fs_getattr(f->fs,path,&buf,&timeout) :
             fuse_fs_fgetattr(f->fs,&buf,fi,&timeout));
      free_path(f,ino,path);
    }

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      node = get_node(f,ino);
      update_stat(node,&buf);
      pthread_mutex_unlock(&f->lock);
      set_stat(f,ino,&buf);
      /* NOTE(review): `timeout` is assumed to be filled in by the
         getattr/fgetattr handler on success -- confirm handlers
         always set it. */
      fuse_reply_attr(req,&buf,timeout.attr);
    }
  else
    {
      reply_err(req,err);
    }
}
/* Dispatch to the filesystem's chmod handler. */
int
fuse_fs_chmod(struct fuse_fs *fs,
              const char *path,
              mode_t mode)
{
  return fs->op.chmod(path,mode);
}
/* Dispatch to the filesystem's fchmod handler (chmod by open handle). */
int
fuse_fs_fchmod(struct fuse_fs *fs_,
               const fuse_file_info_t *ffi_,
               const mode_t mode_)
{
  return fs_->op.fchmod(ffi_,mode_);
}
/* SETATTR handler.  Applies the requested attribute changes in a
   fixed order -- mode, ownership, size, times -- stopping at the first
   failure, then re-fetches attributes so the reply and the stat cache
   reflect the final state.  Hidden (unlinked-but-open) nodes are
   served through their stashed handle, and each sub-operation picks
   the path- or handle-based variant depending on whether `fi` is set. */
static
void
fuse_lib_setattr(fuse_req_t req,
                 fuse_ino_t ino,
                 struct stat *attr,
                 int valid,
                 fuse_file_info_t *fi)
{
  struct fuse *f = req_fuse_prepare(req);
  struct stat buf;
  char *path;
  int err;
  struct node *node;
  fuse_timeouts_t timeout;
  fuse_file_info_t ffi = {0};

  if(fi == NULL)
    {
      pthread_mutex_lock(&f->lock);
      node = get_node(f,ino);
      if(node->is_hidden)
        {
          /* Unlinked but open: operate on the hidden handle. */
          fi = &ffi;
          fi->fh = node->hidden_fh;
        }
      pthread_mutex_unlock(&f->lock);
    }

  memset(&buf,0,sizeof(buf));

  err = 0;
  path = NULL;
  if(fi == NULL)
    err = get_path(f,ino,&path);

  if(!err)
    {
      err = 0;  /* (redundant -- err is already 0 here) */
      if(!err && (valid & FATTR_MODE))
        err = ((fi == NULL) ?
               fuse_fs_chmod(f->fs,path,attr->st_mode) :
               fuse_fs_fchmod(f->fs,fi,attr->st_mode));

      if(!err && (valid & (FATTR_UID | FATTR_GID)))
        {
          /* -1 means "leave unchanged" per chown(2) convention. */
          uid_t uid = ((valid & FATTR_UID) ? attr->st_uid : (uid_t)-1);
          gid_t gid = ((valid & FATTR_GID) ? attr->st_gid : (gid_t)-1);

          err = ((fi == NULL) ?
                 fuse_fs_chown(f->fs,path,uid,gid) :
                 fuse_fs_fchown(f->fs,fi,uid,gid));
        }

      if(!err && (valid & FATTR_SIZE))
        err = ((fi == NULL) ?
               fuse_fs_truncate(f->fs,path,attr->st_size) :
               fuse_fs_ftruncate(f->fs,attr->st_size,fi));

#ifdef HAVE_UTIMENSAT
      /* Nanosecond path: UTIME_OMIT lets us change atime and mtime
         independently. */
      if(!err && (valid & (FATTR_ATIME | FATTR_MTIME)))
        {
          struct timespec tv[2];

          tv[0].tv_sec = 0;
          tv[1].tv_sec = 0;
          tv[0].tv_nsec = UTIME_OMIT;
          tv[1].tv_nsec = UTIME_OMIT;

          if(valid & FATTR_ATIME_NOW)
            tv[0].tv_nsec = UTIME_NOW;
          else if(valid & FATTR_ATIME)
            tv[0] = attr->st_atim;

          if(valid & FATTR_MTIME_NOW)
            tv[1].tv_nsec = UTIME_NOW;
          else if(valid & FATTR_MTIME)
            tv[1] = attr->st_mtim;

          err = ((fi == NULL) ?
                 fuse_fs_utimens(f->fs,path,tv) :
                 fuse_fs_futimens(f->fs,fi,tv));
        }
      else
#endif
        /* Fallback: only possible when BOTH times are being set. */
        if(!err && ((valid & (FATTR_ATIME|FATTR_MTIME)) == (FATTR_ATIME|FATTR_MTIME)))
          {
            struct timespec tv[2];

            tv[0].tv_sec = attr->st_atime;
            tv[0].tv_nsec = ST_ATIM_NSEC(attr);
            tv[1].tv_sec = attr->st_mtime;
            tv[1].tv_nsec = ST_MTIM_NSEC(attr);
            err = ((fi == NULL) ?
                   fuse_fs_utimens(f->fs,path,tv) :
                   fuse_fs_futimens(f->fs,fi,tv));
          }

      /* Re-read attributes so the reply matches reality. */
      if(!err)
        err = ((fi == NULL) ?
               fuse_fs_getattr(f->fs,path,&buf,&timeout) :
               fuse_fs_fgetattr(f->fs,&buf,fi,&timeout));

      free_path(f,ino,path);
    }

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      update_stat(get_node(f,ino),&buf);
      pthread_mutex_unlock(&f->lock);
      set_stat(f,ino,&buf);
      fuse_reply_attr(req,&buf,timeout.attr);
    }
  else
    {
      reply_err(req,err);
    }
}
  2136. static
  2137. void
  2138. fuse_lib_access(fuse_req_t req,
  2139. fuse_ino_t ino,
  2140. int mask)
  2141. {
  2142. struct fuse *f = req_fuse_prepare(req);
  2143. char *path;
  2144. int err;
  2145. err = get_path(f,ino,&path);
  2146. if(!err)
  2147. {
  2148. err = fuse_fs_access(f->fs,path,mask);
  2149. free_path(f,ino,path);
  2150. }
  2151. reply_err(req,err);
  2152. }
  2153. static
  2154. void
  2155. fuse_lib_readlink(fuse_req_t req,
  2156. fuse_ino_t ino)
  2157. {
  2158. struct fuse *f = req_fuse_prepare(req);
  2159. char linkname[PATH_MAX + 1];
  2160. char *path;
  2161. int err;
  2162. err = get_path(f,ino,&path);
  2163. if(!err)
  2164. {
  2165. err = fuse_fs_readlink(f->fs,path,linkname,sizeof(linkname));
  2166. free_path(f,ino,path);
  2167. }
  2168. if(!err)
  2169. {
  2170. linkname[PATH_MAX] = '\0';
  2171. fuse_reply_readlink(req,linkname);
  2172. }
  2173. else
  2174. {
  2175. reply_err(req,err);
  2176. }
  2177. }
/* MKNOD handler.  Regular files are first attempted via the create
   handler (create+open, then immediately released); if that reports
   ENOSYS -- or the request is not for a regular file -- fall back to
   the plain mknod handler. */
static
void
fuse_lib_mknod(fuse_req_t req,
               fuse_ino_t parent,
               const char *name,
               mode_t mode,
               dev_t rdev)
{
  struct fuse *f = req_fuse_prepare(req);
  struct fuse_entry_param e;
  char *path;
  int err;

  err = get_path_name(f,parent,name,&path);
  if(!err)
    {
      err = -ENOSYS;
      if(S_ISREG(mode))
        {
          fuse_file_info_t fi;

          memset(&fi,0,sizeof(fi));
          fi.flags = O_CREAT | O_EXCL | O_WRONLY;
          err = fuse_fs_create(f->fs,path,mode,&fi);
          if(!err)
            {
              err = lookup_path(f,parent,name,path,&e,
                                &fi);
              /* mknod has no open handle to return: close it now. */
              fuse_fs_release(f->fs,&fi);
            }
        }
      if(err == -ENOSYS)
        {
          err = fuse_fs_mknod(f->fs,path,mode,rdev);
          if(!err)
            err = lookup_path(f,parent,name,path,&e,NULL);
        }
      free_path(f,parent,path);
    }
  reply_entry(req,&e,err);
}
  2217. static
  2218. void
  2219. fuse_lib_mkdir(fuse_req_t req,
  2220. fuse_ino_t parent,
  2221. const char *name,
  2222. mode_t mode)
  2223. {
  2224. struct fuse *f = req_fuse_prepare(req);
  2225. struct fuse_entry_param e;
  2226. char *path;
  2227. int err;
  2228. err = get_path_name(f,parent,name,&path);
  2229. if(!err)
  2230. {
  2231. err = fuse_fs_mkdir(f->fs,path,mode);
  2232. if(!err)
  2233. err = lookup_path(f,parent,name,path,&e,NULL);
  2234. free_path(f,parent,path);
  2235. }
  2236. reply_entry(req,&e,err);
  2237. }
/* UNLINK handler.  If the target is currently open, ask the
   filesystem to "hide" it first (keep it reachable via a stashed
   handle) so open descriptors keep working after the name is gone;
   the hidden handle is released later in fuse_do_release(). */
static
void
fuse_lib_unlink(fuse_req_t req,
                fuse_ino_t parent,
                const char *name)
{
  int err;
  char *path;
  struct fuse *f;
  struct node *wnode;

  f = req_fuse_prepare(req);
  err = get_path_wrlock(f,parent,name,&path,&wnode);

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      if(node_open(wnode))
        {
          err = fuse_fs_prepare_hide(f->fs,path,&wnode->hidden_fh);
          if(!err)
            wnode->is_hidden = 1;
        }
      pthread_mutex_unlock(&f->lock);

      err = fuse_fs_unlink(f->fs,path);
      if(!err)
        remove_node(f,parent,name);

      free_path_wrlock(f,parent,wnode,path);
    }

  reply_err(req,err);
}
  2267. static
  2268. void
  2269. fuse_lib_rmdir(fuse_req_t req,
  2270. fuse_ino_t parent,
  2271. const char *name)
  2272. {
  2273. struct fuse *f = req_fuse_prepare(req);
  2274. struct node *wnode;
  2275. char *path;
  2276. int err;
  2277. err = get_path_wrlock(f,parent,name,&path,&wnode);
  2278. if(!err)
  2279. {
  2280. err = fuse_fs_rmdir(f->fs,path);
  2281. if(!err)
  2282. remove_node(f,parent,name);
  2283. free_path_wrlock(f,parent,wnode,path);
  2284. }
  2285. reply_err(req,err);
  2286. }
  2287. static
  2288. void
  2289. fuse_lib_symlink(fuse_req_t req_,
  2290. const char *linkname_,
  2291. fuse_ino_t parent_,
  2292. const char *name_)
  2293. {
  2294. int rv;
  2295. char *path;
  2296. struct fuse *f;
  2297. struct fuse_entry_param e = {0};
  2298. f = req_fuse_prepare(req_);
  2299. rv = get_path_name(f,parent_,name_,&path);
  2300. if(rv == 0)
  2301. {
  2302. rv = fuse_fs_symlink(f->fs,linkname_,path,&e.attr,&e.timeout);
  2303. if(rv == 0)
  2304. rv = set_path_info(f,parent_,name_,&e);
  2305. free_path(f,parent_,path);
  2306. }
  2307. reply_entry(req_,&e,rv);
  2308. }
/* RENAME handler.  If the destination name points at an open file,
   "hide" it first so its open descriptors survive being clobbered
   (mirrors fuse_lib_unlink).  On success the cache is updated to move
   the node under its new name. */
static
void
fuse_lib_rename(fuse_req_t req,
                fuse_ino_t olddir,
                const char *oldname,
                fuse_ino_t newdir,
                const char *newname)
{
  int err;
  struct fuse *f;
  char *oldpath;
  char *newpath;
  struct node *wnode1;
  struct node *wnode2;

  f = req_fuse_prepare(req);
  err = get_path2(f,olddir,oldname,newdir,newname,
                  &oldpath,&newpath,&wnode1,&wnode2);

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      if(node_open(wnode2))
        {
          err = fuse_fs_prepare_hide(f->fs,newpath,&wnode2->hidden_fh);
          if(!err)
            wnode2->is_hidden = 1;
        }
      pthread_mutex_unlock(&f->lock);

      err = fuse_fs_rename(f->fs,oldpath,newpath);
      if(!err)
        err = rename_node(f,olddir,oldname,newdir,newname);

      free_path2(f,olddir,newdir,wnode1,wnode2,oldpath,newpath);
    }

  reply_err(req,err);
}
  2343. static
  2344. void
  2345. fuse_lib_link(fuse_req_t req,
  2346. fuse_ino_t ino,
  2347. fuse_ino_t newparent,
  2348. const char *newname)
  2349. {
  2350. int rv;
  2351. char *oldpath;
  2352. char *newpath;
  2353. struct fuse *f;
  2354. struct fuse_entry_param e = {0};
  2355. f = req_fuse_prepare(req);
  2356. rv = get_path2(f,ino,NULL,newparent,newname,
  2357. &oldpath,&newpath,NULL,NULL);
  2358. if(!rv)
  2359. {
  2360. rv = fuse_fs_link(f->fs,oldpath,newpath,&e.attr,&e.timeout);
  2361. if(rv == 0)
  2362. rv = set_path_info(f,newparent,newname,&e);
  2363. free_path2(f,ino,newparent,NULL,NULL,oldpath,newpath);
  2364. }
  2365. reply_entry(req,&e,rv);
  2366. }
/* Common release path for files: forward the release, drop the node's
   open count, and -- if this was the last open handle on a hidden
   (unlinked) node -- free the stashed hidden handle outside the lock. */
static
void
fuse_do_release(struct fuse *f,
                fuse_ino_t ino,
                fuse_file_info_t *fi)
{
  struct node *node;
  uint64_t fh;
  int was_hidden;

  fh = 0;
  fuse_fs_release(f->fs,fi);

  pthread_mutex_lock(&f->lock);
  node = get_node(f,ino);
  assert(node->open_count > 0);
  node->open_count--;

  was_hidden = 0;
  if(node->is_hidden && (node->open_count == 0))
    {
      was_hidden = 1;
      node->is_hidden = 0;
      fh = node->hidden_fh;
    }
  pthread_mutex_unlock(&f->lock);

  /* Free the hidden handle without holding f->lock. */
  if(was_hidden)
    fuse_fs_free_hide(f->fs,fh);
}
  2393. static
  2394. void
  2395. fuse_lib_create(fuse_req_t req,
  2396. fuse_ino_t parent,
  2397. const char *name,
  2398. mode_t mode,
  2399. fuse_file_info_t *fi)
  2400. {
  2401. int err;
  2402. char *path;
  2403. struct fuse *f;
  2404. struct fuse_entry_param e;
  2405. f = req_fuse_prepare(req);
  2406. err = get_path_name(f,parent,name,&path);
  2407. if(!err)
  2408. {
  2409. err = fuse_fs_create(f->fs,path,mode,fi);
  2410. if(!err)
  2411. {
  2412. err = lookup_path(f,parent,name,path,&e,fi);
  2413. if(err)
  2414. {
  2415. fuse_fs_release(f->fs,fi);
  2416. }
  2417. else if(!S_ISREG(e.attr.st_mode))
  2418. {
  2419. err = -EIO;
  2420. fuse_fs_release(f->fs,fi);
  2421. forget_node(f,e.ino,1);
  2422. }
  2423. }
  2424. }
  2425. if(!err)
  2426. {
  2427. pthread_mutex_lock(&f->lock);
  2428. get_node(f,e.ino)->open_count++;
  2429. pthread_mutex_unlock(&f->lock);
  2430. if(fuse_reply_create(req,&e,fi) == -ENOENT)
  2431. {
  2432. /* The open syscall was interrupted,so it
  2433. must be cancelled */
  2434. fuse_do_release(f,e.ino,fi);
  2435. forget_node(f,e.ino,1);
  2436. }
  2437. }
  2438. else
  2439. {
  2440. reply_err(req,err);
  2441. }
  2442. free_path(f,parent,path);
  2443. }
  2444. static
  2445. double
  2446. diff_timespec(const struct timespec *t1,
  2447. const struct timespec *t2)
  2448. {
  2449. return (t1->tv_sec - t2->tv_sec) +
  2450. ((double)t1->tv_nsec - (double)t2->tv_nsec) / 1000000000.0;
  2451. }
/* auto_cache support for open: if the node's cached stat is still
   valid, re-fetch attributes through the new handle; when nothing
   changed, tell the kernel to keep its page cache (keep_cache).
   `path` is currently unused.  Note f->lock is dropped around the
   fgetattr call, so the validity flag is re-checked afterwards. */
static
void
open_auto_cache(struct fuse *f,
                fuse_ino_t ino,
                const char *path,
                fuse_file_info_t *fi)
{
  struct node *node;
  fuse_timeouts_t timeout;

  pthread_mutex_lock(&f->lock);

  node = get_node(f,ino);
  if(node->stat_cache_valid)
    {
      int err;
      struct stat stbuf;

      /* fgetattr may block: release the lock for the call. */
      pthread_mutex_unlock(&f->lock);
      err = fuse_fs_fgetattr(f->fs,&stbuf,fi,&timeout);
      pthread_mutex_lock(&f->lock);

      /* update_stat() clears stat_cache_valid if attrs changed. */
      if(!err)
        update_stat(node,&stbuf);
      else
        node->stat_cache_valid = 0;
    }

  if(node->stat_cache_valid)
    fi->keep_cache = 1;

  node->stat_cache_valid = 1;

  pthread_mutex_unlock(&f->lock);
}
/* OPEN handler: open the file, optionally run the auto_cache check,
   bump the node's open count and reply.  If the kernel already
   abandoned the request (-ENOENT) the open is rolled back. */
static
void
fuse_lib_open(fuse_req_t req,
              fuse_ino_t ino,
              fuse_file_info_t *fi)
{
  int err;
  char *path;
  struct fuse *f;

  f = req_fuse_prepare(req);
  err = get_path(f,ino,&path);

  if(!err)
    {
      err = fuse_fs_open(f->fs,path,fi);
      if(!err)
        {
          if(fi && fi->auto_cache)
            open_auto_cache(f,ino,path,fi);
        }
    }

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      get_node(f,ino)->open_count++;
      pthread_mutex_unlock(&f->lock);
      /* The open syscall was interrupted, so it must be cancelled */
      if(fuse_reply_open(req,fi) == -ENOENT)
        fuse_do_release(f,ino,fi);
    }
  else
    {
      reply_err(req,err);
    }

  free_path(f,ino,path);
}
  2515. static
  2516. void
  2517. fuse_lib_read(fuse_req_t req,
  2518. fuse_ino_t ino,
  2519. size_t size,
  2520. off_t off,
  2521. fuse_file_info_t *fi)
  2522. {
  2523. struct fuse *f = req_fuse_prepare(req);
  2524. struct fuse_bufvec *buf = NULL;
  2525. int res;
  2526. res = fuse_fs_read_buf(f->fs,&buf,size,off,fi);
  2527. if(res == 0)
  2528. fuse_reply_data(req,buf,FUSE_BUF_SPLICE_MOVE);
  2529. else
  2530. reply_err(req,res);
  2531. fuse_free_buf(buf);
  2532. }
  2533. static
  2534. void
  2535. fuse_lib_write_buf(fuse_req_t req,
  2536. fuse_ino_t ino,
  2537. struct fuse_bufvec *buf,
  2538. off_t off,
  2539. fuse_file_info_t *fi)
  2540. {
  2541. int res;
  2542. struct fuse *f = req_fuse_prepare(req);
  2543. res = fuse_fs_write_buf(f->fs,buf,off,fi);
  2544. free_path(f,ino,NULL);
  2545. if(res >= 0)
  2546. fuse_reply_write(req,res);
  2547. else
  2548. reply_err(req,res);
  2549. }
  2550. static
  2551. void
  2552. fuse_lib_fsync(fuse_req_t req,
  2553. fuse_ino_t ino,
  2554. int datasync,
  2555. fuse_file_info_t *fi)
  2556. {
  2557. int err;
  2558. struct fuse *f = req_fuse_prepare(req);
  2559. err = fuse_fs_fsync(f->fs,datasync,fi);
  2560. reply_err(req,err);
  2561. }
  2562. static
  2563. struct fuse_dh*
  2564. get_dirhandle(const fuse_file_info_t *llfi,
  2565. fuse_file_info_t *fi)
  2566. {
  2567. struct fuse_dh *dh = (struct fuse_dh *)(uintptr_t)llfi->fh;
  2568. memset(fi,0,sizeof(fuse_file_info_t));
  2569. fi->fh = dh->fh;
  2570. return dh;
  2571. }
/* Handle FUSE opendir: allocate a fuse_dh (directory handle) whose
   address becomes the kernel-visible handle, initialize its entry
   cache and lock, then call the filesystem's opendir.  On error or
   on an interrupted reply the handle is torn down again. */
static
void
fuse_lib_opendir(fuse_req_t req,
                 fuse_ino_t ino,
                 fuse_file_info_t *llfi)
{
  int err;
  char *path;
  struct fuse_dh *dh;
  fuse_file_info_t fi;
  struct fuse *f = req_fuse_prepare(req);

  dh = (struct fuse_dh *)calloc(1,sizeof(struct fuse_dh));
  if(dh == NULL)
    {
      reply_err(req,-ENOMEM);
      return;
    }

  fuse_dirents_init(&dh->d);
  fuse_mutex_init(&dh->lock);

  /* The kernel handle is the pointer to our dirhandle. */
  llfi->fh = (uintptr_t)dh;

  memset(&fi,0,sizeof(fi));
  fi.flags = llfi->flags;

  err = get_path(f,ino,&path);
  if(!err)
    {
      err = fuse_fs_opendir(f->fs,path,&fi);
      /* Remember the filesystem's own handle; propagate cache hints. */
      dh->fh = fi.fh;
      llfi->keep_cache    = fi.keep_cache;
      llfi->cache_readdir = fi.cache_readdir;
    }

  if(!err)
    {
      if(fuse_reply_open(req,llfi) == -ENOENT)
        {
          /* The opendir syscall was interrupted, so it
             must be cancelled */
          fuse_fs_releasedir(f->fs,&fi);
          pthread_mutex_destroy(&dh->lock);
          free(dh);
        }
    }
  else
    {
      reply_err(req,err);
      pthread_mutex_destroy(&dh->lock);
      free(dh);
    }

  free_path(f,ino,path);
}
  2621. static
  2622. size_t
  2623. readdir_buf_size(fuse_dirents_t *d_,
  2624. size_t size_,
  2625. off_t off_)
  2626. {
  2627. if(off_ >= kv_size(d_->offs))
  2628. return 0;
  2629. if((kv_A(d_->offs,off_) + size_) > d_->data_len)
  2630. return (d_->data_len - kv_A(d_->offs,off_));
  2631. return size_;
  2632. }
  2633. static
  2634. char*
  2635. readdir_buf(fuse_dirents_t *d_,
  2636. off_t off_)
  2637. {
  2638. return &d_->buf[kv_A(d_->offs,off_)];
  2639. }
/* Handle FUSE readdir.  The whole directory is read into the
   dirhandle's cache on the first call (offset 0) or when the cache
   is empty; later calls serve slices of that cache.  The dirhandle
   lock serializes concurrent readdirs on the same handle. */
static
void
fuse_lib_readdir(fuse_req_t req_,
                 fuse_ino_t ino_,
                 size_t size_,
                 off_t off_,
                 fuse_file_info_t *llffi_)
{
  int rv;
  struct fuse *f;
  fuse_dirents_t *d;
  struct fuse_dh *dh;
  fuse_file_info_t fi;

  f  = req_fuse_prepare(req_);
  dh = get_dirhandle(llffi_,&fi);
  d  = &dh->d;

  pthread_mutex_lock(&dh->lock);

  rv = 0;
  /* (Re)fill the cache on rewind or first use. */
  if((off_ == 0) || (d->data_len == 0))
    rv = fuse_fs_readdir(f->fs,&fi,d);

  if(rv)
    {
      reply_err(req_,rv);
      goto out;
    }

  size_ = readdir_buf_size(d,size_,off_);

  fuse_reply_buf(req_,
                 readdir_buf(d,off_),
                 size_);

 out:
  pthread_mutex_unlock(&dh->lock);
}
/* Handle FUSE readdirplus: same caching scheme as fuse_lib_readdir,
   but entries carry full attributes (fuse_fs_readdir_plus). */
static
void
fuse_lib_readdir_plus(fuse_req_t req_,
                      fuse_ino_t ino_,
                      size_t size_,
                      off_t off_,
                      fuse_file_info_t *llffi_)
{
  int rv;
  struct fuse *f;
  fuse_dirents_t *d;
  struct fuse_dh *dh;
  fuse_file_info_t fi;

  f  = req_fuse_prepare(req_);
  dh = get_dirhandle(llffi_,&fi);
  d  = &dh->d;

  pthread_mutex_lock(&dh->lock);

  rv = 0;
  /* (Re)fill the cache on rewind or first use. */
  if((off_ == 0) || (d->data_len == 0))
    rv = fuse_fs_readdir_plus(f->fs,&fi,d);

  if(rv)
    {
      reply_err(req_,rv);
      goto out;
    }

  size_ = readdir_buf_size(d,size_,off_);

  fuse_reply_buf(req_,
                 readdir_buf(d,off_),
                 size_);

 out:
  pthread_mutex_unlock(&dh->lock);
}
/* Handle FUSE releasedir: forward to the filesystem, then destroy
   the directory handle allocated in opendir. */
static
void
fuse_lib_releasedir(fuse_req_t req_,
                    fuse_ino_t ino_,
                    fuse_file_info_t *llfi_)
{
  struct fuse *f;
  struct fuse_dh *dh;
  fuse_file_info_t fi;

  f  = req_fuse_prepare(req_);
  dh = get_dirhandle(llfi_,&fi);

  fuse_fs_releasedir(f->fs,&fi);

  /* Lock/unlock pair acts as a barrier: a concurrent readdir that
     still holds dh->lock must finish before we tear the handle down. */
  pthread_mutex_lock(&dh->lock);
  pthread_mutex_unlock(&dh->lock);
  pthread_mutex_destroy(&dh->lock);
  fuse_dirents_free(&dh->d);
  free(dh);

  reply_err(req_,0);
}
  2724. static
  2725. void
  2726. fuse_lib_fsyncdir(fuse_req_t req,
  2727. fuse_ino_t ino,
  2728. int datasync,
  2729. fuse_file_info_t *llfi)
  2730. {
  2731. int err;
  2732. fuse_file_info_t fi;
  2733. struct fuse *f = req_fuse_prepare(req);
  2734. get_dirhandle(llfi,&fi);
  2735. err = fuse_fs_fsyncdir(f->fs,datasync,&fi);
  2736. reply_err(req,err);
  2737. }
  2738. static
  2739. void
  2740. fuse_lib_statfs(fuse_req_t req,
  2741. fuse_ino_t ino)
  2742. {
  2743. struct fuse *f = req_fuse_prepare(req);
  2744. struct statvfs buf;
  2745. char *path = NULL;
  2746. int err = 0;
  2747. memset(&buf,0,sizeof(buf));
  2748. if(ino)
  2749. err = get_path(f,ino,&path);
  2750. if(!err)
  2751. {
  2752. err = fuse_fs_statfs(f->fs,path ? path : "/",&buf);
  2753. free_path(f,ino,path);
  2754. }
  2755. if(!err)
  2756. fuse_reply_statfs(req,&buf);
  2757. else
  2758. reply_err(req,err);
  2759. }
  2760. static
  2761. void
  2762. fuse_lib_setxattr(fuse_req_t req,
  2763. fuse_ino_t ino,
  2764. const char *name,
  2765. const char *value,
  2766. size_t size,
  2767. int flags)
  2768. {
  2769. struct fuse *f = req_fuse_prepare(req);
  2770. char *path;
  2771. int err;
  2772. err = get_path(f,ino,&path);
  2773. if(!err)
  2774. {
  2775. err = fuse_fs_setxattr(f->fs,path,name,value,size,flags);
  2776. free_path(f,ino,path);
  2777. }
  2778. reply_err(req,err);
  2779. }
  2780. static
  2781. int
  2782. common_getxattr(struct fuse *f,
  2783. fuse_req_t req,
  2784. fuse_ino_t ino,
  2785. const char *name,
  2786. char *value,
  2787. size_t size)
  2788. {
  2789. int err;
  2790. char *path;
  2791. err = get_path(f,ino,&path);
  2792. if(!err)
  2793. {
  2794. err = fuse_fs_getxattr(f->fs,path,name,value,size);
  2795. free_path(f,ino,path);
  2796. }
  2797. return err;
  2798. }
  2799. static
  2800. void
  2801. fuse_lib_getxattr(fuse_req_t req,
  2802. fuse_ino_t ino,
  2803. const char *name,
  2804. size_t size)
  2805. {
  2806. struct fuse *f = req_fuse_prepare(req);
  2807. int res;
  2808. if(size)
  2809. {
  2810. char *value = (char *)malloc(size);
  2811. if(value == NULL)
  2812. {
  2813. reply_err(req,-ENOMEM);
  2814. return;
  2815. }
  2816. res = common_getxattr(f,req,ino,name,value,size);
  2817. if(res > 0)
  2818. fuse_reply_buf(req,value,res);
  2819. else
  2820. reply_err(req,res);
  2821. free(value);
  2822. }
  2823. else
  2824. {
  2825. res = common_getxattr(f,req,ino,name,NULL,0);
  2826. if(res >= 0)
  2827. fuse_reply_xattr(req,res);
  2828. else
  2829. reply_err(req,res);
  2830. }
  2831. }
  2832. static
  2833. int
  2834. common_listxattr(struct fuse *f,
  2835. fuse_req_t req,
  2836. fuse_ino_t ino,
  2837. char *list,
  2838. size_t size)
  2839. {
  2840. char *path;
  2841. int err;
  2842. err = get_path(f,ino,&path);
  2843. if(!err)
  2844. {
  2845. err = fuse_fs_listxattr(f->fs,path,list,size);
  2846. free_path(f,ino,path);
  2847. }
  2848. return err;
  2849. }
  2850. static
  2851. void
  2852. fuse_lib_listxattr(fuse_req_t req,
  2853. fuse_ino_t ino,
  2854. size_t size)
  2855. {
  2856. struct fuse *f = req_fuse_prepare(req);
  2857. int res;
  2858. if(size)
  2859. {
  2860. char *list = (char *)malloc(size);
  2861. if(list == NULL)
  2862. {
  2863. reply_err(req,-ENOMEM);
  2864. return;
  2865. }
  2866. res = common_listxattr(f,req,ino,list,size);
  2867. if(res > 0)
  2868. fuse_reply_buf(req,list,res);
  2869. else
  2870. reply_err(req,res);
  2871. free(list);
  2872. }
  2873. else
  2874. {
  2875. res = common_listxattr(f,req,ino,NULL,0);
  2876. if(res >= 0)
  2877. fuse_reply_xattr(req,res);
  2878. else
  2879. reply_err(req,res);
  2880. }
  2881. }
  2882. static
  2883. void
  2884. fuse_lib_removexattr(fuse_req_t req,
  2885. fuse_ino_t ino,
  2886. const char *name)
  2887. {
  2888. struct fuse *f = req_fuse_prepare(req);
  2889. char *path;
  2890. int err;
  2891. err = get_path(f,ino,&path);
  2892. if(!err)
  2893. {
  2894. err = fuse_fs_removexattr(f->fs,path,name);
  2895. free_path(f,ino,path);
  2896. }
  2897. reply_err(req,err);
  2898. }
  2899. static
  2900. void
  2901. fuse_lib_copy_file_range(fuse_req_t req_,
  2902. fuse_ino_t nodeid_in_,
  2903. off_t off_in_,
  2904. fuse_file_info_t *ffi_in_,
  2905. fuse_ino_t nodeid_out_,
  2906. off_t off_out_,
  2907. fuse_file_info_t *ffi_out_,
  2908. size_t len_,
  2909. int flags_)
  2910. {
  2911. ssize_t rv;
  2912. struct fuse *f;
  2913. f = req_fuse_prepare(req_);
  2914. rv = fuse_fs_copy_file_range(f->fs,
  2915. ffi_in_,
  2916. off_in_,
  2917. ffi_out_,
  2918. off_out_,
  2919. len_,
  2920. flags_);
  2921. if(rv >= 0)
  2922. fuse_reply_write(req_,rv);
  2923. else
  2924. reply_err(req_,rv);
  2925. }
  2926. static
  2927. struct lock*
  2928. locks_conflict(struct node *node,
  2929. const struct lock *lock)
  2930. {
  2931. struct lock *l;
  2932. for(l = node->locks; l; l = l->next)
  2933. if(l->owner != lock->owner &&
  2934. lock->start <= l->end && l->start <= lock->end &&
  2935. (l->type == F_WRLCK || lock->type == F_WRLCK))
  2936. break;
  2937. return l;
  2938. }
  2939. static
  2940. void
  2941. delete_lock(struct lock **lockp)
  2942. {
  2943. struct lock *l = *lockp;
  2944. *lockp = l->next;
  2945. free(l);
  2946. }
  2947. static
  2948. void
  2949. insert_lock(struct lock **pos,
  2950. struct lock *lock)
  2951. {
  2952. lock->next = *pos;
  2953. *pos = lock;
  2954. }
/* Insert `lock` into the node's per-owner lock list, merging with
   and splitting existing ranges as POSIX record locking requires.
   Up to two scratch locks may be needed: one for the inserted range
   (newl1) and one for the upper half of a split (newl2); both are
   pre-allocated so the operation cannot fail midway.  Returns 0 or
   -ENOLCK on allocation failure. */
static
int
locks_insert(struct node *node,
             struct lock *lock)
{
  struct lock **lp;
  struct lock *newl1 = NULL;
  struct lock *newl2 = NULL;

  /* A full-range unlock never needs scratch allocations. */
  if(lock->type != F_UNLCK || lock->start != 0 || lock->end != OFFSET_MAX)
    {
      newl1 = malloc(sizeof(struct lock));
      newl2 = malloc(sizeof(struct lock));
      if(!newl1 || !newl2)
        {
          free(newl1);
          free(newl2);
          return -ENOLCK;
        }
    }

  for(lp = &node->locks; *lp;)
    {
      struct lock *l = *lp;

      /* Only locks of the same owner interact. */
      if(l->owner != lock->owner)
        goto skip;

      if(lock->type == l->type)
        {
          /* Same type: coalesce adjacent/overlapping ranges. */
          if(l->end < lock->start - 1)
            goto skip;
          if(lock->end < l->start - 1)
            break;
          if(l->start <= lock->start && lock->end <= l->end)
            goto out;                 /* fully covered already */
          if(l->start < lock->start)
            lock->start = l->start;   /* extend downward */
          if(lock->end < l->end)
            lock->end = l->end;       /* extend upward */
          goto delete;                /* absorb l into lock */
        }
      else
        {
          /* Different type: carve the new range out of l. */
          if(l->end < lock->start)
            goto skip;
          if(lock->end < l->start)
            break;
          if(lock->start <= l->start && l->end <= lock->end)
            goto delete;              /* l fully replaced */
          if(l->end <= lock->end)
            {
              l->end = lock->start - 1;   /* trim l's tail */
              goto skip;
            }
          if(lock->start <= l->start)
            {
              l->start = lock->end + 1;   /* trim l's head */
              break;
            }
          /* lock splits l in two: keep the lower part in l and put
             the upper part into newl2. */
          *newl2 = *l;
          newl2->start = lock->end + 1;
          l->end = lock->start - 1;
          insert_lock(&l->next,newl2);
          newl2 = NULL;
        }
    skip:
      lp = &l->next;
      continue;
    delete:
      delete_lock(lp);
    }

  /* Unless this was an unlock, insert the (possibly merged) range. */
  if(lock->type != F_UNLCK)
    {
      *newl1 = *lock;
      insert_lock(lp,newl1);
      newl1 = NULL;
    }
 out:
  free(newl1);
  free(newl2);
  return 0;
}
  3034. static
  3035. void
  3036. flock_to_lock(struct flock *flock,
  3037. struct lock *lock)
  3038. {
  3039. memset(lock,0,sizeof(struct lock));
  3040. lock->type = flock->l_type;
  3041. lock->start = flock->l_start;
  3042. lock->end = flock->l_len ? flock->l_start + flock->l_len - 1 : OFFSET_MAX;
  3043. lock->pid = flock->l_pid;
  3044. }
  3045. static
  3046. void
  3047. lock_to_flock(struct lock *lock,
  3048. struct flock *flock)
  3049. {
  3050. flock->l_type = lock->type;
  3051. flock->l_start = lock->start;
  3052. flock->l_len = (lock->end == OFFSET_MAX) ? 0 : lock->end - lock->start + 1;
  3053. flock->l_pid = lock->pid;
  3054. }
/* Shared implementation for flush and release(flush=1): call the
   filesystem's flush, then drop all POSIX locks held by this opener
   (F_UNLCK over the whole range), mirroring the unlock in the local
   lock bookkeeping.  If the filesystem implements lock() at all
   (errlock != -ENOSYS), a missing flush() is not an error. */
static
int
fuse_flush_common(struct fuse *f,
                  fuse_req_t req,
                  fuse_ino_t ino,
                  fuse_file_info_t *fi)
{
  struct flock lock;
  struct lock l;
  int err;
  int errlock;

  memset(&lock,0,sizeof(lock));
  lock.l_type = F_UNLCK;
  lock.l_whence = SEEK_SET;
  err = fuse_fs_flush(f->fs,fi);
  errlock = fuse_fs_lock(f->fs,fi,F_SETLK,&lock);

  if(errlock != -ENOSYS)
    {
      flock_to_lock(&lock,&l);
      l.owner = fi->lock_owner;
      pthread_mutex_lock(&f->lock);
      locks_insert(get_node(f,ino),&l);
      pthread_mutex_unlock(&f->lock);

      /* if op.lock() is defined FLUSH is needed regardless
         of op.flush() */
      if(err == -ENOSYS)
        err = 0;
    }

  return err;
}
  3085. static
  3086. void
  3087. fuse_lib_release(fuse_req_t req,
  3088. fuse_ino_t ino,
  3089. fuse_file_info_t *fi)
  3090. {
  3091. int err = 0;
  3092. struct fuse *f = req_fuse_prepare(req);
  3093. if(fi->flush)
  3094. {
  3095. err = fuse_flush_common(f,req,ino,fi);
  3096. if(err == -ENOSYS)
  3097. err = 0;
  3098. }
  3099. fuse_do_release(f,ino,fi);
  3100. reply_err(req,err);
  3101. }
  3102. static
  3103. void
  3104. fuse_lib_flush(fuse_req_t req,
  3105. fuse_ino_t ino,
  3106. fuse_file_info_t *fi)
  3107. {
  3108. int err;
  3109. struct fuse *f = req_fuse_prepare(req);
  3110. err = fuse_flush_common(f,req,ino,fi);
  3111. reply_err(req,err);
  3112. }
  3113. static
  3114. int
  3115. fuse_lock_common(fuse_req_t req,
  3116. fuse_ino_t ino,
  3117. fuse_file_info_t *fi,
  3118. struct flock *lock,
  3119. int cmd)
  3120. {
  3121. int err;
  3122. struct fuse *f = req_fuse_prepare(req);
  3123. err = fuse_fs_lock(f->fs,fi,cmd,lock);
  3124. return err;
  3125. }
/* Handle FUSE getlk: first test against this process's own lock
   bookkeeping; only if no local conflict exists is the filesystem
   queried.  On a local conflict the conflicting lock is reported
   back through `lock`. */
static
void
fuse_lib_getlk(fuse_req_t req,
               fuse_ino_t ino,
               fuse_file_info_t *fi,
               struct flock *lock)
{
  int err;
  struct lock l;
  struct lock *conflict;
  struct fuse *f = req_fuse(req);

  flock_to_lock(lock,&l);
  l.owner = fi->lock_owner;

  pthread_mutex_lock(&f->lock);
  conflict = locks_conflict(get_node(f,ino),&l);
  if(conflict)
    lock_to_flock(conflict,lock);
  pthread_mutex_unlock(&f->lock);

  if(!conflict)
    err = fuse_lock_common(req,ino,fi,lock,F_GETLK);
  else
    err = 0;

  if(!err)
    fuse_reply_lock(req,lock);
  else
    reply_err(req,err);
}
/* Handle FUSE setlk/setlkw: forward to the filesystem and, on
   success, record the lock in the local per-node bookkeeping so
   flush/release can later drop it. */
static
void
fuse_lib_setlk(fuse_req_t req,
               fuse_ino_t ino,
               fuse_file_info_t *fi,
               struct flock *lock,
               int sleep)
{
  int err = fuse_lock_common(req,ino,fi,lock,
                             sleep ? F_SETLKW : F_SETLK);

  if(!err)
    {
      struct fuse *f = req_fuse(req);
      struct lock l;

      flock_to_lock(lock,&l);
      l.owner = fi->lock_owner;
      pthread_mutex_lock(&f->lock);
      locks_insert(get_node(f,ino),&l);
      pthread_mutex_unlock(&f->lock);
    }

  reply_err(req,err);
}
  3175. static
  3176. void
  3177. fuse_lib_flock(fuse_req_t req,
  3178. fuse_ino_t ino,
  3179. fuse_file_info_t *fi,
  3180. int op)
  3181. {
  3182. int err;
  3183. struct fuse *f = req_fuse_prepare(req);
  3184. err = fuse_fs_flock(f->fs,fi,op);
  3185. reply_err(req,err);
  3186. }
  3187. static
  3188. void
  3189. fuse_lib_bmap(fuse_req_t req,
  3190. fuse_ino_t ino,
  3191. size_t blocksize,
  3192. uint64_t idx)
  3193. {
  3194. int err;
  3195. char *path;
  3196. struct fuse *f = req_fuse_prepare(req);
  3197. err = get_path(f,ino,&path);
  3198. if(!err)
  3199. {
  3200. err = fuse_fs_bmap(f->fs,path,blocksize,&idx);
  3201. free_path(f,ino,path);
  3202. }
  3203. if(!err)
  3204. fuse_reply_bmap(req,idx);
  3205. else
  3206. reply_err(req,err);
  3207. }
/* Handle FUSE ioctl in restricted mode.  Unrestricted ioctls are
   rejected with -EPERM.  For directories the real handle is pulled
   from the dirhandle.  When the kernel expects output, a buffer is
   allocated, seeded with the input (kernel guarantees matching sizes
   per the assert), and passed to the filesystem. */
static
void
fuse_lib_ioctl(fuse_req_t req,
               fuse_ino_t ino,
               unsigned long cmd,
               void *arg,
               fuse_file_info_t *llfi,
               unsigned int flags,
               const void *in_buf,
               uint32_t in_bufsz,
               uint32_t out_bufsz_)
{
  int err;
  char *out_buf = NULL;
  struct fuse *f = req_fuse_prepare(req);
  fuse_file_info_t fi;
  uint32_t out_bufsz = out_bufsz_;

  err = -EPERM;
  if(flags & FUSE_IOCTL_UNRESTRICTED)
    goto err;

  if(flags & FUSE_IOCTL_DIR)
    get_dirhandle(llfi,&fi);
  else
    fi = *llfi;

  if(out_bufsz)
    {
      err = -ENOMEM;
      out_buf = malloc(out_bufsz);
      if(!out_buf)
        goto err;
    }

  /* Restricted mode: in and out sizes match when both are present. */
  assert(!in_bufsz || !out_bufsz || in_bufsz == out_bufsz);
  if(out_buf)
    memcpy(out_buf,in_buf,in_bufsz);

  /* GNU `?:`: use out_buf when allocated, else the input buffer. */
  err = fuse_fs_ioctl(f->fs,cmd,arg,&fi,flags,
                      out_buf ?: (void *)in_buf,&out_bufsz);

  fuse_reply_ioctl(req,err,out_buf,out_bufsz);
  goto out;
 err:
  reply_err(req,err);
 out:
  free(out_buf);
}
  3251. static
  3252. void
  3253. fuse_lib_poll(fuse_req_t req,
  3254. fuse_ino_t ino,
  3255. fuse_file_info_t *fi,
  3256. fuse_pollhandle_t *ph)
  3257. {
  3258. int err;
  3259. struct fuse *f = req_fuse_prepare(req);
  3260. unsigned revents = 0;
  3261. err = fuse_fs_poll(f->fs,fi,ph,&revents);
  3262. if(!err)
  3263. fuse_reply_poll(req,revents);
  3264. else
  3265. reply_err(req,err);
  3266. }
  3267. static
  3268. void
  3269. fuse_lib_fallocate(fuse_req_t req,
  3270. fuse_ino_t ino,
  3271. int mode,
  3272. off_t offset,
  3273. off_t length,
  3274. fuse_file_info_t *fi)
  3275. {
  3276. int err;
  3277. struct fuse *f = req_fuse_prepare(req);
  3278. err = fuse_fs_fallocate(f->fs,mode,offset,length,fi);
  3279. reply_err(req,err);
  3280. }
  3281. static
  3282. int
  3283. clean_delay(struct fuse *f)
  3284. {
  3285. /*
  3286. * This is calculating the delay between clean runs. To
  3287. * reduce the number of cleans we are doing them 10 times
  3288. * within the remember window.
  3289. */
  3290. int min_sleep = 60;
  3291. int max_sleep = 3600;
  3292. int sleep_time = f->conf.remember / 10;
  3293. if(sleep_time > max_sleep)
  3294. return max_sleep;
  3295. if(sleep_time < min_sleep)
  3296. return min_sleep;
  3297. return sleep_time;
  3298. }
/* Walk the LRU list of forgotten-but-remembered nodes and expire
   entries older than the remember window.  The list is ordered by
   forget time, so the walk can stop at the first young entry.
   Returns the number of seconds until the next run (clean_delay). */
int
fuse_clean_cache(struct fuse *f)
{
  struct node_lru *lnode;
  struct list_head *curr,*next;
  struct node *node;
  struct timespec now;

  pthread_mutex_lock(&f->lock);

  curr_time(&now);

  for(curr = f->lru_table.next; curr != &f->lru_table; curr = next)
    {
      double age;

      /* Save next before the entry is potentially unlinked. */
      next = curr->next;
      lnode = list_entry(curr,struct node_lru,lru);
      node = &lnode->node;

      age = diff_timespec(&now,&lnode->forget_time);
      if(age <= f->conf.remember)
        break;                  /* list is age-ordered: rest is young */

      assert(node->nlookup == 1);

      /* Don't forget active directories */
      if(node->refctr > 1)
        continue;

      node->nlookup = 0;
      unhash_name(f,node);
      unref_node(f,node);
    }

  pthread_mutex_unlock(&f->lock);

  return clean_delay(f);
}
/* Low-level operation table for the high-level (path based) API:
   each handler resolves inodes to paths as needed and dispatches to
   the user-supplied fuse_operations. */
static struct fuse_lowlevel_ops fuse_path_ops =
  {
    .access          = fuse_lib_access,
    .bmap            = fuse_lib_bmap,
    .copy_file_range = fuse_lib_copy_file_range,
    .create          = fuse_lib_create,
    .destroy         = fuse_lib_destroy,
    .fallocate       = fuse_lib_fallocate,
    .flock           = fuse_lib_flock,
    .flush           = fuse_lib_flush,
    .forget          = fuse_lib_forget,
    .forget_multi    = fuse_lib_forget_multi,
    .fsync           = fuse_lib_fsync,
    .fsyncdir        = fuse_lib_fsyncdir,
    .getattr         = fuse_lib_getattr,
    .getlk           = fuse_lib_getlk,
    .getxattr        = fuse_lib_getxattr,
    .init            = fuse_lib_init,
    .ioctl           = fuse_lib_ioctl,
    .link            = fuse_lib_link,
    .listxattr       = fuse_lib_listxattr,
    .lookup          = fuse_lib_lookup,
    .mkdir           = fuse_lib_mkdir,
    .mknod           = fuse_lib_mknod,
    .open            = fuse_lib_open,
    .opendir         = fuse_lib_opendir,
    .poll            = fuse_lib_poll,
    .read            = fuse_lib_read,
    .readdir         = fuse_lib_readdir,
    .readdir_plus    = fuse_lib_readdir_plus,
    .readlink        = fuse_lib_readlink,
    .release         = fuse_lib_release,
    .releasedir      = fuse_lib_releasedir,
    .removexattr     = fuse_lib_removexattr,
    .rename          = fuse_lib_rename,
    .retrieve_reply  = NULL,
    .rmdir           = fuse_lib_rmdir,
    .setattr         = fuse_lib_setattr,
    .setlk           = fuse_lib_setlk,
    .setxattr        = fuse_lib_setxattr,
    .statfs          = fuse_lib_statfs,
    .symlink         = fuse_lib_symlink,
    .unlink          = fuse_lib_unlink,
    .write_buf       = fuse_lib_write_buf,
  };
/* Public wrapper: wake a poller registered through the poll handler. */
int
fuse_notify_poll(fuse_pollhandle_t *ph)
{
  return fuse_lowlevel_notify_poll(ph);
}

/* Free a queued command and its receive buffer. */
static
void
free_cmd(struct fuse_cmd *cmd)
{
  free(cmd->buf);
  free(cmd);
}

/* Run one raw request buffer through the session, then free it. */
void
fuse_process_cmd(struct fuse *f,
                 struct fuse_cmd *cmd)
{
  fuse_session_process(f->se,cmd->buf,cmd->buflen,cmd->ch);
  free_cmd(cmd);
}

/* Nonzero once session exit has been requested (see fuse_exit). */
int
fuse_exited(struct fuse *f)
{
  return fuse_session_exited(f->se);
}

/* Accessor for the underlying low-level session. */
struct fuse_session*
fuse_get_session(struct fuse *f)
{
  return f->se;
}
  3402. static
  3403. struct fuse_cmd*
  3404. fuse_alloc_cmd(size_t bufsize)
  3405. {
  3406. struct fuse_cmd *cmd = (struct fuse_cmd *)malloc(sizeof(*cmd));
  3407. if(cmd == NULL)
  3408. {
  3409. fprintf(stderr,"fuse: failed to allocate cmd\n");
  3410. return NULL;
  3411. }
  3412. cmd->buf = (char *)malloc(bufsize);
  3413. if(cmd->buf == NULL)
  3414. {
  3415. fprintf(stderr,"fuse: failed to allocate read buffer\n");
  3416. free(cmd);
  3417. return NULL;
  3418. }
  3419. return cmd;
  3420. }
/* Read one raw request from the session's channel into a freshly
   allocated command.  Returns NULL on allocation failure, on a
   transient error (EINTR/EAGAIN), or after triggering exit on a
   fatal channel error. */
struct fuse_cmd*
fuse_read_cmd(struct fuse *f)
{
  struct fuse_chan *ch = fuse_session_next_chan(f->se,NULL);
  size_t bufsize = fuse_chan_bufsize(ch);
  struct fuse_cmd *cmd = fuse_alloc_cmd(bufsize);

  if(cmd != NULL)
    {
      int res = fuse_chan_recv(&ch,cmd->buf,bufsize);
      if(res <= 0)
        {
          free_cmd(cmd);
          /* Interrupted/again are retryable; anything else is fatal. */
          if(res < 0 && res != -EINTR && res != -EAGAIN)
            fuse_exit(f);
          return NULL;
        }
      cmd->buflen = res;
      cmd->ch = ch;
    }

  return cmd;
}
/* Request termination of the session loop. */
void
fuse_exit(struct fuse *f)
{
  fuse_session_exit(f->se);
}

/* Per-thread request context (uid/gid/pid of the caller, etc.). */
struct fuse_context*
fuse_get_context(void)
{
  return &fuse_get_context_internal()->ctx;
}

/* Keys handled by fuse_lib_opt_proc. */
enum {
  KEY_HELP,
};
/* Build a fuse_opt entry targeting a field of struct fuse_config. */
#define FUSE_LIB_OPT(t,p,v) { t,offsetof(struct fuse_config,p),v }

/* Options handled by the high-level library.  "debug"/"-d" are both
   recorded here and kept (FUSE_OPT_KEY_KEEP) so lower layers also
   see them.  Paired entries (e.g. "umask=" + "umask=%o") set a
   presence flag and parse the value. */
static const struct fuse_opt fuse_lib_opts[] =
  {
    FUSE_OPT_KEY("-h",          KEY_HELP),
    FUSE_OPT_KEY("--help",      KEY_HELP),
    FUSE_OPT_KEY("debug",       FUSE_OPT_KEY_KEEP),
    FUSE_OPT_KEY("-d",          FUSE_OPT_KEY_KEEP),
    FUSE_LIB_OPT("debug",       debug,1),
    FUSE_LIB_OPT("-d",          debug,1),
    FUSE_LIB_OPT("umask=",      set_mode,1),
    FUSE_LIB_OPT("umask=%o",    umask,0),
    FUSE_LIB_OPT("uid=",        set_uid,1),
    FUSE_LIB_OPT("uid=%d",      uid,0),
    FUSE_LIB_OPT("gid=",        set_gid,1),
    FUSE_LIB_OPT("gid=%d",      gid,0),
    FUSE_LIB_OPT("noforget",    remember,-1),
    FUSE_LIB_OPT("remember=%u", remember,0),
    FUSE_LIB_OPT("threads=%d",  threads,0),
    FUSE_LIB_OPT("use_ino",     use_ino,1),
    FUSE_OPT_END
  };
/* Print the library's mount-option help text to stderr. */
static void fuse_lib_help(void)
{
  fprintf(stderr,
          "    -o umask=M             set file permissions (octal)\n"
          "    -o uid=N               set file owner\n"
          "    -o gid=N               set file group\n"
          "    -o noforget            never forget cached inodes\n"
          "    -o remember=T          remember cached inodes for T seconds (0s)\n"
          "    -o threads=NUM         number of worker threads. 0 = autodetect.\n"
          "                           Negative values autodetect then divide by\n"
          "                           absolute value. default = 0\n"
          "\n");
}
  3489. static
  3490. int
  3491. fuse_lib_opt_proc(void *data,
  3492. const char *arg,
  3493. int key,
  3494. struct fuse_args *outargs)
  3495. {
  3496. (void)arg; (void)outargs;
  3497. if(key == KEY_HELP)
  3498. {
  3499. struct fuse_config *conf = (struct fuse_config *)data;
  3500. fuse_lib_help();
  3501. conf->help = 1;
  3502. }
  3503. return 1;
  3504. }
/* Nonzero if `opt` is consumed by either the low-level or the
   high-level option tables. */
int
fuse_is_lib_option(const char *opt)
{
  return fuse_lowlevel_is_lib_option(opt) || fuse_opt_match(fuse_lib_opts,opt);
}
  3510. struct fuse_fs*
  3511. fuse_fs_new(const struct fuse_operations *op,
  3512. size_t op_size)
  3513. {
  3514. struct fuse_fs *fs;
  3515. if(sizeof(struct fuse_operations) < op_size)
  3516. {
  3517. fprintf(stderr,"fuse: warning: library too old,some operations may not not work\n");
  3518. op_size = sizeof(struct fuse_operations);
  3519. }
  3520. fs = (struct fuse_fs *)calloc(1,sizeof(struct fuse_fs));
  3521. if(!fs)
  3522. {
  3523. fprintf(stderr,"fuse: failed to allocate fuse_fs object\n");
  3524. return NULL;
  3525. }
  3526. if(op)
  3527. memcpy(&fs->op,op,op_size);
  3528. return fs;
  3529. }
  3530. static
  3531. int
  3532. node_table_init(struct node_table *t)
  3533. {
  3534. t->size = NODE_TABLE_MIN_SIZE;
  3535. t->array = (struct node **)calloc(1,sizeof(struct node *) * t->size);
  3536. if(t->array == NULL)
  3537. {
  3538. fprintf(stderr,"fuse: memory allocation failed\n");
  3539. return -1;
  3540. }
  3541. t->use = 0;
  3542. t->split = 0;
  3543. return 0;
  3544. }
/* Cleanup-thread body: repeatedly expire remembered nodes, sleeping
   for the interval suggested by fuse_clean_cache.  Runs until the
   thread is cancelled (see fuse_stop_cleanup_thread). */
static
void*
fuse_prune_nodes(void *fuse)
{
  struct fuse *f = fuse;
  int sleep_time;

  while(1)
    {
      sleep_time = fuse_clean_cache(f);
      sleep(sleep_time);
    }

  return NULL;
}

/* Start the prune thread, but only when node LRU tracking is on
   (i.e. "remember" is configured). */
int
fuse_start_cleanup_thread(struct fuse *f)
{
  if(lru_enabled(f))
    return fuse_start_thread(&f->prune_thread,fuse_prune_nodes,f);

  return 0;
}
/* Stop and join the prune thread.  Cancelling while holding f->lock
   guarantees the thread is not cancelled in the middle of a
   fuse_clean_cache pass that holds the same lock. */
void
fuse_stop_cleanup_thread(struct fuse *f)
{
  if(lru_enabled(f))
    {
      pthread_mutex_lock(&f->lock);
      pthread_cancel(f->prune_thread);
      pthread_mutex_unlock(&f->lock);
      pthread_join(f->prune_thread,NULL);
    }
}
/* Construct a struct fuse: wrap the caller's operations, parse
   library options, create the low-level session, initialize the
   node tables and the root node.  On any failure the goto chain
   unwinds exactly what has been set up so far and NULL is returned. */
struct fuse*
fuse_new_common(struct fuse_chan *ch,
                struct fuse_args *args,
                const struct fuse_operations *op,
                size_t op_size)
{
  struct fuse *f;
  struct node *root;
  struct fuse_fs *fs;
  struct fuse_lowlevel_ops llop = fuse_path_ops;

  if(fuse_create_context_key() == -1)
    goto out;

  f = (struct fuse *)calloc(1,sizeof(struct fuse));
  if(f == NULL)
    {
      fprintf(stderr,"fuse: failed to allocate fuse object\n");
      goto out_delete_context_key;
    }

  fs = fuse_fs_new(op,op_size);
  if(!fs)
    goto out_free;

  f->fs = fs;

  /* Only advertise POSIX locking to the kernel when the filesystem
     actually implements op.lock. */
  if(!fs->op.lock)
    {
      llop.getlk = NULL;
      llop.setlk = NULL;
    }

  f->pagesize = getpagesize();
  init_list_head(&f->partial_slabs);
  init_list_head(&f->full_slabs);
  init_list_head(&f->lru_table);

  if(fuse_opt_parse(args,&f->conf,fuse_lib_opts,fuse_lib_opt_proc) == -1)
    goto out_free_fs;

  f->se = fuse_lowlevel_new_common(args,&llop,sizeof(llop),f);
  if(f->se == NULL)
    goto out_free_fs;

  fuse_session_add_chan(f->se,ch);

  /* Trace topmost layer by default */
  /* Randomize the generation so stale kernel handles from a prior
     instance are unlikely to match. */
  srand(time(NULL));
  f->ctr = 0;
  f->generation = rand64();

  if(node_table_init(&f->name_table) == -1)
    goto out_free_session;

  if(node_table_init(&f->id_table) == -1)
    goto out_free_name_table;

  fuse_mutex_init(&f->lock);

  root = alloc_node(f);
  if(root == NULL)
    {
      fprintf(stderr,"fuse: memory allocation failed\n");
      goto out_free_id_table;
    }

  if(lru_enabled(f))
    {
      struct node_lru *lnode = node_lru(root);
      init_list_head(&lnode->lru);
    }

  strcpy(root->inline_name,"/");
  root->name = root->inline_name;
  root->parent = NULL;
  root->nodeid = FUSE_ROOT_ID;
  inc_nlookup(root);
  hash_id(f,root);

  return f;

 out_free_id_table:
  free(f->id_table.array);
 out_free_name_table:
  free(f->name_table.array);
 out_free_session:
  fuse_session_destroy(f->se);
 out_free_fs:
  /* Horrible compatibility hack to stop the destructor from being
     called on the filesystem without init being called first */
  fs->op.destroy = NULL;
  fuse_fs_destroy(f->fs);
 out_free:
  free(f);
 out_delete_context_key:
  fuse_delete_context_key();
 out:
  return NULL;
}
/* Public constructor: thin wrapper around fuse_new_common. */
struct fuse*
fuse_new(struct fuse_chan *ch,
         struct fuse_args *args,
         const struct fuse_operations *op,
         size_t op_size)
{
  return fuse_new_common(ch,args,op,op_size);
}
/* Tear down a struct fuse.  Two passes over the id table: first
   release hidden (unlinked-but-open) file handles while a fuse
   context still exists, then free every node.  Finally the tables,
   lock, session and context key are destroyed. */
void
fuse_destroy(struct fuse *f)
{
  size_t i;

  if(f->fs)
    {
      struct fuse_context_i *c = fuse_get_context_internal();

      /* Provide a minimal context for fuse_fs_free_hide callbacks. */
      memset(c,0,sizeof(*c));
      c->ctx.fuse = f;

      for(i = 0; i < f->id_table.size; i++)
        {
          struct node *node;

          for(node = f->id_table.array[i]; node != NULL; node = node->id_next)
            {
              if(node->is_hidden)
                fuse_fs_free_hide(f->fs,node->hidden_fh);
            }
        }
    }

  for(i = 0; i < f->id_table.size; i++)
    {
      struct node *node;
      struct node *next;

      /* Save the link before the node is freed. */
      for(node = f->id_table.array[i]; node != NULL; node = next)
        {
          next = node->id_next;
          free_node(f,node);
          f->id_table.use--;
        }
    }

  assert(list_empty(&f->partial_slabs));
  assert(list_empty(&f->full_slabs));

  free(f->id_table.array);
  free(f->name_table.array);
  pthread_mutex_destroy(&f->lock);
  fuse_session_destroy(f->se);
  free(f);
  fuse_delete_context_key();
}
/* Accessor for the configured worker thread count ("threads=" opt). */
int
fuse_config_num_threads(const struct fuse *fuse_)
{
  return fuse_->conf.threads;
}