You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

4553 lines
102 KiB

  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU LGPLv2.
  5. See the file COPYING.LIB
  6. */
  7. /* For pthread_rwlock_t */
  8. #define _GNU_SOURCE
  9. #include "config.h"
  10. #include "fuse_i.h"
  11. #include "fuse_lowlevel.h"
  12. #include "fuse_opt.h"
  13. #include "fuse_misc.h"
  14. #include "fuse_kernel.h"
  15. #include "fuse_dirents.h"
  16. #include <assert.h>
  17. #include <dlfcn.h>
  18. #include <errno.h>
  19. #include <fcntl.h>
  20. #include <limits.h>
  21. #include <poll.h>
  22. #include <signal.h>
  23. #include <stdbool.h>
  24. #include <stddef.h>
  25. #include <stdint.h>
  26. #include <stdio.h>
  27. #include <stdlib.h>
  28. #include <string.h>
  29. #include <sys/file.h>
  30. #include <sys/mman.h>
  31. #include <sys/param.h>
  32. #include <sys/time.h>
  33. #include <sys/uio.h>
  34. #include <time.h>
  35. #include <unistd.h>
  36. #define FUSE_NODE_SLAB 1
  37. #ifndef MAP_ANONYMOUS
  38. #undef FUSE_NODE_SLAB
  39. #endif
  40. #define FUSE_DEFAULT_INTR_SIGNAL SIGUSR1
  41. #define FUSE_UNKNOWN_INO UINT64_MAX
  42. #define OFFSET_MAX 0x7fffffffffffffffLL
  43. #define NODE_TABLE_MIN_SIZE 8192
  44. struct fuse_config
  45. {
  46. unsigned int uid;
  47. unsigned int gid;
  48. unsigned int umask;
  49. int remember;
  50. int debug;
  51. int use_ino;
  52. int set_mode;
  53. int set_uid;
  54. int set_gid;
  55. int intr;
  56. int intr_signal;
  57. int help;
  58. int threads;
  59. };
  60. struct fuse_fs
  61. {
  62. struct fuse_operations op;
  63. void *user_data;
  64. int debug;
  65. };
  66. struct lock_queue_element
  67. {
  68. struct lock_queue_element *next;
  69. pthread_cond_t cond;
  70. fuse_ino_t nodeid1;
  71. const char *name1;
  72. char **path1;
  73. struct node **wnode1;
  74. fuse_ino_t nodeid2;
  75. const char *name2;
  76. char **path2;
  77. struct node **wnode2;
  78. int err;
  79. bool first_locked : 1;
  80. bool second_locked : 1;
  81. bool done : 1;
  82. };
  83. struct node_table
  84. {
  85. struct node **array;
  86. size_t use;
  87. size_t size;
  88. size_t split;
  89. };
  90. #define container_of(ptr, type, member) ({ \
  91. const typeof( ((type *)0)->member ) *__mptr = (ptr); \
  92. (type *)( (char *)__mptr - offsetof(type,member) );})
  93. #define list_entry(ptr, type, member) \
  94. container_of(ptr, type, member)
  95. struct list_head
  96. {
  97. struct list_head *next;
  98. struct list_head *prev;
  99. };
  100. struct node_slab
  101. {
  102. struct list_head list; /* must be the first member */
  103. struct list_head freelist;
  104. int used;
  105. };
  106. struct fuse
  107. {
  108. struct fuse_session *se;
  109. struct node_table name_table;
  110. struct node_table id_table;
  111. struct list_head lru_table;
  112. fuse_ino_t ctr;
  113. uint64_t generation;
  114. unsigned int hidectr;
  115. pthread_mutex_t lock;
  116. struct fuse_config conf;
  117. int intr_installed;
  118. struct fuse_fs *fs;
  119. struct lock_queue_element *lockq;
  120. int pagesize;
  121. struct list_head partial_slabs;
  122. struct list_head full_slabs;
  123. pthread_t prune_thread;
  124. };
  125. struct lock
  126. {
  127. int type;
  128. off_t start;
  129. off_t end;
  130. pid_t pid;
  131. uint64_t owner;
  132. struct lock *next;
  133. };
  134. struct node
  135. {
  136. struct node *name_next;
  137. struct node *id_next;
  138. fuse_ino_t nodeid;
  139. uint64_t generation;
  140. int refctr;
  141. struct node *parent;
  142. char *name;
  143. uint64_t nlookup;
  144. int open_count;
  145. struct lock *locks;
  146. uint64_t hidden_fh;
  147. char is_hidden;
  148. int treelock;
  149. struct stat stat_cache;
  150. char stat_cache_valid;
  151. char inline_name[32];
  152. };
  153. #define TREELOCK_WRITE -1
  154. #define TREELOCK_WAIT_OFFSET INT_MIN
  155. struct node_lru
  156. {
  157. struct node node;
  158. struct list_head lru;
  159. struct timespec forget_time;
  160. };
  161. struct fuse_dh
  162. {
  163. pthread_mutex_t lock;
  164. uint64_t fh;
  165. fuse_dirents_t d;
  166. };
  167. struct fuse_context_i
  168. {
  169. struct fuse_context ctx;
  170. fuse_req_t req;
  171. };
  172. static pthread_key_t fuse_context_key;
  173. static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER;
  174. static int fuse_context_ref;
  175. static
  176. void
  177. init_list_head(struct list_head *list)
  178. {
  179. list->next = list;
  180. list->prev = list;
  181. }
  182. static
  183. int
  184. list_empty(const struct list_head *head)
  185. {
  186. return head->next == head;
  187. }
  188. static
  189. void
  190. list_add(struct list_head *new,
  191. struct list_head *prev,
  192. struct list_head *next)
  193. {
  194. next->prev = new;
  195. new->next = next;
  196. new->prev = prev;
  197. prev->next = new;
  198. }
  199. static
  200. inline
  201. void
  202. list_add_head(struct list_head *new,
  203. struct list_head *head)
  204. {
  205. list_add(new, head, head->next);
  206. }
  207. static
  208. inline
  209. void
  210. list_add_tail(struct list_head *new,
  211. struct list_head *head)
  212. {
  213. list_add(new, head->prev, head);
  214. }
  215. static
  216. inline
  217. void
  218. list_del(struct list_head *entry)
  219. {
  220. struct list_head *prev = entry->prev;
  221. struct list_head *next = entry->next;
  222. next->prev = prev;
  223. prev->next = next;
  224. }
  225. static
  226. inline
  227. int
  228. lru_enabled(struct fuse *f)
  229. {
  230. return f->conf.remember > 0;
  231. }
  232. static
  233. struct
  234. node_lru*
  235. node_lru(struct node *node)
  236. {
  237. return (struct node_lru*)node;
  238. }
  239. static
  240. size_t
  241. get_node_size(struct fuse *f)
  242. {
  243. if (lru_enabled(f))
  244. return sizeof(struct node_lru);
  245. else
  246. return sizeof(struct node);
  247. }
  248. #ifdef FUSE_NODE_SLAB
  249. static
  250. struct node_slab*
  251. list_to_slab(struct list_head *head)
  252. {
  253. return (struct node_slab *) head;
  254. }
  255. static
  256. struct node_slab*
  257. node_to_slab(struct fuse *f, struct node *node)
  258. {
  259. return (struct node_slab *) (((uintptr_t) node) & ~((uintptr_t) f->pagesize - 1));
  260. }
  261. static
  262. int
  263. alloc_slab(struct fuse *f)
  264. {
  265. void *mem;
  266. struct node_slab *slab;
  267. char *start;
  268. size_t num;
  269. size_t i;
  270. size_t node_size = get_node_size(f);
  271. mem = mmap(NULL, f->pagesize, PROT_READ | PROT_WRITE,
  272. MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  273. if (mem == MAP_FAILED)
  274. return -1;
  275. slab = mem;
  276. init_list_head(&slab->freelist);
  277. slab->used = 0;
  278. num = (f->pagesize - sizeof(struct node_slab)) / node_size;
  279. start = (char *) mem + f->pagesize - num * node_size;
  280. for (i = 0; i < num; i++) {
  281. struct list_head *n;
  282. n = (struct list_head *) (start + i * node_size);
  283. list_add_tail(n, &slab->freelist);
  284. }
  285. list_add_tail(&slab->list, &f->partial_slabs);
  286. return 0;
  287. }
  288. static
  289. struct node*
  290. alloc_node(struct fuse *f)
  291. {
  292. struct node_slab *slab;
  293. struct list_head *node;
  294. if (list_empty(&f->partial_slabs)) {
  295. int res = alloc_slab(f);
  296. if (res != 0)
  297. return NULL;
  298. }
  299. slab = list_to_slab(f->partial_slabs.next);
  300. slab->used++;
  301. node = slab->freelist.next;
  302. list_del(node);
  303. if (list_empty(&slab->freelist)) {
  304. list_del(&slab->list);
  305. list_add_tail(&slab->list, &f->full_slabs);
  306. }
  307. memset(node, 0, sizeof(struct node));
  308. return (struct node *) node;
  309. }
  310. static
  311. void
  312. free_slab(struct fuse *f,
  313. struct node_slab *slab)
  314. {
  315. int res;
  316. list_del(&slab->list);
  317. res = munmap(slab, f->pagesize);
  318. if (res == -1)
  319. fprintf(stderr, "fuse warning: munmap(%p) failed\n", slab);
  320. }
  321. static
  322. void
  323. free_node_mem(struct fuse *f,
  324. struct node *node)
  325. {
  326. struct node_slab *slab = node_to_slab(f, node);
  327. struct list_head *n = (struct list_head *) node;
  328. slab->used--;
  329. if (slab->used) {
  330. if (list_empty(&slab->freelist)) {
  331. list_del(&slab->list);
  332. list_add_tail(&slab->list, &f->partial_slabs);
  333. }
  334. list_add_head(n, &slab->freelist);
  335. } else {
  336. free_slab(f, slab);
  337. }
  338. }
  339. #else
  340. static
  341. struct node*
  342. alloc_node(struct fuse *f)
  343. {
  344. return (struct node *) calloc(1, get_node_size(f));
  345. }
  346. static
  347. void
  348. free_node_mem(struct fuse *f,
  349. struct node *node)
  350. {
  351. (void) f;
  352. free(node);
  353. }
  354. #endif
  355. static
  356. size_t
  357. id_hash(struct fuse *f,
  358. fuse_ino_t ino)
  359. {
  360. uint64_t hash = ((uint32_t) ino * 2654435761U) % f->id_table.size;
  361. uint64_t oldhash = hash % (f->id_table.size / 2);
  362. if (oldhash >= f->id_table.split)
  363. return oldhash;
  364. else
  365. return hash;
  366. }
  367. static
  368. struct node*
  369. get_node_nocheck(struct fuse *f,
  370. fuse_ino_t nodeid)
  371. {
  372. size_t hash = id_hash(f, nodeid);
  373. struct node *node;
  374. for (node = f->id_table.array[hash]; node != NULL; node = node->id_next)
  375. if (node->nodeid == nodeid)
  376. return node;
  377. return NULL;
  378. }
  379. static
  380. struct node*
  381. get_node(struct fuse *f,
  382. const fuse_ino_t nodeid)
  383. {
  384. struct node *node = get_node_nocheck(f, nodeid);
  385. if(!node)
  386. {
  387. fprintf(stderr, "fuse internal error: node %llu not found\n",
  388. (unsigned long long) nodeid);
  389. abort();
  390. }
  391. return node;
  392. }
  393. static void curr_time(struct timespec *now);
  394. static double diff_timespec(const struct timespec *t1,
  395. const struct timespec *t2);
  396. static
  397. void
  398. remove_node_lru(struct node *node)
  399. {
  400. struct node_lru *lnode = node_lru(node);
  401. list_del(&lnode->lru);
  402. init_list_head(&lnode->lru);
  403. }
  404. static
  405. void
  406. set_forget_time(struct fuse *f,
  407. struct node *node)
  408. {
  409. struct node_lru *lnode = node_lru(node);
  410. list_del(&lnode->lru);
  411. list_add_tail(&lnode->lru, &f->lru_table);
  412. curr_time(&lnode->forget_time);
  413. }
  414. static
  415. void
  416. free_node(struct fuse *f_,
  417. struct node *node_)
  418. {
  419. if(node_->name != node_->inline_name)
  420. free(node_->name);
  421. if(node_->is_hidden)
  422. fuse_fs_free_hide(f_->fs,node_->hidden_fh);
  423. free_node_mem(f_,node_);
  424. }
  425. static
  426. void
  427. node_table_reduce(struct node_table *t)
  428. {
  429. size_t newsize = t->size / 2;
  430. void *newarray;
  431. if (newsize < NODE_TABLE_MIN_SIZE)
  432. return;
  433. newarray = realloc(t->array, sizeof(struct node *) * newsize);
  434. if (newarray != NULL)
  435. t->array = newarray;
  436. t->size = newsize;
  437. t->split = t->size / 2;
  438. }
  439. static
  440. void
  441. remerge_id(struct fuse *f)
  442. {
  443. struct node_table *t = &f->id_table;
  444. int iter;
  445. if (t->split == 0)
  446. node_table_reduce(t);
  447. for (iter = 8; t->split > 0 && iter; iter--) {
  448. struct node **upper;
  449. t->split--;
  450. upper = &t->array[t->split + t->size / 2];
  451. if (*upper) {
  452. struct node **nodep;
  453. for (nodep = &t->array[t->split]; *nodep;
  454. nodep = &(*nodep)->id_next);
  455. *nodep = *upper;
  456. *upper = NULL;
  457. break;
  458. }
  459. }
  460. }
  461. static
  462. void
  463. unhash_id(struct fuse *f, struct node *node)
  464. {
  465. struct node **nodep = &f->id_table.array[id_hash(f, node->nodeid)];
  466. for (; *nodep != NULL; nodep = &(*nodep)->id_next)
  467. if (*nodep == node) {
  468. *nodep = node->id_next;
  469. f->id_table.use--;
  470. if(f->id_table.use < f->id_table.size / 4)
  471. remerge_id(f);
  472. return;
  473. }
  474. }
  475. static int node_table_resize(struct node_table *t)
  476. {
  477. size_t newsize = t->size * 2;
  478. void *newarray;
  479. newarray = realloc(t->array, sizeof(struct node *) * newsize);
  480. if (newarray == NULL)
  481. return -1;
  482. t->array = newarray;
  483. memset(t->array + t->size, 0, t->size * sizeof(struct node *));
  484. t->size = newsize;
  485. t->split = 0;
  486. return 0;
  487. }
  488. static void rehash_id(struct fuse *f)
  489. {
  490. struct node_table *t = &f->id_table;
  491. struct node **nodep;
  492. struct node **next;
  493. size_t hash;
  494. if (t->split == t->size / 2)
  495. return;
  496. hash = t->split;
  497. t->split++;
  498. for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
  499. struct node *node = *nodep;
  500. size_t newhash = id_hash(f, node->nodeid);
  501. if (newhash != hash) {
  502. next = nodep;
  503. *nodep = node->id_next;
  504. node->id_next = t->array[newhash];
  505. t->array[newhash] = node;
  506. } else {
  507. next = &node->id_next;
  508. }
  509. }
  510. if (t->split == t->size / 2)
  511. node_table_resize(t);
  512. }
  513. static void hash_id(struct fuse *f, struct node *node)
  514. {
  515. size_t hash = id_hash(f, node->nodeid);
  516. node->id_next = f->id_table.array[hash];
  517. f->id_table.array[hash] = node;
  518. f->id_table.use++;
  519. if (f->id_table.use >= f->id_table.size / 2)
  520. rehash_id(f);
  521. }
  522. static size_t name_hash(struct fuse *f, fuse_ino_t parent,
  523. const char *name)
  524. {
  525. uint64_t hash = parent;
  526. uint64_t oldhash;
  527. for (; *name; name++)
  528. hash = hash * 31 + (unsigned char) *name;
  529. hash %= f->name_table.size;
  530. oldhash = hash % (f->name_table.size / 2);
  531. if (oldhash >= f->name_table.split)
  532. return oldhash;
  533. else
  534. return hash;
  535. }
  536. static void unref_node(struct fuse *f, struct node *node);
  537. static void remerge_name(struct fuse *f)
  538. {
  539. struct node_table *t = &f->name_table;
  540. int iter;
  541. if (t->split == 0)
  542. node_table_reduce(t);
  543. for (iter = 8; t->split > 0 && iter; iter--) {
  544. struct node **upper;
  545. t->split--;
  546. upper = &t->array[t->split + t->size / 2];
  547. if (*upper) {
  548. struct node **nodep;
  549. for (nodep = &t->array[t->split]; *nodep;
  550. nodep = &(*nodep)->name_next);
  551. *nodep = *upper;
  552. *upper = NULL;
  553. break;
  554. }
  555. }
  556. }
  557. static void unhash_name(struct fuse *f, struct node *node)
  558. {
  559. if (node->name) {
  560. size_t hash = name_hash(f, node->parent->nodeid, node->name);
  561. struct node **nodep = &f->name_table.array[hash];
  562. for (; *nodep != NULL; nodep = &(*nodep)->name_next)
  563. if (*nodep == node) {
  564. *nodep = node->name_next;
  565. node->name_next = NULL;
  566. unref_node(f, node->parent);
  567. if (node->name != node->inline_name)
  568. free(node->name);
  569. node->name = NULL;
  570. node->parent = NULL;
  571. f->name_table.use--;
  572. if (f->name_table.use < f->name_table.size / 4)
  573. remerge_name(f);
  574. return;
  575. }
  576. fprintf(stderr,
  577. "fuse internal error: unable to unhash node: %llu\n",
  578. (unsigned long long) node->nodeid);
  579. abort();
  580. }
  581. }
  582. static void rehash_name(struct fuse *f)
  583. {
  584. struct node_table *t = &f->name_table;
  585. struct node **nodep;
  586. struct node **next;
  587. size_t hash;
  588. if (t->split == t->size / 2)
  589. return;
  590. hash = t->split;
  591. t->split++;
  592. for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
  593. struct node *node = *nodep;
  594. size_t newhash = name_hash(f, node->parent->nodeid, node->name);
  595. if (newhash != hash) {
  596. next = nodep;
  597. *nodep = node->name_next;
  598. node->name_next = t->array[newhash];
  599. t->array[newhash] = node;
  600. } else {
  601. next = &node->name_next;
  602. }
  603. }
  604. if (t->split == t->size / 2)
  605. node_table_resize(t);
  606. }
  607. static int hash_name(struct fuse *f, struct node *node, fuse_ino_t parentid,
  608. const char *name)
  609. {
  610. size_t hash = name_hash(f, parentid, name);
  611. struct node *parent = get_node(f, parentid);
  612. if (strlen(name) < sizeof(node->inline_name)) {
  613. strcpy(node->inline_name, name);
  614. node->name = node->inline_name;
  615. } else {
  616. node->name = strdup(name);
  617. if (node->name == NULL)
  618. return -1;
  619. }
  620. parent->refctr ++;
  621. node->parent = parent;
  622. node->name_next = f->name_table.array[hash];
  623. f->name_table.array[hash] = node;
  624. f->name_table.use++;
  625. if (f->name_table.use >= f->name_table.size / 2)
  626. rehash_name(f);
  627. return 0;
  628. }
  629. static void delete_node(struct fuse *f, struct node *node)
  630. {
  631. if (f->conf.debug)
  632. fprintf(stderr, "DELETE: %llu\n",
  633. (unsigned long long) node->nodeid);
  634. assert(node->treelock == 0);
  635. unhash_name(f, node);
  636. if (lru_enabled(f))
  637. remove_node_lru(node);
  638. unhash_id(f, node);
  639. free_node(f, node);
  640. }
  641. static void unref_node(struct fuse *f, struct node *node)
  642. {
  643. assert(node->refctr > 0);
  644. node->refctr --;
  645. if (!node->refctr)
  646. delete_node(f, node);
  647. }
  648. static
  649. uint64_t
  650. rand64(void)
  651. {
  652. uint64_t rv;
  653. rv = rand();
  654. rv <<= 32;
  655. rv |= rand();
  656. return rv;
  657. }
  658. static
  659. fuse_ino_t
  660. next_id(struct fuse *f)
  661. {
  662. do
  663. {
  664. f->ctr = ((f->ctr + 1) & UINT64_MAX);
  665. if(f->ctr == 0)
  666. f->generation++;
  667. } while((f->ctr == 0) ||
  668. (f->ctr == FUSE_UNKNOWN_INO) ||
  669. (get_node_nocheck(f, f->ctr) != NULL));
  670. return f->ctr;
  671. }
  672. static struct node *lookup_node(struct fuse *f, fuse_ino_t parent,
  673. const char *name)
  674. {
  675. size_t hash = name_hash(f, parent, name);
  676. struct node *node;
  677. for (node = f->name_table.array[hash]; node != NULL; node = node->name_next)
  678. if (node->parent->nodeid == parent &&
  679. strcmp(node->name, name) == 0)
  680. return node;
  681. return NULL;
  682. }
  683. static void inc_nlookup(struct node *node)
  684. {
  685. if (!node->nlookup)
  686. node->refctr++;
  687. node->nlookup++;
  688. }
  689. static struct node *find_node(struct fuse *f, fuse_ino_t parent,
  690. const char *name)
  691. {
  692. struct node *node;
  693. pthread_mutex_lock(&f->lock);
  694. if (!name)
  695. node = get_node(f, parent);
  696. else
  697. node = lookup_node(f, parent, name);
  698. if (node == NULL) {
  699. node = alloc_node(f);
  700. if (node == NULL)
  701. goto out_err;
  702. node->nodeid = next_id(f);
  703. node->generation = f->generation;
  704. if (f->conf.remember)
  705. inc_nlookup(node);
  706. if (hash_name(f, node, parent, name) == -1) {
  707. free_node(f, node);
  708. node = NULL;
  709. goto out_err;
  710. }
  711. hash_id(f, node);
  712. if (lru_enabled(f)) {
  713. struct node_lru *lnode = node_lru(node);
  714. init_list_head(&lnode->lru);
  715. }
  716. } else if (lru_enabled(f) && node->nlookup == 1) {
  717. remove_node_lru(node);
  718. }
  719. inc_nlookup(node);
  720. out_err:
  721. pthread_mutex_unlock(&f->lock);
  722. return node;
  723. }
  724. static char *add_name(char **buf, unsigned *bufsize, char *s, const char *name)
  725. {
  726. size_t len = strlen(name);
  727. if (s - len <= *buf) {
  728. unsigned pathlen = *bufsize - (s - *buf);
  729. unsigned newbufsize = *bufsize;
  730. char *newbuf;
  731. while (newbufsize < pathlen + len + 1) {
  732. if (newbufsize >= 0x80000000)
  733. newbufsize = 0xffffffff;
  734. else
  735. newbufsize *= 2;
  736. }
  737. newbuf = realloc(*buf, newbufsize);
  738. if (newbuf == NULL)
  739. return NULL;
  740. *buf = newbuf;
  741. s = newbuf + newbufsize - pathlen;
  742. memmove(s, newbuf + *bufsize - pathlen, pathlen);
  743. *bufsize = newbufsize;
  744. }
  745. s -= len;
  746. strncpy(s, name, len);
  747. s--;
  748. *s = '/';
  749. return s;
  750. }
  751. static void unlock_path(struct fuse *f, fuse_ino_t nodeid, struct node *wnode,
  752. struct node *end)
  753. {
  754. struct node *node;
  755. if (wnode) {
  756. assert(wnode->treelock == TREELOCK_WRITE);
  757. wnode->treelock = 0;
  758. }
  759. for (node = get_node(f, nodeid);
  760. node != end && node->nodeid != FUSE_ROOT_ID; node = node->parent) {
  761. assert(node->treelock != 0);
  762. assert(node->treelock != TREELOCK_WAIT_OFFSET);
  763. assert(node->treelock != TREELOCK_WRITE);
  764. node->treelock--;
  765. if (node->treelock == TREELOCK_WAIT_OFFSET)
  766. node->treelock = 0;
  767. }
  768. }
  769. static int try_get_path(struct fuse *f, fuse_ino_t nodeid, const char *name,
  770. char **path, struct node **wnodep, bool need_lock)
  771. {
  772. unsigned bufsize = 256;
  773. char *buf;
  774. char *s;
  775. struct node *node;
  776. struct node *wnode = NULL;
  777. int err;
  778. *path = NULL;
  779. err = -ENOMEM;
  780. buf = malloc(bufsize);
  781. if (buf == NULL)
  782. goto out_err;
  783. s = buf + bufsize - 1;
  784. *s = '\0';
  785. if (name != NULL) {
  786. s = add_name(&buf, &bufsize, s, name);
  787. err = -ENOMEM;
  788. if (s == NULL)
  789. goto out_free;
  790. }
  791. if (wnodep) {
  792. assert(need_lock);
  793. wnode = lookup_node(f, nodeid, name);
  794. if (wnode) {
  795. if (wnode->treelock != 0) {
  796. if (wnode->treelock > 0)
  797. wnode->treelock += TREELOCK_WAIT_OFFSET;
  798. err = -EAGAIN;
  799. goto out_free;
  800. }
  801. wnode->treelock = TREELOCK_WRITE;
  802. }
  803. }
  804. for (node = get_node(f, nodeid); node->nodeid != FUSE_ROOT_ID;
  805. node = node->parent) {
  806. err = -ENOENT;
  807. if (node->name == NULL || node->parent == NULL)
  808. goto out_unlock;
  809. err = -ENOMEM;
  810. s = add_name(&buf, &bufsize, s, node->name);
  811. if (s == NULL)
  812. goto out_unlock;
  813. if (need_lock) {
  814. err = -EAGAIN;
  815. if (node->treelock < 0)
  816. goto out_unlock;
  817. node->treelock++;
  818. }
  819. }
  820. if (s[0])
  821. memmove(buf, s, bufsize - (s - buf));
  822. else
  823. strcpy(buf, "/");
  824. *path = buf;
  825. if (wnodep)
  826. *wnodep = wnode;
  827. return 0;
  828. out_unlock:
  829. if (need_lock)
  830. unlock_path(f, nodeid, wnode, node);
  831. out_free:
  832. free(buf);
  833. out_err:
  834. return err;
  835. }
  836. static void queue_element_unlock(struct fuse *f, struct lock_queue_element *qe)
  837. {
  838. struct node *wnode;
  839. if (qe->first_locked) {
  840. wnode = qe->wnode1 ? *qe->wnode1 : NULL;
  841. unlock_path(f, qe->nodeid1, wnode, NULL);
  842. qe->first_locked = false;
  843. }
  844. if (qe->second_locked) {
  845. wnode = qe->wnode2 ? *qe->wnode2 : NULL;
  846. unlock_path(f, qe->nodeid2, wnode, NULL);
  847. qe->second_locked = false;
  848. }
  849. }
  850. static void queue_element_wakeup(struct fuse *f, struct lock_queue_element *qe)
  851. {
  852. int err;
  853. bool first = (qe == f->lockq);
  854. if (!qe->path1) {
  855. /* Just waiting for it to be unlocked */
  856. if (get_node(f, qe->nodeid1)->treelock == 0)
  857. pthread_cond_signal(&qe->cond);
  858. return;
  859. }
  860. if (!qe->first_locked) {
  861. err = try_get_path(f, qe->nodeid1, qe->name1, qe->path1,
  862. qe->wnode1, true);
  863. if (!err)
  864. qe->first_locked = true;
  865. else if (err != -EAGAIN)
  866. goto err_unlock;
  867. }
  868. if (!qe->second_locked && qe->path2) {
  869. err = try_get_path(f, qe->nodeid2, qe->name2, qe->path2,
  870. qe->wnode2, true);
  871. if (!err)
  872. qe->second_locked = true;
  873. else if (err != -EAGAIN)
  874. goto err_unlock;
  875. }
  876. if (qe->first_locked && (qe->second_locked || !qe->path2)) {
  877. err = 0;
  878. goto done;
  879. }
  880. /*
  881. * Only let the first element be partially locked otherwise there could
  882. * be a deadlock.
  883. *
  884. * But do allow the first element to be partially locked to prevent
  885. * starvation.
  886. */
  887. if (!first)
  888. queue_element_unlock(f, qe);
  889. /* keep trying */
  890. return;
  891. err_unlock:
  892. queue_element_unlock(f, qe);
  893. done:
  894. qe->err = err;
  895. qe->done = true;
  896. pthread_cond_signal(&qe->cond);
  897. }
  898. static void wake_up_queued(struct fuse *f)
  899. {
  900. struct lock_queue_element *qe;
  901. for (qe = f->lockq; qe != NULL; qe = qe->next)
  902. queue_element_wakeup(f, qe);
  903. }
  904. static void debug_path(struct fuse *f, const char *msg, fuse_ino_t nodeid,
  905. const char *name, bool wr)
  906. {
  907. if (f->conf.debug) {
  908. struct node *wnode = NULL;
  909. if (wr)
  910. wnode = lookup_node(f, nodeid, name);
  911. if (wnode)
  912. fprintf(stderr, "%s %li (w)\n", msg, wnode->nodeid);
  913. else
  914. fprintf(stderr, "%s %li\n", msg, nodeid);
  915. }
  916. }
  917. static void queue_path(struct fuse *f, struct lock_queue_element *qe)
  918. {
  919. struct lock_queue_element **qp;
  920. qe->done = false;
  921. qe->first_locked = false;
  922. qe->second_locked = false;
  923. pthread_cond_init(&qe->cond, NULL);
  924. qe->next = NULL;
  925. for (qp = &f->lockq; *qp != NULL; qp = &(*qp)->next);
  926. *qp = qe;
  927. }
  928. static void dequeue_path(struct fuse *f, struct lock_queue_element *qe)
  929. {
  930. struct lock_queue_element **qp;
  931. pthread_cond_destroy(&qe->cond);
  932. for (qp = &f->lockq; *qp != qe; qp = &(*qp)->next);
  933. *qp = qe->next;
  934. }
  935. static int wait_path(struct fuse *f, struct lock_queue_element *qe)
  936. {
  937. queue_path(f, qe);
  938. do {
  939. pthread_cond_wait(&qe->cond, &f->lock);
  940. } while (!qe->done);
  941. dequeue_path(f, qe);
  942. return qe->err;
  943. }
  944. static int get_path_common(struct fuse *f, fuse_ino_t nodeid, const char *name,
  945. char **path, struct node **wnode)
  946. {
  947. int err;
  948. pthread_mutex_lock(&f->lock);
  949. err = try_get_path(f, nodeid, name, path, wnode, true);
  950. if (err == -EAGAIN) {
  951. struct lock_queue_element qe = {
  952. .nodeid1 = nodeid,
  953. .name1 = name,
  954. .path1 = path,
  955. .wnode1 = wnode,
  956. };
  957. debug_path(f, "QUEUE PATH", nodeid, name, !!wnode);
  958. err = wait_path(f, &qe);
  959. debug_path(f, "DEQUEUE PATH", nodeid, name, !!wnode);
  960. }
  961. pthread_mutex_unlock(&f->lock);
  962. return err;
  963. }
  964. static int get_path(struct fuse *f, fuse_ino_t nodeid, char **path)
  965. {
  966. return get_path_common(f, nodeid, NULL, path, NULL);
  967. }
  968. static int get_path_name(struct fuse *f, fuse_ino_t nodeid, const char *name,
  969. char **path)
  970. {
  971. return get_path_common(f, nodeid, name, path, NULL);
  972. }
  973. static int get_path_wrlock(struct fuse *f, fuse_ino_t nodeid, const char *name,
  974. char **path, struct node **wnode)
  975. {
  976. return get_path_common(f, nodeid, name, path, wnode);
  977. }
  978. static int try_get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
  979. fuse_ino_t nodeid2, const char *name2,
  980. char **path1, char **path2,
  981. struct node **wnode1, struct node **wnode2)
  982. {
  983. int err;
  984. /* FIXME: locking two paths needs deadlock checking */
  985. err = try_get_path(f, nodeid1, name1, path1, wnode1, true);
  986. if (!err) {
  987. err = try_get_path(f, nodeid2, name2, path2, wnode2, true);
  988. if (err) {
  989. struct node *wn1 = wnode1 ? *wnode1 : NULL;
  990. unlock_path(f, nodeid1, wn1, NULL);
  991. free(*path1);
  992. }
  993. }
  994. return err;
  995. }
  996. static int get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
  997. fuse_ino_t nodeid2, const char *name2,
  998. char **path1, char **path2,
  999. struct node **wnode1, struct node **wnode2)
  1000. {
  1001. int err;
  1002. pthread_mutex_lock(&f->lock);
  1003. err = try_get_path2(f, nodeid1, name1, nodeid2, name2,
  1004. path1, path2, wnode1, wnode2);
  1005. if (err == -EAGAIN) {
  1006. struct lock_queue_element qe = {
  1007. .nodeid1 = nodeid1,
  1008. .name1 = name1,
  1009. .path1 = path1,
  1010. .wnode1 = wnode1,
  1011. .nodeid2 = nodeid2,
  1012. .name2 = name2,
  1013. .path2 = path2,
  1014. .wnode2 = wnode2,
  1015. };
  1016. debug_path(f, "QUEUE PATH1", nodeid1, name1, !!wnode1);
  1017. debug_path(f, " PATH2", nodeid2, name2, !!wnode2);
  1018. err = wait_path(f, &qe);
  1019. debug_path(f, "DEQUEUE PATH1", nodeid1, name1, !!wnode1);
  1020. debug_path(f, " PATH2", nodeid2, name2, !!wnode2);
  1021. }
  1022. pthread_mutex_unlock(&f->lock);
  1023. return err;
  1024. }
  1025. static void free_path_wrlock(struct fuse *f, fuse_ino_t nodeid,
  1026. struct node *wnode, char *path)
  1027. {
  1028. pthread_mutex_lock(&f->lock);
  1029. unlock_path(f, nodeid, wnode, NULL);
  1030. if (f->lockq)
  1031. wake_up_queued(f);
  1032. pthread_mutex_unlock(&f->lock);
  1033. free(path);
  1034. }
  1035. static void free_path(struct fuse *f, fuse_ino_t nodeid, char *path)
  1036. {
  1037. if (path)
  1038. free_path_wrlock(f, nodeid, NULL, path);
  1039. }
  1040. static void free_path2(struct fuse *f, fuse_ino_t nodeid1, fuse_ino_t nodeid2,
  1041. struct node *wnode1, struct node *wnode2,
  1042. char *path1, char *path2)
  1043. {
  1044. pthread_mutex_lock(&f->lock);
  1045. unlock_path(f, nodeid1, wnode1, NULL);
  1046. unlock_path(f, nodeid2, wnode2, NULL);
  1047. wake_up_queued(f);
  1048. pthread_mutex_unlock(&f->lock);
  1049. free(path1);
  1050. free(path2);
  1051. }
  1052. static
  1053. void
  1054. forget_node(struct fuse *f,
  1055. const fuse_ino_t nodeid,
  1056. const uint64_t nlookup)
  1057. {
  1058. struct node *node;
  1059. if(nodeid == FUSE_ROOT_ID)
  1060. return;
  1061. pthread_mutex_lock(&f->lock);
  1062. node = get_node(f, nodeid);
  1063. /*
  1064. * Node may still be locked due to interrupt idiocy in open,
  1065. * create and opendir
  1066. */
  1067. while(node->nlookup == nlookup && node->treelock)
  1068. {
  1069. struct lock_queue_element qe = {
  1070. .nodeid1 = nodeid,
  1071. };
  1072. debug_path(f, "QUEUE PATH (forget)", nodeid, NULL, false);
  1073. queue_path(f, &qe);
  1074. do
  1075. {
  1076. pthread_cond_wait(&qe.cond, &f->lock);
  1077. }
  1078. while((node->nlookup == nlookup) && node->treelock);
  1079. dequeue_path(f, &qe);
  1080. debug_path(f, "DEQUEUE_PATH (forget)", nodeid, NULL, false);
  1081. }
  1082. assert(node->nlookup >= nlookup);
  1083. node->nlookup -= nlookup;
  1084. if(!node->nlookup)
  1085. unref_node(f, node);
  1086. else if(lru_enabled(f) && node->nlookup == 1)
  1087. set_forget_time(f, node);
  1088. pthread_mutex_unlock(&f->lock);
  1089. }
  1090. static void unlink_node(struct fuse *f, struct node *node)
  1091. {
  1092. if (f->conf.remember) {
  1093. assert(node->nlookup > 1);
  1094. node->nlookup--;
  1095. }
  1096. unhash_name(f, node);
  1097. }
  1098. static void remove_node(struct fuse *f, fuse_ino_t dir, const char *name)
  1099. {
  1100. struct node *node;
  1101. pthread_mutex_lock(&f->lock);
  1102. node = lookup_node(f, dir, name);
  1103. if (node != NULL)
  1104. unlink_node(f, node);
  1105. pthread_mutex_unlock(&f->lock);
  1106. }
/*
 * Move the node named (olddir, oldname) to (newdir, newname) in the
 * in-memory name hash.  An existing entry at the destination is unlinked
 * first, matching POSIX rename() overwrite semantics.  A missing source
 * entry is silently ignored (returns 0).
 *
 * Returns 0 on success, -ENOMEM if the new name could not be hashed.
 * NOTE(review): on that failure the node stays unhashed rather than being
 * restored under its old name -- confirm callers treat this as fatal.
 */
static int rename_node(struct fuse *f, fuse_ino_t olddir, const char *oldname,
		       fuse_ino_t newdir, const char *newname)
{
	struct node *node;
	struct node *newnode;
	int err = 0;
	pthread_mutex_lock(&f->lock);
	node = lookup_node(f, olddir, oldname);
	newnode = lookup_node(f, newdir, newname);
	if (node == NULL)
		goto out;
	if (newnode != NULL)
		unlink_node(f, newnode);
	unhash_name(f, node);
	if (hash_name(f, node, newdir, newname) == -1) {
		err = -ENOMEM;
		goto out;
	}
out:
	pthread_mutex_unlock(&f->lock);
	return err;
}
  1129. static void set_stat(struct fuse *f, fuse_ino_t nodeid, struct stat *stbuf)
  1130. {
  1131. if (!f->conf.use_ino)
  1132. stbuf->st_ino = nodeid;
  1133. if (f->conf.set_mode)
  1134. stbuf->st_mode = (stbuf->st_mode & S_IFMT) |
  1135. (0777 & ~f->conf.umask);
  1136. if (f->conf.set_uid)
  1137. stbuf->st_uid = f->conf.uid;
  1138. if (f->conf.set_gid)
  1139. stbuf->st_gid = f->conf.gid;
  1140. }
  1141. static struct fuse *req_fuse(fuse_req_t req)
  1142. {
  1143. return (struct fuse *) fuse_req_userdata(req);
  1144. }
  1145. static void fuse_intr_sighandler(int sig)
  1146. {
  1147. (void) sig;
  1148. /* Nothing to do */
  1149. }
/* State shared between a request-handling thread and fuse_interrupt(). */
struct fuse_intr_data {
	pthread_t id;		/* thread currently handling the request */
	pthread_cond_t cond;	/* signalled when the request completes */
	int finished;		/* set (under f->lock) once the request is done */
};
/*
 * INTERRUPT callback registered via fuse_req_interrupt_func(): keep
 * signalling the thread that is handling the request until it marks the
 * request finished.  The one-second timedwait re-sends the signal
 * periodically in case an earlier signal raced with the handler entering
 * a blocking call.  Self-interruption is a no-op.
 */
static void fuse_interrupt(fuse_req_t req, void *d_)
{
	struct fuse_intr_data *d = d_;
	struct fuse *f = req_fuse(req);
	if (d->id == pthread_self())
		return;
	pthread_mutex_lock(&f->lock);
	while (!d->finished) {
		struct timeval now;
		struct timespec timeout;
		pthread_kill(d->id, f->conf.intr_signal);
		/* pthread_cond_timedwait takes an absolute deadline. */
		gettimeofday(&now, NULL);
		timeout.tv_sec = now.tv_sec + 1;
		timeout.tv_nsec = now.tv_usec * 1000;
		pthread_cond_timedwait(&d->cond, &f->lock, &timeout);
	}
	pthread_mutex_unlock(&f->lock);
}
/*
 * Mark the request finished and wake any fuse_interrupt() waiters.
 * The flag is flipped under f->lock so waiters cannot miss the wakeup;
 * the interrupt callback is unregistered before the condvar is destroyed
 * so no new waiter can arrive in between.
 */
static void fuse_do_finish_interrupt(struct fuse *f, fuse_req_t req,
				     struct fuse_intr_data *d)
{
	pthread_mutex_lock(&f->lock);
	d->finished = 1;
	pthread_cond_broadcast(&d->cond);
	pthread_mutex_unlock(&f->lock);
	fuse_req_interrupt_func(req, NULL, NULL);
	pthread_cond_destroy(&d->cond);
}
  1183. static void fuse_do_prepare_interrupt(fuse_req_t req, struct fuse_intr_data *d)
  1184. {
  1185. d->id = pthread_self();
  1186. pthread_cond_init(&d->cond, NULL);
  1187. d->finished = 0;
  1188. fuse_req_interrupt_func(req, fuse_interrupt, d);
  1189. }
  1190. static inline void fuse_finish_interrupt(struct fuse *f, fuse_req_t req,
  1191. struct fuse_intr_data *d)
  1192. {
  1193. if (f->conf.intr)
  1194. fuse_do_finish_interrupt(f, req, d);
  1195. }
  1196. static inline void fuse_prepare_interrupt(struct fuse *f, fuse_req_t req,
  1197. struct fuse_intr_data *d)
  1198. {
  1199. if (f->conf.intr)
  1200. fuse_do_prepare_interrupt(req, d);
  1201. }
  1202. int
  1203. fuse_fs_getattr(struct fuse_fs *fs,
  1204. const char *path,
  1205. struct stat *buf,
  1206. fuse_timeouts_t *timeout)
  1207. {
  1208. if(fs->op.getattr == NULL)
  1209. return -ENOSYS;
  1210. if(fs->debug)
  1211. fprintf(stderr,"getattr %s\n",path);
  1212. fuse_get_context()->private_data = fs->user_data;
  1213. return fs->op.getattr(path,buf,timeout);
  1214. }
  1215. int
  1216. fuse_fs_fgetattr(struct fuse_fs *fs,
  1217. struct stat *buf,
  1218. struct fuse_file_info *fi,
  1219. fuse_timeouts_t *timeout)
  1220. {
  1221. if(fs->op.fgetattr == NULL)
  1222. return -ENOSYS;
  1223. fuse_get_context()->private_data = fs->user_data;
  1224. if(fs->debug)
  1225. fprintf(stderr,"fgetattr[%llu]\n",(unsigned long long)fi->fh);
  1226. return fs->op.fgetattr(buf,fi,timeout);
  1227. }
  1228. int
  1229. fuse_fs_rename(struct fuse_fs *fs,
  1230. const char *oldpath,
  1231. const char *newpath)
  1232. {
  1233. fuse_get_context()->private_data = fs->user_data;
  1234. if(fs->op.rename)
  1235. return fs->op.rename(oldpath, newpath);
  1236. return -ENOSYS;
  1237. }
  1238. int
  1239. fuse_fs_prepare_hide(struct fuse_fs *fs_,
  1240. const char *path_,
  1241. uint64_t *fh_)
  1242. {
  1243. fuse_get_context()->private_data = fs_->user_data;
  1244. if(fs_->op.prepare_hide)
  1245. return fs_->op.prepare_hide(path_,fh_);
  1246. return -ENOSYS;
  1247. }
  1248. int
  1249. fuse_fs_free_hide(struct fuse_fs *fs_,
  1250. uint64_t fh_)
  1251. {
  1252. fuse_get_context()->private_data = fs_->user_data;
  1253. if(fs_->op.free_hide)
  1254. return fs_->op.free_hide(fh_);
  1255. return -ENOSYS;
  1256. }
  1257. int fuse_fs_unlink(struct fuse_fs *fs, const char *path)
  1258. {
  1259. fuse_get_context()->private_data = fs->user_data;
  1260. if (fs->op.unlink) {
  1261. if (fs->debug)
  1262. fprintf(stderr, "unlink %s\n", path);
  1263. return fs->op.unlink(path);
  1264. } else {
  1265. return -ENOSYS;
  1266. }
  1267. }
  1268. int fuse_fs_rmdir(struct fuse_fs *fs, const char *path)
  1269. {
  1270. fuse_get_context()->private_data = fs->user_data;
  1271. if (fs->op.rmdir) {
  1272. if (fs->debug)
  1273. fprintf(stderr, "rmdir %s\n", path);
  1274. return fs->op.rmdir(path);
  1275. } else {
  1276. return -ENOSYS;
  1277. }
  1278. }
  1279. int
  1280. fuse_fs_symlink(struct fuse_fs *fs_,
  1281. const char *linkname_,
  1282. const char *path_,
  1283. struct stat *st_,
  1284. fuse_timeouts_t *timeouts_)
  1285. {
  1286. fuse_get_context()->private_data = fs_->user_data;
  1287. if(fs_->op.symlink == NULL)
  1288. return -ENOSYS;
  1289. if(fs_->debug)
  1290. fprintf(stderr,"symlink %s %s\n",linkname_,path_);
  1291. return fs_->op.symlink(linkname_,path_,st_,timeouts_);
  1292. }
  1293. int fuse_fs_link(struct fuse_fs *fs, const char *oldpath, const char *newpath)
  1294. {
  1295. fuse_get_context()->private_data = fs->user_data;
  1296. if (fs->op.link) {
  1297. if (fs->debug)
  1298. fprintf(stderr, "link %s %s\n", oldpath, newpath);
  1299. return fs->op.link(oldpath, newpath);
  1300. } else {
  1301. return -ENOSYS;
  1302. }
  1303. }
  1304. int fuse_fs_release(struct fuse_fs *fs,
  1305. struct fuse_file_info *fi)
  1306. {
  1307. fuse_get_context()->private_data = fs->user_data;
  1308. if (fs->op.release) {
  1309. if (fs->debug)
  1310. fprintf(stderr, "release%s[%llu] flags: 0x%x\n",
  1311. fi->flush ? "+flush" : "",
  1312. (unsigned long long) fi->fh, fi->flags);
  1313. return fs->op.release(fi);
  1314. } else {
  1315. return 0;
  1316. }
  1317. }
  1318. int fuse_fs_opendir(struct fuse_fs *fs, const char *path,
  1319. struct fuse_file_info *fi)
  1320. {
  1321. fuse_get_context()->private_data = fs->user_data;
  1322. if (fs->op.opendir) {
  1323. int err;
  1324. if (fs->debug)
  1325. fprintf(stderr, "opendir flags: 0x%x %s\n", fi->flags,
  1326. path);
  1327. err = fs->op.opendir(path,fi);
  1328. if (fs->debug && !err)
  1329. fprintf(stderr, " opendir[%lli] flags: 0x%x %s\n",
  1330. (unsigned long long) fi->fh, fi->flags, path);
  1331. return err;
  1332. } else {
  1333. return 0;
  1334. }
  1335. }
  1336. int fuse_fs_open(struct fuse_fs *fs, const char *path,
  1337. struct fuse_file_info *fi)
  1338. {
  1339. fuse_get_context()->private_data = fs->user_data;
  1340. if (fs->op.open) {
  1341. int err;
  1342. if (fs->debug)
  1343. fprintf(stderr, "open flags: 0x%x %s\n", fi->flags,
  1344. path);
  1345. err = fs->op.open(path,fi);
  1346. if (fs->debug && !err)
  1347. fprintf(stderr, " open[%lli] flags: 0x%x %s\n",
  1348. (unsigned long long) fi->fh, fi->flags, path);
  1349. return err;
  1350. } else {
  1351. return 0;
  1352. }
  1353. }
  1354. static void fuse_free_buf(struct fuse_bufvec *buf)
  1355. {
  1356. if (buf != NULL) {
  1357. size_t i;
  1358. for (i = 0; i < buf->count; i++)
  1359. free(buf->buf[i].mem);
  1360. free(buf);
  1361. }
  1362. }
/*
 * Read into a bufvec.  Prefers the zero-copy read_buf operation; when the
 * filesystem only implements plain read(), allocate a flat buffer of
 * `size` bytes here, wrap it in a single-element bufvec and hand that
 * back (*bufp is owned by the caller, freed via fuse_free_buf()).
 *
 * Returns 0 on success (result in *bufp), a negative errno on failure,
 * -ENOSYS when neither read nor read_buf is implemented.
 */
int fuse_fs_read_buf(struct fuse_fs *fs,
		     struct fuse_bufvec **bufp, size_t size, off_t off,
		     struct fuse_file_info *fi)
{
	fuse_get_context()->private_data = fs->user_data;
	if (fs->op.read || fs->op.read_buf) {
		int res;
		if (fs->debug)
			fprintf(stderr,
				"read[%llu] %zu bytes from %llu flags: 0x%x\n",
				(unsigned long long) fi->fh,
				size, (unsigned long long) off, fi->flags);
		if (fs->op.read_buf) {
			res = fs->op.read_buf(bufp, size, off, fi);
		} else {
			/* Emulate read_buf on top of plain read(). */
			struct fuse_bufvec *buf;
			void *mem;
			buf = malloc(sizeof(struct fuse_bufvec));
			if (buf == NULL)
				return -ENOMEM;
			mem = malloc(size);
			if (mem == NULL) {
				free(buf);
				return -ENOMEM;
			}
			*buf = FUSE_BUFVEC_INIT(size);
			buf->buf[0].mem = mem;
			*bufp = buf;
			res = fs->op.read(mem, size, off, fi);
			/* Plain read() returns a byte count; record it. */
			if (res >= 0)
				buf->buf[0].size = res;
		}
		if (fs->debug && res >= 0)
			fprintf(stderr, " read[%llu] %zu bytes from %llu\n",
				(unsigned long long) fi->fh,
				fuse_buf_size(*bufp),
				(unsigned long long) off);
		/* Sanity check: the fs must not return more than requested. */
		if (res >= 0 && fuse_buf_size(*bufp) > (int) size)
			fprintf(stderr, "fuse: read too many bytes\n");
		if (res < 0)
			return res;
		return 0;
	} else {
		return -ENOSYS;
	}
}
  1409. int fuse_fs_read(struct fuse_fs *fs, char *mem, size_t size,
  1410. off_t off, struct fuse_file_info *fi)
  1411. {
  1412. int res;
  1413. struct fuse_bufvec *buf = NULL;
  1414. res = fuse_fs_read_buf(fs, &buf, size, off, fi);
  1415. if (res == 0) {
  1416. struct fuse_bufvec dst = FUSE_BUFVEC_INIT(size);
  1417. dst.buf[0].mem = mem;
  1418. res = fuse_buf_copy(&dst, buf, 0);
  1419. }
  1420. fuse_free_buf(buf);
  1421. return res;
  1422. }
/*
 * Write from a bufvec.  Prefers the zero-copy write_buf operation; when
 * the filesystem only implements plain write(), flatten the bufvec into
 * one contiguous buffer first (skipped when it is already a single
 * in-memory buffer).
 *
 * Returns the byte count (or negative errno) from the handler, -ENOSYS
 * when neither write nor write_buf is implemented.
 *
 * NOTE(review): the goto labels live inside the outer if-block; control
 * flow is deliberate but fragile -- do not restructure casually.
 */
int fuse_fs_write_buf(struct fuse_fs *fs,
		      struct fuse_bufvec *buf, off_t off,
		      struct fuse_file_info *fi)
{
	fuse_get_context()->private_data = fs->user_data;
	if (fs->op.write_buf || fs->op.write) {
		int res;
		size_t size = fuse_buf_size(buf);
		assert(buf->idx == 0 && buf->off == 0);
		if (fs->debug)
			fprintf(stderr,
				"write%s[%llu] %zu bytes to %llu flags: 0x%x\n",
				fi->writepage ? "page" : "",
				(unsigned long long) fi->fh,
				size,
				(unsigned long long) off,
				fi->flags);
		if (fs->op.write_buf) {
			res = fs->op.write_buf(buf, off, fi);
		} else {
			void *mem = NULL;
			struct fuse_buf *flatbuf;
			struct fuse_bufvec tmp = FUSE_BUFVEC_INIT(size);
			/* A single in-memory buffer needs no flattening. */
			if (buf->count == 1 &&
			    !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
				flatbuf = &buf->buf[0];
			} else {
				res = -ENOMEM;
				mem = malloc(size);
				if (mem == NULL)
					goto out;
				tmp.buf[0].mem = mem;
				res = fuse_buf_copy(&tmp, buf, 0);
				if (res <= 0)
					goto out_free;
				/* Only the copied prefix is valid data. */
				tmp.buf[0].size = res;
				flatbuf = &tmp.buf[0];
			}
			res = fs->op.write(flatbuf->mem, flatbuf->size,
					   off, fi);
out_free:
			free(mem);
		}
out:
		if (fs->debug && res >= 0)
			fprintf(stderr, " write%s[%llu] %u bytes to %llu\n",
				fi->writepage ? "page" : "",
				(unsigned long long) fi->fh, res,
				(unsigned long long) off);
		/* Sanity check: the fs must not claim more than was given. */
		if (res > (int) size)
			fprintf(stderr, "fuse: wrote too many bytes\n");
		return res;
	} else {
		return -ENOSYS;
	}
}
  1479. int fuse_fs_write(struct fuse_fs *fs, const char *mem,
  1480. size_t size, off_t off, struct fuse_file_info *fi)
  1481. {
  1482. struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(size);
  1483. bufv.buf[0].mem = (void *) mem;
  1484. return fuse_fs_write_buf(fs, &bufv, off, fi);
  1485. }
  1486. int fuse_fs_fsync(struct fuse_fs *fs, int datasync,
  1487. struct fuse_file_info *fi)
  1488. {
  1489. fuse_get_context()->private_data = fs->user_data;
  1490. if (fs->op.fsync) {
  1491. if (fs->debug)
  1492. fprintf(stderr, "fsync[%llu] datasync: %i\n",
  1493. (unsigned long long) fi->fh, datasync);
  1494. return fs->op.fsync(datasync, fi);
  1495. } else {
  1496. return -ENOSYS;
  1497. }
  1498. }
  1499. int fuse_fs_fsyncdir(struct fuse_fs *fs, int datasync,
  1500. struct fuse_file_info *fi)
  1501. {
  1502. fuse_get_context()->private_data = fs->user_data;
  1503. if (fs->op.fsyncdir) {
  1504. if (fs->debug)
  1505. fprintf(stderr, "fsyncdir[%llu] datasync: %i\n",
  1506. (unsigned long long) fi->fh, datasync);
  1507. return fs->op.fsyncdir(datasync, fi);
  1508. } else {
  1509. return -ENOSYS;
  1510. }
  1511. }
  1512. int fuse_fs_flush(struct fuse_fs *fs,
  1513. struct fuse_file_info *fi)
  1514. {
  1515. fuse_get_context()->private_data = fs->user_data;
  1516. if (fs->op.flush) {
  1517. if (fs->debug)
  1518. fprintf(stderr, "flush[%llu]\n",
  1519. (unsigned long long) fi->fh);
  1520. return fs->op.flush(fi);
  1521. } else {
  1522. return -ENOSYS;
  1523. }
  1524. }
  1525. int fuse_fs_statfs(struct fuse_fs *fs, const char *path, struct statvfs *buf)
  1526. {
  1527. fuse_get_context()->private_data = fs->user_data;
  1528. if (fs->op.statfs) {
  1529. if (fs->debug)
  1530. fprintf(stderr, "statfs %s\n", path);
  1531. return fs->op.statfs(path,buf);
  1532. } else {
  1533. buf->f_namemax = 255;
  1534. buf->f_bsize = 512;
  1535. return 0;
  1536. }
  1537. }
  1538. int fuse_fs_releasedir(struct fuse_fs *fs,
  1539. struct fuse_file_info *fi)
  1540. {
  1541. fuse_get_context()->private_data = fs->user_data;
  1542. if (fs->op.releasedir) {
  1543. if (fs->debug)
  1544. fprintf(stderr, "releasedir[%llu] flags: 0x%x\n",
  1545. (unsigned long long) fi->fh, fi->flags);
  1546. return fs->op.releasedir(fi);
  1547. } else {
  1548. return 0;
  1549. }
  1550. }
  1551. int
  1552. fuse_fs_readdir(struct fuse_fs *fs,
  1553. struct fuse_file_info *fi,
  1554. fuse_dirents_t *buf)
  1555. {
  1556. if(fs->op.readdir == NULL)
  1557. return -ENOSYS;
  1558. fuse_get_context()->private_data = fs->user_data;
  1559. return fs->op.readdir(fi,buf);
  1560. }
  1561. int
  1562. fuse_fs_readdir_plus(struct fuse_fs *fs_,
  1563. struct fuse_file_info *ffi_,
  1564. fuse_dirents_t *buf_)
  1565. {
  1566. if(fs_->op.readdir_plus == NULL)
  1567. return -ENOSYS;
  1568. fuse_get_context()->private_data = fs_->user_data;
  1569. return fs_->op.readdir_plus(ffi_,buf_);
  1570. }
  1571. int fuse_fs_create(struct fuse_fs *fs, const char *path, mode_t mode,
  1572. struct fuse_file_info *fi)
  1573. {
  1574. fuse_get_context()->private_data = fs->user_data;
  1575. if (fs->op.create) {
  1576. int err;
  1577. if (fs->debug)
  1578. fprintf(stderr,
  1579. "create flags: 0x%x %s 0%o umask=0%03o\n",
  1580. fi->flags, path, mode,
  1581. fuse_get_context()->umask);
  1582. err = fs->op.create(path, mode, fi);
  1583. if (fs->debug && !err)
  1584. fprintf(stderr, " create[%llu] flags: 0x%x %s\n",
  1585. (unsigned long long) fi->fh, fi->flags, path);
  1586. return err;
  1587. } else {
  1588. return -ENOSYS;
  1589. }
  1590. }
/*
 * Forward a POSIX byte-range lock request (F_GETLK/F_SETLK/F_SETLKW)
 * to the filesystem's lock handler; -ENOSYS when not implemented.
 */
int fuse_fs_lock(struct fuse_fs *fs,
		 struct fuse_file_info *fi, int cmd, struct flock *lock)
{
	fuse_get_context()->private_data = fs->user_data;
	if (fs->op.lock) {
		/* Decode cmd and lock type purely for the debug trace. */
		if (fs->debug)
			fprintf(stderr, "lock[%llu] %s %s start: %llu len: %llu pid: %llu\n",
				(unsigned long long) fi->fh,
				(cmd == F_GETLK ? "F_GETLK" :
				 (cmd == F_SETLK ? "F_SETLK" :
				  (cmd == F_SETLKW ? "F_SETLKW" : "???"))),
				(lock->l_type == F_RDLCK ? "F_RDLCK" :
				 (lock->l_type == F_WRLCK ? "F_WRLCK" :
				  (lock->l_type == F_UNLCK ? "F_UNLCK" :
				   "???"))),
				(unsigned long long) lock->l_start,
				(unsigned long long) lock->l_len,
				(unsigned long long) lock->l_pid);
		return fs->op.lock(fi, cmd, lock);
	} else {
		return -ENOSYS;
	}
}
  1614. int fuse_fs_flock(struct fuse_fs *fs,
  1615. struct fuse_file_info *fi, int op)
  1616. {
  1617. fuse_get_context()->private_data = fs->user_data;
  1618. if (fs->op.flock) {
  1619. if (fs->debug) {
  1620. int xop = op & ~LOCK_NB;
  1621. fprintf(stderr, "lock[%llu] %s%s\n",
  1622. (unsigned long long) fi->fh,
  1623. xop == LOCK_SH ? "LOCK_SH" :
  1624. (xop == LOCK_EX ? "LOCK_EX" :
  1625. (xop == LOCK_UN ? "LOCK_UN" : "???")),
  1626. (op & LOCK_NB) ? "|LOCK_NB" : "");
  1627. }
  1628. return fs->op.flock(fi, op);
  1629. } else {
  1630. return -ENOSYS;
  1631. }
  1632. }
  1633. int fuse_fs_chown(struct fuse_fs *fs, const char *path, uid_t uid, gid_t gid)
  1634. {
  1635. fuse_get_context()->private_data = fs->user_data;
  1636. if (fs->op.chown) {
  1637. if (fs->debug)
  1638. fprintf(stderr, "chown %s %lu %lu\n", path,
  1639. (unsigned long) uid, (unsigned long) gid);
  1640. return fs->op.chown(path, uid, gid);
  1641. } else {
  1642. return -ENOSYS;
  1643. }
  1644. }
  1645. int
  1646. fuse_fs_fchown(struct fuse_fs *fs_,
  1647. const struct fuse_file_info *ffi_,
  1648. const uid_t uid_,
  1649. const gid_t gid_)
  1650. {
  1651. fuse_get_context()->private_data = fs_->user_data;
  1652. if(fs_->op.fchown)
  1653. return fs_->op.fchown(ffi_,uid_,gid_);
  1654. return -ENOSYS;
  1655. }
  1656. int fuse_fs_truncate(struct fuse_fs *fs, const char *path, off_t size)
  1657. {
  1658. fuse_get_context()->private_data = fs->user_data;
  1659. if (fs->op.truncate) {
  1660. if (fs->debug)
  1661. fprintf(stderr, "truncate %s %llu\n", path,
  1662. (unsigned long long) size);
  1663. return fs->op.truncate(path, size);
  1664. } else {
  1665. return -ENOSYS;
  1666. }
  1667. }
  1668. int fuse_fs_ftruncate(struct fuse_fs *fs, off_t size,
  1669. struct fuse_file_info *fi)
  1670. {
  1671. fuse_get_context()->private_data = fs->user_data;
  1672. if (fs->debug)
  1673. fprintf(stderr, "ftruncate[%llu] %llu\n",
  1674. (unsigned long long) fi->fh,
  1675. (unsigned long long) size);
  1676. return fs->op.ftruncate(size, fi);
  1677. }
  1678. int fuse_fs_utimens(struct fuse_fs *fs, const char *path,
  1679. const struct timespec tv[2])
  1680. {
  1681. fuse_get_context()->private_data = fs->user_data;
  1682. if (fs->op.utimens) {
  1683. if (fs->debug)
  1684. fprintf(stderr, "utimens %s %li.%09lu %li.%09lu\n",
  1685. path, tv[0].tv_sec, tv[0].tv_nsec,
  1686. tv[1].tv_sec, tv[1].tv_nsec);
  1687. return fs->op.utimens(path, tv);
  1688. } else if(fs->op.utime) {
  1689. struct utimbuf buf;
  1690. if (fs->debug)
  1691. fprintf(stderr, "utime %s %li %li\n", path,
  1692. tv[0].tv_sec, tv[1].tv_sec);
  1693. buf.actime = tv[0].tv_sec;
  1694. buf.modtime = tv[1].tv_sec;
  1695. return fs->op.utime(path, &buf);
  1696. } else {
  1697. return -ENOSYS;
  1698. }
  1699. }
  1700. int
  1701. fuse_fs_futimens(struct fuse_fs *fs_,
  1702. const struct fuse_file_info *ffi_,
  1703. const struct timespec tv_[2])
  1704. {
  1705. fuse_get_context()->private_data = fs_->user_data;
  1706. if(fs_->op.futimens)
  1707. return fs_->op.futimens(ffi_,tv_);
  1708. return -ENOSYS;
  1709. }
  1710. int fuse_fs_access(struct fuse_fs *fs, const char *path, int mask)
  1711. {
  1712. fuse_get_context()->private_data = fs->user_data;
  1713. if (fs->op.access) {
  1714. if (fs->debug)
  1715. fprintf(stderr, "access %s 0%o\n", path, mask);
  1716. return fs->op.access(path, mask);
  1717. } else {
  1718. return -ENOSYS;
  1719. }
  1720. }
  1721. int fuse_fs_readlink(struct fuse_fs *fs, const char *path, char *buf,
  1722. size_t len)
  1723. {
  1724. fuse_get_context()->private_data = fs->user_data;
  1725. if (fs->op.readlink) {
  1726. if (fs->debug)
  1727. fprintf(stderr, "readlink %s %lu\n", path,
  1728. (unsigned long) len);
  1729. return fs->op.readlink(path, buf, len);
  1730. } else {
  1731. return -ENOSYS;
  1732. }
  1733. }
  1734. int fuse_fs_mknod(struct fuse_fs *fs, const char *path, mode_t mode,
  1735. dev_t rdev)
  1736. {
  1737. fuse_get_context()->private_data = fs->user_data;
  1738. if (fs->op.mknod) {
  1739. if (fs->debug)
  1740. fprintf(stderr, "mknod %s 0%o 0x%llx umask=0%03o\n",
  1741. path, mode, (unsigned long long) rdev,
  1742. fuse_get_context()->umask);
  1743. return fs->op.mknod(path, mode, rdev);
  1744. } else {
  1745. return -ENOSYS;
  1746. }
  1747. }
  1748. int fuse_fs_mkdir(struct fuse_fs *fs, const char *path, mode_t mode)
  1749. {
  1750. fuse_get_context()->private_data = fs->user_data;
  1751. if (fs->op.mkdir) {
  1752. if (fs->debug)
  1753. fprintf(stderr, "mkdir %s 0%o umask=0%03o\n",
  1754. path, mode, fuse_get_context()->umask);
  1755. return fs->op.mkdir(path, mode);
  1756. } else {
  1757. return -ENOSYS;
  1758. }
  1759. }
  1760. int fuse_fs_setxattr(struct fuse_fs *fs, const char *path, const char *name,
  1761. const char *value, size_t size, int flags)
  1762. {
  1763. fuse_get_context()->private_data = fs->user_data;
  1764. if (fs->op.setxattr) {
  1765. if (fs->debug)
  1766. fprintf(stderr, "setxattr %s %s %lu 0x%x\n",
  1767. path, name, (unsigned long) size, flags);
  1768. return fs->op.setxattr(path, name, value, size, flags);
  1769. } else {
  1770. return -ENOSYS;
  1771. }
  1772. }
  1773. int fuse_fs_getxattr(struct fuse_fs *fs, const char *path, const char *name,
  1774. char *value, size_t size)
  1775. {
  1776. fuse_get_context()->private_data = fs->user_data;
  1777. if (fs->op.getxattr) {
  1778. if (fs->debug)
  1779. fprintf(stderr, "getxattr %s %s %lu\n",
  1780. path, name, (unsigned long) size);
  1781. return fs->op.getxattr(path, name, value, size);
  1782. } else {
  1783. return -ENOSYS;
  1784. }
  1785. }
  1786. int fuse_fs_listxattr(struct fuse_fs *fs, const char *path, char *list,
  1787. size_t size)
  1788. {
  1789. fuse_get_context()->private_data = fs->user_data;
  1790. if (fs->op.listxattr) {
  1791. if (fs->debug)
  1792. fprintf(stderr, "listxattr %s %lu\n",
  1793. path, (unsigned long) size);
  1794. return fs->op.listxattr(path, list, size);
  1795. } else {
  1796. return -ENOSYS;
  1797. }
  1798. }
  1799. int fuse_fs_bmap(struct fuse_fs *fs, const char *path, size_t blocksize,
  1800. uint64_t *idx)
  1801. {
  1802. fuse_get_context()->private_data = fs->user_data;
  1803. if (fs->op.bmap) {
  1804. if (fs->debug)
  1805. fprintf(stderr, "bmap %s blocksize: %lu index: %llu\n",
  1806. path, (unsigned long) blocksize,
  1807. (unsigned long long) *idx);
  1808. return fs->op.bmap(path, blocksize, idx);
  1809. } else {
  1810. return -ENOSYS;
  1811. }
  1812. }
  1813. int fuse_fs_removexattr(struct fuse_fs *fs, const char *path, const char *name)
  1814. {
  1815. fuse_get_context()->private_data = fs->user_data;
  1816. if (fs->op.removexattr) {
  1817. if (fs->debug)
  1818. fprintf(stderr, "removexattr %s %s\n", path, name);
  1819. return fs->op.removexattr(path, name);
  1820. } else {
  1821. return -ENOSYS;
  1822. }
  1823. }
  1824. int fuse_fs_ioctl(struct fuse_fs *fs, unsigned long cmd, void *arg,
  1825. struct fuse_file_info *fi, unsigned int flags,
  1826. void *data, uint32_t *out_size)
  1827. {
  1828. fuse_get_context()->private_data = fs->user_data;
  1829. if (fs->op.ioctl) {
  1830. if (fs->debug)
  1831. fprintf(stderr, "ioctl[%llu] 0x%lx flags: 0x%x\n",
  1832. (unsigned long long) fi->fh, cmd, flags);
  1833. return fs->op.ioctl(cmd, arg, fi, flags, data, out_size);
  1834. } else
  1835. return -ENOSYS;
  1836. }
  1837. int fuse_fs_poll(struct fuse_fs *fs,
  1838. struct fuse_file_info *fi, struct fuse_pollhandle *ph,
  1839. unsigned *reventsp)
  1840. {
  1841. fuse_get_context()->private_data = fs->user_data;
  1842. if (fs->op.poll) {
  1843. int res;
  1844. if (fs->debug)
  1845. fprintf(stderr, "poll[%llu] ph: %p\n",
  1846. (unsigned long long) fi->fh, ph);
  1847. res = fs->op.poll(fi, ph, reventsp);
  1848. if (fs->debug && !res)
  1849. fprintf(stderr, " poll[%llu] revents: 0x%x\n",
  1850. (unsigned long long) fi->fh, *reventsp);
  1851. return res;
  1852. } else
  1853. return -ENOSYS;
  1854. }
  1855. int fuse_fs_fallocate(struct fuse_fs *fs, int mode,
  1856. off_t offset, off_t length, struct fuse_file_info *fi)
  1857. {
  1858. fuse_get_context()->private_data = fs->user_data;
  1859. if (fs->op.fallocate) {
  1860. if (fs->debug)
  1861. fprintf(stderr, "fallocate mode %x, offset: %llu, length: %llu\n",
  1862. mode,
  1863. (unsigned long long) offset,
  1864. (unsigned long long) length);
  1865. return fs->op.fallocate(mode, offset, length, fi);
  1866. } else
  1867. return -ENOSYS;
  1868. }
  1869. ssize_t
  1870. fuse_fs_copy_file_range(struct fuse_fs *fs_,
  1871. struct fuse_file_info *ffi_in_,
  1872. off_t off_in_,
  1873. struct fuse_file_info *ffi_out_,
  1874. off_t off_out_,
  1875. size_t len_,
  1876. int flags_)
  1877. {
  1878. fuse_get_context()->private_data = fs_->user_data;
  1879. if(fs_->op.copy_file_range == NULL)
  1880. return -ENOSYS;
  1881. return fs_->op.copy_file_range(ffi_in_,
  1882. off_in_,
  1883. ffi_out_,
  1884. off_out_,
  1885. len_,
  1886. flags_);
  1887. }
  1888. int
  1889. node_open(const struct node *node_)
  1890. {
  1891. return ((node_ != NULL) &&
  1892. (node_->open_count > 0));
  1893. }
  1894. #ifndef CLOCK_MONOTONIC
  1895. #define CLOCK_MONOTONIC CLOCK_REALTIME
  1896. #endif
  1897. static void curr_time(struct timespec *now)
  1898. {
  1899. static clockid_t clockid = CLOCK_MONOTONIC;
  1900. int res = clock_gettime(clockid, now);
  1901. if (res == -1 && errno == EINVAL) {
  1902. clockid = CLOCK_REALTIME;
  1903. res = clock_gettime(clockid, now);
  1904. }
  1905. if (res == -1) {
  1906. perror("fuse: clock_gettime");
  1907. abort();
  1908. }
  1909. }
  1910. static
  1911. void
  1912. update_stat(struct node *node_,
  1913. const struct stat *stnew_)
  1914. {
  1915. struct stat *stold;
  1916. stold = &node_->stat_cache;
  1917. if((node_->stat_cache_valid) &&
  1918. ((stold->st_mtim.tv_sec != stnew_->st_mtim.tv_sec) ||
  1919. (stold->st_mtim.tv_nsec != stnew_->st_mtim.tv_nsec) ||
  1920. (stold->st_size != stnew_->st_size)))
  1921. node_->stat_cache_valid = 0;
  1922. *stold = *stnew_;
  1923. }
/*
 * Register (or refresh) the node for (nodeid, name) and fill in the
 * identity fields of the entry reply `e` (ino, generation), updating the
 * node's cached stat from e->attr and applying the mount-wide stat
 * overrides.  Returns 0, or -ENOMEM if the node could not be created.
 */
static
int
set_path_info(struct fuse *f,
              fuse_ino_t nodeid,
              const char *name,
              struct fuse_entry_param *e)
{
	struct node *node;
	node = find_node(f,nodeid,name);
	if(node == NULL)
		return -ENOMEM;
	e->ino = node->nodeid;
	e->generation = node->generation;
	/* The stat cache is shared state; update it under f->lock. */
	pthread_mutex_lock(&f->lock);
	update_stat(node,&e->attr);
	pthread_mutex_unlock(&f->lock);
	set_stat(f,e->ino,&e->attr);
	if(f->conf.debug)
		fprintf(stderr,
		        " NODEID: %llu\n"
		        " GEN: %llu\n",
		        (unsigned long long)e->ino,
		        (unsigned long long)e->generation);
	return 0;
}
  1949. static
  1950. int
  1951. lookup_path(struct fuse *f,
  1952. fuse_ino_t nodeid,
  1953. const char *name,
  1954. const char *path,
  1955. struct fuse_entry_param *e,
  1956. struct fuse_file_info *fi)
  1957. {
  1958. int rv;
  1959. memset(e,0,sizeof(struct fuse_entry_param));
  1960. rv = ((fi == NULL) ?
  1961. fuse_fs_getattr(f->fs,path,&e->attr,&e->timeout) :
  1962. fuse_fs_fgetattr(f->fs,&e->attr,fi,&e->timeout));
  1963. if(rv)
  1964. return rv;
  1965. return set_path_info(f,nodeid,name,e);
  1966. }
  1967. static struct fuse_context_i *fuse_get_context_internal(void)
  1968. {
  1969. struct fuse_context_i *c;
  1970. c = (struct fuse_context_i *) pthread_getspecific(fuse_context_key);
  1971. if (c == NULL) {
  1972. c = (struct fuse_context_i *)
  1973. calloc(1, sizeof(struct fuse_context_i));
  1974. if (c == NULL) {
  1975. /* This is hard to deal with properly, so just
  1976. abort. If memory is so low that the
  1977. context cannot be allocated, there's not
  1978. much hope for the filesystem anyway */
  1979. fprintf(stderr, "fuse: failed to allocate thread specific data\n");
  1980. abort();
  1981. }
  1982. pthread_setspecific(fuse_context_key, c);
  1983. }
  1984. return c;
  1985. }
  1986. static void fuse_freecontext(void *data)
  1987. {
  1988. free(data);
  1989. }
/*
 * Reference-counted creation of the thread-specific context key.  The
 * key is created only by the first caller; later callers just bump the
 * refcount.  Paired with fuse_delete_context_key().
 * Returns 0 on success, -1 if pthread_key_create failed.
 */
static int fuse_create_context_key(void)
{
	int err = 0;
	pthread_mutex_lock(&fuse_context_lock);
	if (!fuse_context_ref) {
		err = pthread_key_create(&fuse_context_key, fuse_freecontext);
		if (err) {
			fprintf(stderr, "fuse: failed to create thread specific key: %s\n",
				strerror(err));
			pthread_mutex_unlock(&fuse_context_lock);
			return -1;
		}
	}
	fuse_context_ref++;
	pthread_mutex_unlock(&fuse_context_lock);
	return 0;
}
/*
 * Drop one reference to the context key; the last reference frees the
 * calling thread's context and deletes the key itself.
 */
static void fuse_delete_context_key(void)
{
	pthread_mutex_lock(&fuse_context_lock);
	fuse_context_ref--;
	if (!fuse_context_ref) {
		/* Key destructors don't run for the deleting thread; free
		   our own context explicitly. */
		free(pthread_getspecific(fuse_context_key));
		pthread_key_delete(fuse_context_key);
	}
	pthread_mutex_unlock(&fuse_context_lock);
}
  2017. static struct fuse *req_fuse_prepare(fuse_req_t req)
  2018. {
  2019. struct fuse_context_i *c = fuse_get_context_internal();
  2020. const struct fuse_ctx *ctx = fuse_req_ctx(req);
  2021. c->req = req;
  2022. c->ctx.fuse = req_fuse(req);
  2023. c->ctx.uid = ctx->uid;
  2024. c->ctx.gid = ctx->gid;
  2025. c->ctx.pid = ctx->pid;
  2026. c->ctx.umask = ctx->umask;
  2027. return c->ctx.fuse;
  2028. }
  2029. static inline void reply_err(fuse_req_t req, int err)
  2030. {
  2031. /* fuse_reply_err() uses non-negated errno values */
  2032. fuse_reply_err(req, -err);
  2033. }
  2034. static void reply_entry(fuse_req_t req, const struct fuse_entry_param *e,
  2035. int err)
  2036. {
  2037. if (!err) {
  2038. struct fuse *f = req_fuse(req);
  2039. if (fuse_reply_entry(req, e) == -ENOENT) {
  2040. /* Skip forget for negative result */
  2041. if (e->ino != 0)
  2042. forget_node(f, e->ino, 1);
  2043. }
  2044. } else
  2045. reply_err(req, err);
  2046. }
  2047. void fuse_fs_init(struct fuse_fs *fs, struct fuse_conn_info *conn)
  2048. {
  2049. fuse_get_context()->private_data = fs->user_data;
  2050. if (!fs->op.write_buf)
  2051. conn->want &= ~FUSE_CAP_SPLICE_READ;
  2052. if (!fs->op.lock)
  2053. conn->want &= ~FUSE_CAP_POSIX_LOCKS;
  2054. if (!fs->op.flock)
  2055. conn->want &= ~FUSE_CAP_FLOCK_LOCKS;
  2056. if (fs->op.init)
  2057. fs->user_data = fs->op.init(conn);
  2058. }
  2059. static void fuse_lib_init(void *data, struct fuse_conn_info *conn)
  2060. {
  2061. struct fuse *f = (struct fuse *) data;
  2062. struct fuse_context_i *c = fuse_get_context_internal();
  2063. memset(c, 0, sizeof(*c));
  2064. c->ctx.fuse = f;
  2065. conn->want |= FUSE_CAP_EXPORT_SUPPORT;
  2066. fuse_fs_init(f->fs, conn);
  2067. }
  2068. void fuse_fs_destroy(struct fuse_fs *fs)
  2069. {
  2070. fuse_get_context()->private_data = fs->user_data;
  2071. if (fs->op.destroy)
  2072. fs->op.destroy(fs->user_data);
  2073. free(fs);
  2074. }
  2075. static void fuse_lib_destroy(void *data)
  2076. {
  2077. struct fuse *f = (struct fuse *) data;
  2078. struct fuse_context_i *c = fuse_get_context_internal();
  2079. memset(c, 0, sizeof(*c));
  2080. c->ctx.fuse = f;
  2081. fuse_fs_destroy(f->fs);
  2082. f->fs = NULL;
  2083. }
/*
 * LOOKUP request handler.
 *
 * "." and ".." need special handling (the kernel can send them when
 * NFS-exporting): for "." the parent itself is the target, so it is
 * pinned via refctr while the lookup runs; for ".." the parent is
 * rewritten to the grandparent.  In both cases `name` is cleared so the
 * path of the (possibly rewritten) parent itself is resolved.
 *
 * A -ENOENT from lookup_path is turned into a negative entry
 * (e.ino == 0, err == 0) so the kernel can cache the absence.
 */
static
void
fuse_lib_lookup(fuse_req_t req,
                fuse_ino_t parent,
                const char *name)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_entry_param e;
	char *path;
	int err;
	struct node *dot = NULL;
	if (name[0] == '.') {
		int len = strlen(name);
		if (len == 1 || (name[1] == '.' && len == 2)) {
			pthread_mutex_lock(&f->lock);
			if (len == 1) {
				if (f->conf.debug)
					fprintf(stderr, "LOOKUP-DOT\n");
				dot = get_node_nocheck(f, parent);
				if (dot == NULL) {
					pthread_mutex_unlock(&f->lock);
					reply_entry(req, &e, -ESTALE);
					return;
				}
				/* Pin the node until the lookup completes. */
				dot->refctr++;
			} else {
				if (f->conf.debug)
					fprintf(stderr, "LOOKUP-DOTDOT\n");
				parent = get_node(f, parent)->parent->nodeid;
			}
			pthread_mutex_unlock(&f->lock);
			name = NULL;
		}
	}
	err = get_path_name(f, parent, name, &path);
	if (!err) {
		struct fuse_intr_data d;
		if (f->conf.debug)
			fprintf(stderr, "LOOKUP %s\n", path);
		fuse_prepare_interrupt(f, req, &d);
		err = lookup_path(f, parent, name, path, &e, NULL);
		if (err == -ENOENT) {
			/* Negative entry: report success with ino 0. */
			e.ino = 0;
			err = 0;
		}
		fuse_finish_interrupt(f, req, &d);
		free_path(f, parent, path);
	}
	if (dot) {
		/* Release the pin taken for the "." case. */
		pthread_mutex_lock(&f->lock);
		unref_node(f, dot);
		pthread_mutex_unlock(&f->lock);
	}
	reply_entry(req, &e, err);
}
  2139. static
  2140. void
  2141. do_forget(struct fuse *f,
  2142. const fuse_ino_t ino,
  2143. const uint64_t nlookup)
  2144. {
  2145. if(f->conf.debug)
  2146. fprintf(stderr,
  2147. "FORGET %llu/%llu\n",
  2148. (unsigned long long)ino,
  2149. (unsigned long long)nlookup);
  2150. forget_node(f, ino, nlookup);
  2151. }
  2152. static
  2153. void
  2154. fuse_lib_forget(fuse_req_t req,
  2155. const fuse_ino_t ino,
  2156. const uint64_t nlookup)
  2157. {
  2158. do_forget(req_fuse(req), ino, nlookup);
  2159. fuse_reply_none(req);
  2160. }
  2161. static void fuse_lib_forget_multi(fuse_req_t req, size_t count,
  2162. struct fuse_forget_data *forgets)
  2163. {
  2164. struct fuse *f = req_fuse(req);
  2165. size_t i;
  2166. for (i = 0; i < count; i++)
  2167. do_forget(f, forgets[i].ino, forgets[i].nlookup);
  2168. fuse_reply_none(req);
  2169. }
/* GETATTR request handler.
 *
 * If no file handle was supplied but the node is "hidden" (unlinked
 * while still open), the stashed hidden_fh is substituted so the
 * handle-based fgetattr can reach the open file.  On success the
 * node's cached stat is refreshed before replying. */
static
void
fuse_lib_getattr(fuse_req_t req,
                 fuse_ino_t ino,
                 struct fuse_file_info *fi)
{
  int err;
  char *path;
  struct fuse *f;
  struct stat buf;
  struct node *node;
  fuse_timeouts_t timeout;
  struct fuse_file_info ffi = {0};

  f = req_fuse_prepare(req);

  if(fi == NULL)
    {
      pthread_mutex_lock(&f->lock);
      node = get_node(f,ino);
      if(node->is_hidden)
        {
          /* Unlinked-but-open node: borrow its hidden handle. */
          fi = &ffi;
          fi->fh = node->hidden_fh;
        }
      pthread_mutex_unlock(&f->lock);
    }

  memset(&buf, 0, sizeof(buf));

  err = 0;
  path = NULL;
  /* Resolve a path when there is no handle, or when fgetattr is
   * missing (presumably fuse_fs_fgetattr then needs the path as a
   * fallback — TODO confirm against fuse_fs_fgetattr). */
  if((fi == NULL) || (f->fs->op.fgetattr == NULL))
    err = get_path(f,ino,&path);

  if(!err)
    {
      struct fuse_intr_data d;
      fuse_prepare_interrupt(f,req,&d);
      err = ((fi == NULL) ?
             fuse_fs_getattr(f->fs,path,&buf,&timeout) :
             fuse_fs_fgetattr(f->fs,&buf,fi,&timeout));
      fuse_finish_interrupt(f,req,&d);
      free_path(f,ino,path);
    }

  if(!err)
    {
      /* Refresh the node's cached attributes under the tree lock,
       * then reply with the filesystem-provided attr timeout. */
      pthread_mutex_lock(&f->lock);
      node = get_node(f,ino);
      update_stat(node,&buf);
      pthread_mutex_unlock(&f->lock);
      set_stat(f,ino,&buf);
      fuse_reply_attr(req,&buf,timeout.attr);
    }
  else
    {
      reply_err(req, err);
    }
}
  2224. int fuse_fs_chmod(struct fuse_fs *fs, const char *path, mode_t mode)
  2225. {
  2226. fuse_get_context()->private_data = fs->user_data;
  2227. if (fs->op.chmod)
  2228. return fs->op.chmod(path, mode);
  2229. else
  2230. return -ENOSYS;
  2231. }
  2232. int
  2233. fuse_fs_fchmod(struct fuse_fs *fs_,
  2234. const struct fuse_file_info *ffi_,
  2235. const mode_t mode_)
  2236. {
  2237. fuse_get_context()->private_data = fs_->user_data;
  2238. if(fs_->op.fchmod)
  2239. return fs_->op.fchmod(ffi_,mode_);
  2240. return -ENOSYS;
  2241. }
/* SETATTR request handler: apply the attribute changes selected by the
 * `valid` bitmask (mode, uid/gid, size, times) in that fixed order,
 * then re-stat and reply with the resulting attributes.
 *
 * When no file handle was supplied but the node is hidden (unlinked
 * while open), the stashed hidden_fh is used so the f* variants can
 * operate on the open file. */
static
void
fuse_lib_setattr(fuse_req_t req,
                 fuse_ino_t ino,
                 struct stat *attr,
                 int valid,
                 struct fuse_file_info *fi)
{
  struct fuse *f = req_fuse_prepare(req);
  struct stat buf;
  char *path;
  int err;
  struct node *node;
  fuse_timeouts_t timeout;
  struct fuse_file_info ffi = {0};

  if(fi == NULL)
    {
      pthread_mutex_lock(&f->lock);
      node = get_node(f,ino);
      if(node->is_hidden)
        {
          /* Operate on the still-open hidden file via its handle. */
          fi = &ffi;
          fi->fh = node->hidden_fh;
        }
      pthread_mutex_unlock(&f->lock);
    }

  memset(&buf,0,sizeof(buf));

  err = 0;
  path = NULL;
  /* Only the path-based operation variants need a resolved path. */
  if(fi == NULL)
    err = get_path(f,ino,&path);

  if(!err)
    {
      struct fuse_intr_data d;
      fuse_prepare_interrupt(f,req,&d);
      err = 0;
      if (!err && (valid & FATTR_MODE))
        err = ((fi == NULL) ?
               fuse_fs_chmod(f->fs,path,attr->st_mode) :
               fuse_fs_fchmod(f->fs,fi,attr->st_mode));
      if(!err && (valid & (FATTR_UID | FATTR_GID)))
        {
          /* (uid_t)-1 / (gid_t)-1 mean "leave unchanged", as with
           * chown(2). */
          uid_t uid = ((valid & FATTR_UID) ? attr->st_uid : (uid_t) -1);
          gid_t gid = ((valid & FATTR_GID) ? attr->st_gid : (gid_t) -1);
          err = ((fi == NULL) ?
                 fuse_fs_chown(f->fs,path,uid,gid) :
                 fuse_fs_fchown(f->fs,fi,uid,gid));
        }
      if(!err && (valid & FATTR_SIZE))
        err = ((fi == NULL) ?
               fuse_fs_truncate(f->fs,path,attr->st_size) :
               fuse_fs_ftruncate(f->fs,attr->st_size,fi));
#ifdef HAVE_UTIMENSAT
      /* utimensat path: each timestamp independently may be omitted,
       * set to "now", or set to an explicit value. */
      if(!err && (valid & (FATTR_ATIME | FATTR_MTIME)))
        {
          struct timespec tv[2];
          tv[0].tv_sec = 0;
          tv[1].tv_sec = 0;
          tv[0].tv_nsec = UTIME_OMIT;
          tv[1].tv_nsec = UTIME_OMIT;
          if(valid & FATTR_ATIME_NOW)
            tv[0].tv_nsec = UTIME_NOW;
          else if(valid & FATTR_ATIME)
            tv[0] = attr->st_atim;
          if(valid & FATTR_MTIME_NOW)
            tv[1].tv_nsec = UTIME_NOW;
          else if(valid & FATTR_MTIME)
            tv[1] = attr->st_mtim;
          err = ((fi == NULL) ?
                 fuse_fs_utimens(f->fs,path,tv) :
                 fuse_fs_futimens(f->fs,fi,tv));
        }
      else
#endif
        /* Fallback requires BOTH timestamps, since the non-utimensat
         * interface cannot omit just one of them. */
        if(!err && ((valid & (FATTR_ATIME|FATTR_MTIME)) == (FATTR_ATIME|FATTR_MTIME)))
          {
            struct timespec tv[2];
            tv[0].tv_sec = attr->st_atime;
            tv[0].tv_nsec = ST_ATIM_NSEC(attr);
            tv[1].tv_sec = attr->st_mtime;
            tv[1].tv_nsec = ST_MTIM_NSEC(attr);
            err = ((fi == NULL) ?
                   fuse_fs_utimens(f->fs,path,tv) :
                   fuse_fs_futimens(f->fs,fi,tv));
          }
      /* Re-stat so the reply reflects what the filesystem actually
       * committed, not what was requested. */
      if (!err)
        err = ((fi == NULL) ?
               fuse_fs_getattr(f->fs,path,&buf,&timeout) :
               fuse_fs_fgetattr(f->fs,&buf,fi,&timeout));
      fuse_finish_interrupt(f,req,&d);
      free_path(f,ino,path);
    }

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      update_stat(get_node(f,ino),&buf);
      pthread_mutex_unlock(&f->lock);
      set_stat(f,ino,&buf);
      fuse_reply_attr(req,&buf,timeout.attr);
    }
  else
    {
      reply_err(req,err);
    }
}
  2347. static void fuse_lib_access(fuse_req_t req, fuse_ino_t ino, int mask)
  2348. {
  2349. struct fuse *f = req_fuse_prepare(req);
  2350. char *path;
  2351. int err;
  2352. err = get_path(f, ino, &path);
  2353. if (!err) {
  2354. struct fuse_intr_data d;
  2355. fuse_prepare_interrupt(f, req, &d);
  2356. err = fuse_fs_access(f->fs, path, mask);
  2357. fuse_finish_interrupt(f, req, &d);
  2358. free_path(f, ino, path);
  2359. }
  2360. reply_err(req, err);
  2361. }
  2362. static void fuse_lib_readlink(fuse_req_t req, fuse_ino_t ino)
  2363. {
  2364. struct fuse *f = req_fuse_prepare(req);
  2365. char linkname[PATH_MAX + 1];
  2366. char *path;
  2367. int err;
  2368. err = get_path(f, ino, &path);
  2369. if (!err) {
  2370. struct fuse_intr_data d;
  2371. fuse_prepare_interrupt(f, req, &d);
  2372. err = fuse_fs_readlink(f->fs, path, linkname, sizeof(linkname));
  2373. fuse_finish_interrupt(f, req, &d);
  2374. free_path(f, ino, path);
  2375. }
  2376. if (!err) {
  2377. linkname[PATH_MAX] = '\0';
  2378. fuse_reply_readlink(req, linkname);
  2379. } else
  2380. reply_err(req, err);
  2381. }
/* MKNOD request handler.
 *
 * For regular files, first try create()+release() (an atomic
 * create-with-open); if the filesystem reports -ENOSYS for that path,
 * fall back to a plain mknod().  Either way the new entry is looked up
 * so a full fuse_entry_param can be returned. */
static void fuse_lib_mknod(fuse_req_t req, fuse_ino_t parent, const char *name,
                           mode_t mode, dev_t rdev)
{
  struct fuse *f = req_fuse_prepare(req);
  struct fuse_entry_param e;
  char *path;
  int err;

  err = get_path_name(f, parent, name, &path);
  if (!err) {
    struct fuse_intr_data d;
    fuse_prepare_interrupt(f, req, &d);
    err = -ENOSYS;
    if (S_ISREG(mode)) {
      struct fuse_file_info fi;

      memset(&fi, 0, sizeof(fi));
      fi.flags = O_CREAT | O_EXCL | O_WRONLY;
      err = fuse_fs_create(f->fs, path, mode, &fi);
      if (!err) {
        err = lookup_path(f, parent, name, path, &e,
                          &fi);
        /* mknod must not leave the file open. */
        fuse_fs_release(f->fs, &fi);
      }
    }
    /* Either not a regular file, or create() is unimplemented. */
    if (err == -ENOSYS) {
      err = fuse_fs_mknod(f->fs, path, mode, rdev);
      if (!err)
        err = lookup_path(f, parent, name, path, &e,
                          NULL);
    }
    fuse_finish_interrupt(f, req, &d);
    free_path(f, parent, path);
  }
  reply_entry(req, &e, err);
}
  2416. static void fuse_lib_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name,
  2417. mode_t mode)
  2418. {
  2419. struct fuse *f = req_fuse_prepare(req);
  2420. struct fuse_entry_param e;
  2421. char *path;
  2422. int err;
  2423. err = get_path_name(f, parent, name, &path);
  2424. if (!err) {
  2425. struct fuse_intr_data d;
  2426. fuse_prepare_interrupt(f, req, &d);
  2427. err = fuse_fs_mkdir(f->fs, path, mode);
  2428. if (!err)
  2429. err = lookup_path(f, parent, name, path, &e, NULL);
  2430. fuse_finish_interrupt(f, req, &d);
  2431. free_path(f, parent, path);
  2432. }
  2433. reply_entry(req, &e, err);
  2434. }
/* UNLINK request handler.
 *
 * If the target is currently open, ask the filesystem to "hide" it
 * first (prepare_hide stashes a handle in hidden_fh) so the open file
 * keeps working after the name is removed; the hidden handle is freed
 * by fuse_do_release() when the last opener closes it. */
static
void
fuse_lib_unlink(fuse_req_t req,
                fuse_ino_t parent,
                const char *name)
{
  int err;
  char *path;
  struct fuse *f;
  struct node *wnode;
  struct fuse_intr_data d;

  f = req_fuse_prepare(req);
  /* Write-locks the target node while the tree is modified. */
  err = get_path_wrlock(f,parent,name,&path,&wnode);

  if(!err)
    {
      fuse_prepare_interrupt(f,req,&d);
      pthread_mutex_lock(&f->lock);
      if(node_open(wnode))
        {
          err = fuse_fs_prepare_hide(f->fs,path,&wnode->hidden_fh);
          if(!err)
            wnode->is_hidden = 1;
        }
      pthread_mutex_unlock(&f->lock);

      /* Note: err from prepare_hide is intentionally not fatal — the
       * unlink proceeds regardless. */
      err = fuse_fs_unlink(f->fs,path);
      if(!err)
        remove_node(f,parent,name);

      fuse_finish_interrupt(f,req,&d);
      free_path_wrlock(f,parent,wnode,path);
    }

  reply_err(req,err);
}
  2467. static void fuse_lib_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name)
  2468. {
  2469. struct fuse *f = req_fuse_prepare(req);
  2470. struct node *wnode;
  2471. char *path;
  2472. int err;
  2473. err = get_path_wrlock(f, parent, name, &path, &wnode);
  2474. if (!err) {
  2475. struct fuse_intr_data d;
  2476. fuse_prepare_interrupt(f, req, &d);
  2477. err = fuse_fs_rmdir(f->fs, path);
  2478. fuse_finish_interrupt(f, req, &d);
  2479. if (!err)
  2480. remove_node(f, parent, name);
  2481. free_path_wrlock(f, parent, wnode, path);
  2482. }
  2483. reply_err(req, err);
  2484. }
  2485. static
  2486. void
  2487. fuse_lib_symlink(fuse_req_t req_,
  2488. const char *linkname_,
  2489. fuse_ino_t parent_,
  2490. const char *name_)
  2491. {
  2492. int rv;
  2493. char *path;
  2494. struct fuse *f;
  2495. struct fuse_entry_param e;
  2496. f = req_fuse_prepare(req_);
  2497. rv = get_path_name(f,parent_,name_,&path);
  2498. if(!rv)
  2499. {
  2500. struct fuse_intr_data d;
  2501. fuse_prepare_interrupt(f,req_,&d);
  2502. rv = fuse_fs_symlink(f->fs,linkname_,path,&e.attr,&e.timeout);
  2503. if(rv == 0)
  2504. rv = set_path_info(f,parent_,name_,&e);
  2505. fuse_finish_interrupt(f,req_,&d);
  2506. free_path(f,parent_,path);
  2507. }
  2508. reply_entry(req_,&e,rv);
  2509. }
/* RENAME request handler.
 *
 * Both paths are resolved together (get_path2 write-locks both target
 * nodes).  If the destination exists and is open, it is "hidden" first
 * so the open file survives being replaced; its hidden handle is freed
 * later by fuse_do_release(). */
static
void
fuse_lib_rename(fuse_req_t req,
                fuse_ino_t olddir,
                const char *oldname,
                fuse_ino_t newdir,
                const char *newname)
{
  int err;
  struct fuse *f;
  char *oldpath;
  char *newpath;
  struct node *wnode1;
  struct node *wnode2;
  struct fuse_intr_data d;

  f = req_fuse_prepare(req);
  err = get_path2(f,olddir,oldname,newdir,newname,
                  &oldpath,&newpath,&wnode1,&wnode2);

  if(!err)
    {
      fuse_prepare_interrupt(f,req,&d);
      pthread_mutex_lock(&f->lock);
      if(node_open(wnode2))
        {
          /* Destination is open: stash a handle before it is
           * clobbered by the rename. */
          err = fuse_fs_prepare_hide(f->fs,newpath,&wnode2->hidden_fh);
          if(!err)
            wnode2->is_hidden = 1;
        }
      pthread_mutex_unlock(&f->lock);

      /* Note: a prepare_hide failure is intentionally not fatal — the
       * rename proceeds regardless. */
      err = fuse_fs_rename(f->fs,oldpath,newpath);
      if(!err)
        err = rename_node(f,olddir,oldname,newdir,newname);

      fuse_finish_interrupt(f,req,&d);
      free_path2(f,olddir,newdir,wnode1,wnode2,oldpath,newpath);
    }

  reply_err(req,err);
}
  2547. static void fuse_lib_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent,
  2548. const char *newname)
  2549. {
  2550. struct fuse *f = req_fuse_prepare(req);
  2551. struct fuse_entry_param e;
  2552. char *oldpath;
  2553. char *newpath;
  2554. int err;
  2555. err = get_path2(f, ino, NULL, newparent, newname,
  2556. &oldpath, &newpath, NULL, NULL);
  2557. if (!err) {
  2558. struct fuse_intr_data d;
  2559. fuse_prepare_interrupt(f, req, &d);
  2560. err = fuse_fs_link(f->fs, oldpath, newpath);
  2561. if (!err)
  2562. err = lookup_path(f, newparent, newname, newpath,
  2563. &e, NULL);
  2564. fuse_finish_interrupt(f, req, &d);
  2565. free_path2(f, ino, newparent, NULL, NULL, oldpath, newpath);
  2566. }
  2567. reply_entry(req, &e, err);
  2568. }
/* Common release bookkeeping: invoke the filesystem's release, drop the
 * node's open count, and — when the last opener of a hidden
 * (unlinked-while-open) node goes away — free its hidden handle. */
static void fuse_do_release(struct fuse *f, fuse_ino_t ino,
                            struct fuse_file_info *fi)
{
  struct node *node;
  uint64_t fh;
  int was_hidden;

  fh = 0;
  fuse_fs_release(f->fs, fi);

  pthread_mutex_lock(&f->lock);
  node = get_node(f, ino);
  assert(node->open_count > 0);
  node->open_count--;
  was_hidden = 0;
  if (node->is_hidden && (node->open_count == 0)) {
    /* Last opener gone: capture the handle so it can be freed
     * outside f->lock. */
    was_hidden = 1;
    node->is_hidden = 0;
    fh = node->hidden_fh;
  }
  pthread_mutex_unlock(&f->lock);

  if(was_hidden)
    fuse_fs_free_hide(f->fs,fh);
}
/* CREATE request handler: atomic create-and-open.
 *
 * On success the node's open count is bumped before replying; if the
 * reply itself fails with -ENOENT (the request was interrupted) the
 * open is rolled back via fuse_do_release() and the lookup reference
 * is forgotten. */
static
void
fuse_lib_create(fuse_req_t req,
                fuse_ino_t parent,
                const char *name,
                mode_t mode,
                struct fuse_file_info *fi)
{
  int err;
  char *path;
  struct fuse *f;
  struct fuse_intr_data d;
  struct fuse_entry_param e;

  f = req_fuse_prepare(req);
  err = get_path_name(f, parent, name, &path);
  if(!err)
    {
      fuse_prepare_interrupt(f, req, &d);
      err = fuse_fs_create(f->fs, path, mode, fi);
      if(!err)
        {
          err = lookup_path(f, parent, name, path, &e, fi);
          if(err)
            {
              /* Lookup failed — close the just-created file. */
              fuse_fs_release(f->fs, fi);
            }
          else if(!S_ISREG(e.attr.st_mode))
            {
              /* create() must yield a regular file; anything else is
               * treated as filesystem misbehaviour. */
              err = -EIO;
              fuse_fs_release(f->fs, fi);
              forget_node(f, e.ino, 1);
            }
        }
      fuse_finish_interrupt(f, req, &d);
    }

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      get_node(f,e.ino)->open_count++;
      pthread_mutex_unlock(&f->lock);

      if (fuse_reply_create(req, &e, fi) == -ENOENT) {
        /* The open syscall was interrupted, so it
           must be cancelled */
        fuse_do_release(f, e.ino, fi);
        forget_node(f, e.ino, 1);
      }
    }
  else
    {
      reply_err(req, err);
    }

  free_path(f, parent, path);
}
  2644. static double diff_timespec(const struct timespec *t1,
  2645. const struct timespec *t2)
  2646. {
  2647. return (t1->tv_sec - t2->tv_sec) +
  2648. ((double) t1->tv_nsec - (double) t2->tv_nsec) / 1000000000.0;
  2649. }
/* Implements the auto_cache policy at open time: if the node's cached
 * stat is still valid, re-stat the open file and refresh the cache;
 * only then is keep_cache set, so the kernel page cache is retained
 * (presumably update_stat clears stat_cache_valid when the attributes
 * changed — TODO confirm against update_stat). */
static
void
open_auto_cache(struct fuse *f,
                fuse_ino_t ino,
                const char *path,
                struct fuse_file_info *fi)
{
  struct node *node;
  fuse_timeouts_t timeout;

  pthread_mutex_lock(&f->lock);
  node = get_node(f,ino);
  if(node->stat_cache_valid)
    {
      int err;
      struct stat stbuf;

      /* f->lock must be dropped around the (potentially slow)
       * filesystem call, then retaken. */
      pthread_mutex_unlock(&f->lock);
      err = fuse_fs_fgetattr(f->fs,&stbuf,fi,&timeout);
      pthread_mutex_lock(&f->lock);

      if(!err)
        update_stat(node,&stbuf);
      else
        node->stat_cache_valid = 0;
    }

  /* Keep the page cache only if the stat cache survived the check. */
  if(node->stat_cache_valid)
    fi->keep_cache = 1;

  node->stat_cache_valid = 1;
  pthread_mutex_unlock(&f->lock);
}
/* OPEN request handler: open via path, optionally apply the auto_cache
 * policy, bump the node's open count, and reply.  A failed reply
 * (-ENOENT, meaning the request was interrupted) rolls the open back
 * through fuse_do_release(). */
static
void
fuse_lib_open(fuse_req_t req,
              fuse_ino_t ino,
              struct fuse_file_info *fi)
{
  int err;
  char *path;
  struct fuse *f;
  struct fuse_intr_data d;

  f = req_fuse_prepare(req);
  err = get_path(f, ino, &path);
  if(!err)
    {
      fuse_prepare_interrupt(f, req, &d);
      err = fuse_fs_open(f->fs, path, fi);
      if(!err)
        {
          if (fi && fi->auto_cache)
            open_auto_cache(f, ino, path, fi);
        }
      fuse_finish_interrupt(f, req, &d);
    }

  if(!err)
    {
      /* Count the opener before replying so a racing release sees a
       * consistent open_count. */
      pthread_mutex_lock(&f->lock);
      get_node(f,ino)->open_count++;
      pthread_mutex_unlock(&f->lock);

      /* The open syscall was interrupted, so it must be cancelled */
      if(fuse_reply_open(req, fi) == -ENOENT)
        fuse_do_release(f, ino, fi);
    }
  else
    {
      reply_err(req, err);
    }

  free_path(f, ino, path);
}
  2716. static void fuse_lib_read(fuse_req_t req, fuse_ino_t ino, size_t size,
  2717. off_t off, struct fuse_file_info *fi)
  2718. {
  2719. struct fuse *f = req_fuse_prepare(req);
  2720. struct fuse_bufvec *buf = NULL;
  2721. int res;
  2722. struct fuse_intr_data d;
  2723. fuse_prepare_interrupt(f, req, &d);
  2724. res = fuse_fs_read_buf(f->fs, &buf, size, off, fi);
  2725. fuse_finish_interrupt(f, req, &d);
  2726. if (res == 0)
  2727. fuse_reply_data(req, buf, FUSE_BUF_SPLICE_MOVE);
  2728. else
  2729. reply_err(req, res);
  2730. fuse_free_buf(buf);
  2731. }
  2732. static void fuse_lib_write_buf(fuse_req_t req, fuse_ino_t ino,
  2733. struct fuse_bufvec *buf, off_t off,
  2734. struct fuse_file_info *fi)
  2735. {
  2736. struct fuse *f = req_fuse_prepare(req);
  2737. int res;
  2738. struct fuse_intr_data d;
  2739. fuse_prepare_interrupt(f, req, &d);
  2740. res = fuse_fs_write_buf(f->fs, buf, off, fi);
  2741. fuse_finish_interrupt(f, req, &d);
  2742. free_path(f, ino, NULL);
  2743. if (res >= 0)
  2744. fuse_reply_write(req, res);
  2745. else
  2746. reply_err(req, res);
  2747. }
  2748. static void fuse_lib_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
  2749. struct fuse_file_info *fi)
  2750. {
  2751. struct fuse *f = req_fuse_prepare(req);
  2752. int err;
  2753. struct fuse_intr_data d;
  2754. fuse_prepare_interrupt(f, req, &d);
  2755. err = fuse_fs_fsync(f->fs, datasync, fi);
  2756. fuse_finish_interrupt(f, req, &d);
  2757. reply_err(req, err);
  2758. }
  2759. static struct fuse_dh *get_dirhandle(const struct fuse_file_info *llfi,
  2760. struct fuse_file_info *fi)
  2761. {
  2762. struct fuse_dh *dh = (struct fuse_dh *) (uintptr_t) llfi->fh;
  2763. memset(fi, 0, sizeof(struct fuse_file_info));
  2764. fi->fh = dh->fh;
  2765. return dh;
  2766. }
/* OPENDIR request handler.
 *
 * Allocates a fuse_dh wrapper (directory entry cache + lock + the user
 * filesystem's own handle) and stores it in llfi->fh for subsequent
 * readdir/releasedir calls.  All error paths free the wrapper. */
static void fuse_lib_opendir(fuse_req_t req, fuse_ino_t ino,
                             struct fuse_file_info *llfi)
{
  struct fuse *f = req_fuse_prepare(req);
  struct fuse_intr_data d;
  struct fuse_dh *dh;
  struct fuse_file_info fi;
  char *path;
  int err;

  dh = (struct fuse_dh *) calloc(1,sizeof(struct fuse_dh));
  if (dh == NULL) {
    reply_err(req, -ENOMEM);
    return;
  }

  fuse_dirents_init(&dh->d);
  fuse_mutex_init(&dh->lock);

  /* The kernel-visible handle is the wrapper, not the fs handle. */
  llfi->fh = (uintptr_t) dh;

  memset(&fi, 0, sizeof(fi));
  fi.flags = llfi->flags;

  err = get_path(f, ino, &path);
  if (!err) {
    fuse_prepare_interrupt(f, req, &d);
    err = fuse_fs_opendir(f->fs, path, &fi);
    fuse_finish_interrupt(f, req, &d);
    /* Copy the fs handle and cache hints back out for later calls. */
    dh->fh = fi.fh;
    llfi->keep_cache = fi.keep_cache;
    llfi->cache_readdir = fi.cache_readdir;
  }

  if (!err) {
    if (fuse_reply_open(req, llfi) == -ENOENT) {
      /* The opendir syscall was interrupted, so it
         must be cancelled */
      fuse_fs_releasedir(f->fs, &fi);
      pthread_mutex_destroy(&dh->lock);
      free(dh);
    }
  } else {
    reply_err(req, err);
    pthread_mutex_destroy(&dh->lock);
    free(dh);
  }
  free_path(f, ino, path);
}
  2810. static
  2811. int
  2812. readdir_fill(struct fuse *f_,
  2813. fuse_req_t req_,
  2814. fuse_dirents_t *d_,
  2815. struct fuse_file_info *fi_)
  2816. {
  2817. int rv;
  2818. struct fuse_intr_data intr_data;
  2819. fuse_prepare_interrupt(f_,req_,&intr_data);
  2820. rv = fuse_fs_readdir(f_->fs,fi_,d_);
  2821. fuse_finish_interrupt(f_,req_,&intr_data);
  2822. return rv;
  2823. }
  2824. static
  2825. int
  2826. readdir_plus_fill(struct fuse *f_,
  2827. fuse_req_t req_,
  2828. fuse_dirents_t *d_,
  2829. struct fuse_file_info *fi_)
  2830. {
  2831. int rv;
  2832. struct fuse_intr_data intr_data;
  2833. fuse_prepare_interrupt(f_,req_,&intr_data);
  2834. rv = fuse_fs_readdir_plus(f_->fs,fi_,d_);
  2835. fuse_finish_interrupt(f_,req_,&intr_data);
  2836. return rv;
  2837. }
  2838. static
  2839. size_t
  2840. readdir_buf_size(fuse_dirents_t *d_,
  2841. size_t size_,
  2842. off_t off_)
  2843. {
  2844. if(off_ >= kv_size(d_->offs))
  2845. return 0;
  2846. if((kv_A(d_->offs,off_) + size_) > d_->data_len)
  2847. return (d_->data_len - kv_A(d_->offs,off_));
  2848. return size_;
  2849. }
  2850. static
  2851. char*
  2852. readdir_buf(fuse_dirents_t *d_,
  2853. off_t off_)
  2854. {
  2855. return &d_->buf[kv_A(d_->offs,off_)];
  2856. }
  2857. static
  2858. void
  2859. fuse_lib_readdir(fuse_req_t req_,
  2860. fuse_ino_t ino_,
  2861. size_t size_,
  2862. off_t off_,
  2863. struct fuse_file_info *llffi_)
  2864. {
  2865. int rv;
  2866. struct fuse *f;
  2867. fuse_dirents_t *d;
  2868. struct fuse_dh *dh;
  2869. struct fuse_file_info fi;
  2870. f = req_fuse_prepare(req_);
  2871. dh = get_dirhandle(llffi_,&fi);
  2872. d = &dh->d;
  2873. pthread_mutex_lock(&dh->lock);
  2874. rv = 0;
  2875. if((off_ == 0) || (d->data_len == 0))
  2876. rv = readdir_fill(f,req_,d,&fi);
  2877. if(rv)
  2878. {
  2879. reply_err(req_,rv);
  2880. goto out;
  2881. }
  2882. size_ = readdir_buf_size(d,size_,off_);
  2883. fuse_reply_buf(req_,
  2884. readdir_buf(d,off_),
  2885. size_);
  2886. out:
  2887. pthread_mutex_unlock(&dh->lock);
  2888. }
  2889. static
  2890. void
  2891. fuse_lib_readdir_plus(fuse_req_t req_,
  2892. fuse_ino_t ino_,
  2893. size_t size_,
  2894. off_t off_,
  2895. struct fuse_file_info *llffi_)
  2896. {
  2897. int rv;
  2898. struct fuse *f;
  2899. fuse_dirents_t *d;
  2900. struct fuse_dh *dh;
  2901. struct fuse_file_info fi;
  2902. f = req_fuse_prepare(req_);
  2903. dh = get_dirhandle(llffi_,&fi);
  2904. d = &dh->d;
  2905. pthread_mutex_lock(&dh->lock);
  2906. rv = 0;
  2907. if((off_ == 0) || (d->data_len == 0))
  2908. rv = readdir_plus_fill(f,req_,d,&fi);
  2909. if(rv)
  2910. {
  2911. reply_err(req_,rv);
  2912. goto out;
  2913. }
  2914. size_ = readdir_buf_size(d,size_,off_);
  2915. fuse_reply_buf(req_,
  2916. readdir_buf(d,off_),
  2917. size_);
  2918. out:
  2919. pthread_mutex_unlock(&dh->lock);
  2920. }
/* RELEASEDIR request handler: close the user filesystem's directory
 * handle, then destroy the fuse_dh wrapper created by opendir. */
static
void
fuse_lib_releasedir(fuse_req_t req_,
                    fuse_ino_t ino_,
                    struct fuse_file_info *llfi_)
{
  struct fuse *f;
  struct fuse_dh *dh;
  struct fuse_intr_data d;
  struct fuse_file_info fi;

  f = req_fuse_prepare(req_);
  dh = get_dirhandle(llfi_,&fi);

  fuse_prepare_interrupt(f,req_,&d);
  fuse_fs_releasedir(f->fs,&fi);
  fuse_finish_interrupt(f,req_,&d);

  /* Done to keep race condition between last readdir reply and the unlock */
  /* Lock/unlock acts as a barrier: any readdir still holding dh->lock
   * finishes before the handle is destroyed below. */
  pthread_mutex_lock(&dh->lock);
  pthread_mutex_unlock(&dh->lock);

  pthread_mutex_destroy(&dh->lock);
  fuse_dirents_free(&dh->d);
  free(dh);
  reply_err(req_,0);
}
  2944. static void fuse_lib_fsyncdir(fuse_req_t req, fuse_ino_t ino, int datasync,
  2945. struct fuse_file_info *llfi)
  2946. {
  2947. struct fuse *f = req_fuse_prepare(req);
  2948. struct fuse_file_info fi;
  2949. int err;
  2950. struct fuse_intr_data d;
  2951. get_dirhandle(llfi, &fi);
  2952. fuse_prepare_interrupt(f, req, &d);
  2953. err = fuse_fs_fsyncdir(f->fs, datasync, &fi);
  2954. fuse_finish_interrupt(f, req, &d);
  2955. reply_err(req, err);
  2956. }
  2957. static void fuse_lib_statfs(fuse_req_t req, fuse_ino_t ino)
  2958. {
  2959. struct fuse *f = req_fuse_prepare(req);
  2960. struct statvfs buf;
  2961. char *path = NULL;
  2962. int err = 0;
  2963. memset(&buf, 0, sizeof(buf));
  2964. if (ino)
  2965. err = get_path(f, ino, &path);
  2966. if (!err) {
  2967. struct fuse_intr_data d;
  2968. fuse_prepare_interrupt(f, req, &d);
  2969. err = fuse_fs_statfs(f->fs, path ? path : "/", &buf);
  2970. fuse_finish_interrupt(f, req, &d);
  2971. free_path(f, ino, path);
  2972. }
  2973. if (!err)
  2974. fuse_reply_statfs(req, &buf);
  2975. else
  2976. reply_err(req, err);
  2977. }
  2978. static void fuse_lib_setxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
  2979. const char *value, size_t size, int flags)
  2980. {
  2981. struct fuse *f = req_fuse_prepare(req);
  2982. char *path;
  2983. int err;
  2984. err = get_path(f, ino, &path);
  2985. if (!err) {
  2986. struct fuse_intr_data d;
  2987. fuse_prepare_interrupt(f, req, &d);
  2988. err = fuse_fs_setxattr(f->fs, path, name, value, size, flags);
  2989. fuse_finish_interrupt(f, req, &d);
  2990. free_path(f, ino, path);
  2991. }
  2992. reply_err(req, err);
  2993. }
  2994. static int common_getxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  2995. const char *name, char *value, size_t size)
  2996. {
  2997. int err;
  2998. char *path;
  2999. err = get_path(f, ino, &path);
  3000. if (!err) {
  3001. struct fuse_intr_data d;
  3002. fuse_prepare_interrupt(f, req, &d);
  3003. err = fuse_fs_getxattr(f->fs, path, name, value, size);
  3004. fuse_finish_interrupt(f, req, &d);
  3005. free_path(f, ino, path);
  3006. }
  3007. return err;
  3008. }
  3009. static void fuse_lib_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
  3010. size_t size)
  3011. {
  3012. struct fuse *f = req_fuse_prepare(req);
  3013. int res;
  3014. if (size) {
  3015. char *value = (char *) malloc(size);
  3016. if (value == NULL) {
  3017. reply_err(req, -ENOMEM);
  3018. return;
  3019. }
  3020. res = common_getxattr(f, req, ino, name, value, size);
  3021. if (res > 0)
  3022. fuse_reply_buf(req, value, res);
  3023. else
  3024. reply_err(req, res);
  3025. free(value);
  3026. } else {
  3027. res = common_getxattr(f, req, ino, name, NULL, 0);
  3028. if (res >= 0)
  3029. fuse_reply_xattr(req, res);
  3030. else
  3031. reply_err(req, res);
  3032. }
  3033. }
  3034. static int common_listxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  3035. char *list, size_t size)
  3036. {
  3037. char *path;
  3038. int err;
  3039. err = get_path(f, ino, &path);
  3040. if (!err) {
  3041. struct fuse_intr_data d;
  3042. fuse_prepare_interrupt(f, req, &d);
  3043. err = fuse_fs_listxattr(f->fs, path, list, size);
  3044. fuse_finish_interrupt(f, req, &d);
  3045. free_path(f, ino, path);
  3046. }
  3047. return err;
  3048. }
  3049. static void fuse_lib_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
  3050. {
  3051. struct fuse *f = req_fuse_prepare(req);
  3052. int res;
  3053. if (size) {
  3054. char *list = (char *) malloc(size);
  3055. if (list == NULL) {
  3056. reply_err(req, -ENOMEM);
  3057. return;
  3058. }
  3059. res = common_listxattr(f, req, ino, list, size);
  3060. if (res > 0)
  3061. fuse_reply_buf(req, list, res);
  3062. else
  3063. reply_err(req, res);
  3064. free(list);
  3065. } else {
  3066. res = common_listxattr(f, req, ino, NULL, 0);
  3067. if (res >= 0)
  3068. fuse_reply_xattr(req, res);
  3069. else
  3070. reply_err(req, res);
  3071. }
  3072. }
  3073. static void fuse_lib_removexattr(fuse_req_t req, fuse_ino_t ino,
  3074. const char *name)
  3075. {
  3076. struct fuse *f = req_fuse_prepare(req);
  3077. char *path;
  3078. int err;
  3079. err = get_path(f, ino, &path);
  3080. if (!err) {
  3081. struct fuse_intr_data d;
  3082. fuse_prepare_interrupt(f, req, &d);
  3083. err = fuse_fs_removexattr(f->fs, path, name);
  3084. fuse_finish_interrupt(f, req, &d);
  3085. free_path(f, ino, path);
  3086. }
  3087. reply_err(req, err);
  3088. }
  3089. static
  3090. void
  3091. fuse_lib_copy_file_range(fuse_req_t req_,
  3092. fuse_ino_t nodeid_in_,
  3093. off_t off_in_,
  3094. struct fuse_file_info *ffi_in_,
  3095. fuse_ino_t nodeid_out_,
  3096. off_t off_out_,
  3097. struct fuse_file_info *ffi_out_,
  3098. size_t len_,
  3099. int flags_)
  3100. {
  3101. ssize_t rv;
  3102. struct fuse *f;
  3103. struct fuse_intr_data d;
  3104. f = req_fuse_prepare(req_);
  3105. fuse_prepare_interrupt(f,req_,&d);
  3106. rv = fuse_fs_copy_file_range(f->fs,
  3107. ffi_in_,
  3108. off_in_,
  3109. ffi_out_,
  3110. off_out_,
  3111. len_,
  3112. flags_);
  3113. fuse_finish_interrupt(f,req_,&d);
  3114. if(rv >= 0)
  3115. fuse_reply_write(req_,rv);
  3116. else
  3117. reply_err(req_,rv);
  3118. }
  3119. static struct lock *locks_conflict(struct node *node, const struct lock *lock)
  3120. {
  3121. struct lock *l;
  3122. for (l = node->locks; l; l = l->next)
  3123. if (l->owner != lock->owner &&
  3124. lock->start <= l->end && l->start <= lock->end &&
  3125. (l->type == F_WRLCK || lock->type == F_WRLCK))
  3126. break;
  3127. return l;
  3128. }
  3129. static void delete_lock(struct lock **lockp)
  3130. {
  3131. struct lock *l = *lockp;
  3132. *lockp = l->next;
  3133. free(l);
  3134. }
  3135. static void insert_lock(struct lock **pos, struct lock *lock)
  3136. {
  3137. lock->next = *pos;
  3138. *pos = lock;
  3139. }
/* Insert `lock` into the node's per-owner lock list, merging adjacent
 * or overlapping same-type regions and carving conflicting regions of
 * the other type (the classic POSIX record-lock list algorithm).
 *
 * An F_UNLCK lock removes/splits existing regions instead of being
 * inserted.  Up to two spare nodes (newl1 for the inserted lock,
 * newl2 for a possible split remainder) are preallocated up front so
 * the list is never left half-modified on OOM; returns -ENOLCK when
 * that allocation fails, 0 otherwise.  The list is kept sorted;
 * entries for other owners are skipped untouched. */
static int locks_insert(struct node *node, struct lock *lock)
{
  struct lock **lp;
  struct lock *newl1 = NULL;
  struct lock *newl2 = NULL;

  /* A full-range unlock never needs new nodes; everything else might. */
  if (lock->type != F_UNLCK || lock->start != 0 ||
      lock->end != OFFSET_MAX) {
    newl1 = malloc(sizeof(struct lock));
    newl2 = malloc(sizeof(struct lock));

    if (!newl1 || !newl2) {
      free(newl1);
      free(newl2);
      return -ENOLCK;
    }
  }

  for (lp = &node->locks; *lp;) {
    struct lock *l = *lp;

    if (l->owner != lock->owner)
      goto skip;

    if (lock->type == l->type) {
      /* Same type: coalesce touching/overlapping regions into
       * `lock`, deleting the absorbed entry. */
      if (l->end < lock->start - 1)
        goto skip;
      if (lock->end < l->start - 1)
        break;
      if (l->start <= lock->start && lock->end <= l->end)
        goto out;
      if (l->start < lock->start)
        lock->start = l->start;
      if (lock->end < l->end)
        lock->end = l->end;
      goto delete;
    } else {
      /* Different type: carve `lock`'s range out of `l`,
       * possibly trimming, deleting, or splitting it. */
      if (l->end < lock->start)
        goto skip;
      if (lock->end < l->start)
        break;
      if (lock->start <= l->start && l->end <= lock->end)
        goto delete;
      if (l->end <= lock->end) {
        l->end = lock->start - 1;
        goto skip;
      }
      if (lock->start <= l->start) {
        l->start = lock->end + 1;
        break;
      }
      /* `lock` is strictly inside `l`: split `l` in two using
       * the preallocated newl2. */
      *newl2 = *l;
      newl2->start = lock->end + 1;
      l->end = lock->start - 1;
      insert_lock(&l->next, newl2);
      newl2 = NULL;
    }
  skip:
    lp = &l->next;
    continue;
  delete:
    delete_lock(lp);
  }
  /* Unlocks are never stored; anything else goes in at the cursor. */
  if (lock->type != F_UNLCK) {
    *newl1 = *lock;
    insert_lock(lp, newl1);
    newl1 = NULL;
  }
out:
  /* Free whichever preallocated nodes went unused. */
  free(newl1);
  free(newl2);
  return 0;
}
  3208. static void flock_to_lock(struct flock *flock, struct lock *lock)
  3209. {
  3210. memset(lock, 0, sizeof(struct lock));
  3211. lock->type = flock->l_type;
  3212. lock->start = flock->l_start;
  3213. lock->end =
  3214. flock->l_len ? flock->l_start + flock->l_len - 1 : OFFSET_MAX;
  3215. lock->pid = flock->l_pid;
  3216. }
  3217. static void lock_to_flock(struct lock *lock, struct flock *flock)
  3218. {
  3219. flock->l_type = lock->type;
  3220. flock->l_start = lock->start;
  3221. flock->l_len =
  3222. (lock->end == OFFSET_MAX) ? 0 : lock->end - lock->start + 1;
  3223. flock->l_pid = lock->pid;
  3224. }
/*
 * Common path for FLUSH and RELEASE-with-flush: call the filesystem's
 * flush op and issue an F_UNLCK over the whole file, implementing
 * close(2) semantics for this lock owner.  The local lock bookkeeping
 * is only updated when the filesystem actually implements ->lock
 * (errlock != -ENOSYS).  Returns the flush result; -ENOSYS from flush
 * is mapped to 0 when locking is supported, because FLUSH is then
 * needed regardless of op.flush().
 */
static int fuse_flush_common(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
			     struct fuse_file_info *fi)
{
	struct fuse_intr_data d;
	struct flock lock;
	struct lock l;
	int err;
	int errlock;

	fuse_prepare_interrupt(f, req, &d);
	memset(&lock, 0, sizeof(lock));
	lock.l_type = F_UNLCK;
	lock.l_whence = SEEK_SET;
	err = fuse_fs_flush(f->fs, fi);
	errlock = fuse_fs_lock(f->fs, fi, F_SETLK, &lock);
	fuse_finish_interrupt(f, req, &d);

	if (errlock != -ENOSYS) {
		flock_to_lock(&lock, &l);
		l.owner = fi->lock_owner;
		pthread_mutex_lock(&f->lock);
		locks_insert(get_node(f, ino), &l);
		pthread_mutex_unlock(&f->lock);

		/* if op.lock() is defined FLUSH is needed regardless
		   of op.flush() */
		if (err == -ENOSYS)
			err = 0;
	}

	return err;
}
  3253. static void fuse_lib_release(fuse_req_t req, fuse_ino_t ino,
  3254. struct fuse_file_info *fi)
  3255. {
  3256. struct fuse *f = req_fuse_prepare(req);
  3257. struct fuse_intr_data d;
  3258. int err = 0;
  3259. if (fi->flush) {
  3260. err = fuse_flush_common(f, req, ino, fi);
  3261. if (err == -ENOSYS)
  3262. err = 0;
  3263. }
  3264. fuse_prepare_interrupt(f, req, &d);
  3265. fuse_do_release(f, ino, fi);
  3266. fuse_finish_interrupt(f, req, &d);
  3267. reply_err(req, err);
  3268. }
  3269. static void fuse_lib_flush(fuse_req_t req, fuse_ino_t ino,
  3270. struct fuse_file_info *fi)
  3271. {
  3272. struct fuse *f = req_fuse_prepare(req);
  3273. int err;
  3274. err = fuse_flush_common(f, req, ino, fi);
  3275. reply_err(req, err);
  3276. }
  3277. static int fuse_lock_common(fuse_req_t req, fuse_ino_t ino,
  3278. struct fuse_file_info *fi, struct flock *lock,
  3279. int cmd)
  3280. {
  3281. struct fuse *f = req_fuse_prepare(req);
  3282. int err;
  3283. struct fuse_intr_data d;
  3284. fuse_prepare_interrupt(f, req, &d);
  3285. err = fuse_fs_lock(f->fs, fi, cmd, lock);
  3286. fuse_finish_interrupt(f, req, &d);
  3287. return err;
  3288. }
/*
 * GETLK request: test the request against the locally recorded locks
 * first (under f->lock); only when no local conflict exists is the
 * filesystem's own lock op consulted.  Either way the possibly
 * updated flock is returned to the kernel.
 */
static void fuse_lib_getlk(fuse_req_t req, fuse_ino_t ino,
			   struct fuse_file_info *fi, struct flock *lock)
{
	int err;
	struct lock l;
	struct lock *conflict;
	struct fuse *f = req_fuse(req);

	flock_to_lock(lock, &l);
	l.owner = fi->lock_owner;

	pthread_mutex_lock(&f->lock);
	conflict = locks_conflict(get_node(f, ino), &l);
	if (conflict)
		lock_to_flock(conflict, lock);
	pthread_mutex_unlock(&f->lock);

	if (!conflict)
		err = fuse_lock_common(req, ino, fi, lock, F_GETLK);
	else
		err = 0;

	if (!err)
		fuse_reply_lock(req, lock);
	else
		reply_err(req, err);
}
/*
 * SETLK/SETLKW request: forward to the filesystem and, on success,
 * record the lock locally so fuse_flush_common() can drop this
 * owner's locks at close time and locks_conflict() can answer GETLK.
 */
static void fuse_lib_setlk(fuse_req_t req, fuse_ino_t ino,
			   struct fuse_file_info *fi, struct flock *lock,
			   int sleep)
{
	int err = fuse_lock_common(req, ino, fi, lock,
				   sleep ? F_SETLKW : F_SETLK);
	if (!err) {
		struct fuse *f = req_fuse(req);
		struct lock l;

		flock_to_lock(lock, &l);
		l.owner = fi->lock_owner;
		pthread_mutex_lock(&f->lock);
		locks_insert(get_node(f, ino), &l);
		pthread_mutex_unlock(&f->lock);
	}

	reply_err(req, err);
}
  3329. static void fuse_lib_flock(fuse_req_t req, fuse_ino_t ino,
  3330. struct fuse_file_info *fi, int op)
  3331. {
  3332. struct fuse *f = req_fuse_prepare(req);
  3333. int err;
  3334. struct fuse_intr_data d;
  3335. fuse_prepare_interrupt(f, req, &d);
  3336. err = fuse_fs_flock(f->fs, fi, op);
  3337. fuse_finish_interrupt(f, req, &d);
  3338. reply_err(req, err);
  3339. }
  3340. static void fuse_lib_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
  3341. uint64_t idx)
  3342. {
  3343. struct fuse *f = req_fuse_prepare(req);
  3344. struct fuse_intr_data d;
  3345. char *path;
  3346. int err;
  3347. err = get_path(f, ino, &path);
  3348. if (!err) {
  3349. fuse_prepare_interrupt(f, req, &d);
  3350. err = fuse_fs_bmap(f->fs, path, blocksize, &idx);
  3351. fuse_finish_interrupt(f, req, &d);
  3352. free_path(f, ino, path);
  3353. }
  3354. if (!err)
  3355. fuse_reply_bmap(req, idx);
  3356. else
  3357. reply_err(req, err);
  3358. }
/*
 * IOCTL request.  Only the restricted form is supported:
 * FUSE_IOCTL_UNRESTRICTED is rejected with -EPERM.  For directory
 * ioctls (FUSE_IOCTL_DIR) the stashed dirhandle's file info is used
 * in place of llfi.  A single buffer serves as both input and output:
 * when out_bufsz is nonzero the input is copied into the freshly
 * allocated out_buf, otherwise in_buf is handed through directly.
 */
static void fuse_lib_ioctl(fuse_req_t req, fuse_ino_t ino, unsigned long cmd, void *arg,
			   struct fuse_file_info *llfi, unsigned int flags,
			   const void *in_buf, uint32_t in_bufsz,
			   uint32_t out_bufsz_)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_file_info fi;
	char *out_buf = NULL;
	int err;
	uint32_t out_bufsz = out_bufsz_;

	err = -EPERM;
	if (flags & FUSE_IOCTL_UNRESTRICTED)
		goto err;

	if (flags & FUSE_IOCTL_DIR)
		get_dirhandle(llfi, &fi);
	else
		fi = *llfi;

	if (out_bufsz) {
		err = -ENOMEM;
		out_buf = malloc(out_bufsz);
		if (!out_buf)
			goto err;
	}

	/* restricted ioctls use one shared buffer, so in/out sizes must
	   agree when both are present */
	assert(!in_bufsz || !out_bufsz || in_bufsz == out_bufsz);
	if (out_buf)
		memcpy(out_buf, in_buf, in_bufsz);

	fuse_prepare_interrupt(f, req, &d);
	err = fuse_fs_ioctl(f->fs, cmd, arg, &fi, flags,
			    out_buf ?: (void *)in_buf, &out_bufsz);
	fuse_finish_interrupt(f, req, &d);

	fuse_reply_ioctl(req, err, out_buf, out_bufsz);
	goto out;
err:
	reply_err(req, err);
out:
	free(out_buf);
}
  3397. static void fuse_lib_poll(fuse_req_t req, fuse_ino_t ino,
  3398. struct fuse_file_info *fi, struct fuse_pollhandle *ph)
  3399. {
  3400. struct fuse *f = req_fuse_prepare(req);
  3401. struct fuse_intr_data d;
  3402. int err;
  3403. unsigned revents = 0;
  3404. fuse_prepare_interrupt(f, req, &d);
  3405. err = fuse_fs_poll(f->fs, fi, ph, &revents);
  3406. fuse_finish_interrupt(f, req, &d);
  3407. if (!err)
  3408. fuse_reply_poll(req, revents);
  3409. else
  3410. reply_err(req, err);
  3411. }
  3412. static void fuse_lib_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
  3413. off_t offset, off_t length, struct fuse_file_info *fi)
  3414. {
  3415. struct fuse *f = req_fuse_prepare(req);
  3416. struct fuse_intr_data d;
  3417. int err;
  3418. fuse_prepare_interrupt(f, req, &d);
  3419. err = fuse_fs_fallocate(f->fs, mode, offset, length, fi);
  3420. fuse_finish_interrupt(f, req, &d);
  3421. reply_err(req, err);
  3422. }
  3423. static int clean_delay(struct fuse *f)
  3424. {
  3425. /*
  3426. * This is calculating the delay between clean runs. To
  3427. * reduce the number of cleans we are doing them 10 times
  3428. * within the remember window.
  3429. */
  3430. int min_sleep = 60;
  3431. int max_sleep = 3600;
  3432. int sleep_time = f->conf.remember / 10;
  3433. if (sleep_time > max_sleep)
  3434. return max_sleep;
  3435. if (sleep_time < min_sleep)
  3436. return min_sleep;
  3437. return sleep_time;
  3438. }
/*
 * Sweep the LRU list of forgotten-but-remembered nodes and drop those
 * whose forget time is older than the configured 'remember' window.
 * The list appears to be ordered oldest-first, so the walk stops at
 * the first node still inside the window.  Returns the delay in
 * seconds until the next sweep is due.
 */
int fuse_clean_cache(struct fuse *f)
{
	struct node_lru *lnode;
	struct list_head *curr, *next;
	struct node *node;
	struct timespec now;

	pthread_mutex_lock(&f->lock);

	curr_time(&now);

	for (curr = f->lru_table.next; curr != &f->lru_table; curr = next) {
		double age;

		/* save the successor: unref_node() may free 'curr' */
		next = curr->next;
		lnode = list_entry(curr, struct node_lru, lru);
		node = &lnode->node;

		age = diff_timespec(&now, &lnode->forget_time);
		if (age <= f->conf.remember)
			break;

		assert(node->nlookup == 1);

		/* Don't forget active directories */
		if (node->refctr > 1)
			continue;

		node->nlookup = 0;
		unhash_name(f, node);
		unref_node(f, node);
	}

	pthread_mutex_unlock(&f->lock);

	return clean_delay(f);
}
/* Dispatch table wiring every low-level FUSE request onto the
   path-based (high-level) handlers implemented in this file. */
static struct fuse_lowlevel_ops fuse_path_ops = {
	.init = fuse_lib_init,
	.destroy = fuse_lib_destroy,
	.lookup = fuse_lib_lookup,
	.forget = fuse_lib_forget,
	.forget_multi = fuse_lib_forget_multi,
	.getattr = fuse_lib_getattr,
	.setattr = fuse_lib_setattr,
	.access = fuse_lib_access,
	.readlink = fuse_lib_readlink,
	.mknod = fuse_lib_mknod,
	.mkdir = fuse_lib_mkdir,
	.unlink = fuse_lib_unlink,
	.rmdir = fuse_lib_rmdir,
	.symlink = fuse_lib_symlink,
	.rename = fuse_lib_rename,
	.link = fuse_lib_link,
	.create = fuse_lib_create,
	.open = fuse_lib_open,
	.read = fuse_lib_read,
	.write_buf = fuse_lib_write_buf,
	.flush = fuse_lib_flush,
	.release = fuse_lib_release,
	.fsync = fuse_lib_fsync,
	.opendir = fuse_lib_opendir,
	.readdir = fuse_lib_readdir,
	.readdir_plus = fuse_lib_readdir_plus,
	.releasedir = fuse_lib_releasedir,
	.fsyncdir = fuse_lib_fsyncdir,
	.statfs = fuse_lib_statfs,
	.setxattr = fuse_lib_setxattr,
	.getxattr = fuse_lib_getxattr,
	.listxattr = fuse_lib_listxattr,
	.removexattr = fuse_lib_removexattr,
	.getlk = fuse_lib_getlk,
	.setlk = fuse_lib_setlk,
	.flock = fuse_lib_flock,
	.bmap = fuse_lib_bmap,
	.ioctl = fuse_lib_ioctl,
	.poll = fuse_lib_poll,
	.fallocate = fuse_lib_fallocate,
	.copy_file_range = fuse_lib_copy_file_range,
};
/* Public wrapper: forward a poll-readiness notification to the
   low-level layer. */
int fuse_notify_poll(struct fuse_pollhandle *ph)
{
	return fuse_lowlevel_notify_poll(ph);
}
/* Release a command and its buffer; the payload must go before the
   container that points at it. */
static void free_cmd(struct fuse_cmd *cmd)
{
	free(cmd->buf);
	free(cmd);
}
/* Dispatch one previously read command into the session and free it;
   ownership of 'cmd' transfers to this function. */
void fuse_process_cmd(struct fuse *f, struct fuse_cmd *cmd)
{
	fuse_session_process(f->se, cmd->buf, cmd->buflen, cmd->ch);
	free_cmd(cmd);
}
/* Nonzero once the session has been told to exit. */
int fuse_exited(struct fuse *f)
{
	return fuse_session_exited(f->se);
}
/* Accessor for the low-level session owned by this fuse instance. */
struct fuse_session *fuse_get_session(struct fuse *f)
{
	return f->se;
}
  3531. static struct fuse_cmd *fuse_alloc_cmd(size_t bufsize)
  3532. {
  3533. struct fuse_cmd *cmd = (struct fuse_cmd *) malloc(sizeof(*cmd));
  3534. if (cmd == NULL) {
  3535. fprintf(stderr, "fuse: failed to allocate cmd\n");
  3536. return NULL;
  3537. }
  3538. cmd->buf = (char *) malloc(bufsize);
  3539. if (cmd->buf == NULL) {
  3540. fprintf(stderr, "fuse: failed to allocate read buffer\n");
  3541. free(cmd);
  3542. return NULL;
  3543. }
  3544. return cmd;
  3545. }
/*
 * Read one request from the session's channel into a freshly
 * allocated fuse_cmd.  Returns NULL on allocation failure or receive
 * error; transient errors (EINTR/EAGAIN) are silently retried by the
 * caller, while any other failure marks the session as exited.
 */
struct fuse_cmd *fuse_read_cmd(struct fuse *f)
{
	struct fuse_chan *ch = fuse_session_next_chan(f->se, NULL);
	size_t bufsize = fuse_chan_bufsize(ch);
	struct fuse_cmd *cmd = fuse_alloc_cmd(bufsize);

	if (cmd != NULL) {
		int res = fuse_chan_recv(&ch, cmd->buf, bufsize);
		if (res <= 0) {
			free_cmd(cmd);
			/* only interrupt/retry conditions are non-fatal */
			if (res < 0 && res != -EINTR && res != -EAGAIN)
				fuse_exit(f);
			return NULL;
		}
		cmd->buflen = res;
		cmd->ch = ch;
	}

	return cmd;
}
/* Path-based cache invalidation is not supported by this
   implementation; callers always get -EINVAL. */
int fuse_invalidate(struct fuse *f, const char *path)
{
	(void) f;
	(void) path;
	return -EINVAL;
}
/* Request termination of the event loop for this fuse instance. */
void fuse_exit(struct fuse *f)
{
	fuse_session_exit(f->se);
}
/* Return the per-thread request context (uid/gid/pid of the caller of
   the current operation). */
struct fuse_context *fuse_get_context(void)
{
	return &fuse_get_context_internal()->ctx;
}
/* Nonzero when the request currently being handled by this thread has
   been interrupted. */
int fuse_interrupted(void)
{
	return fuse_req_interrupted(fuse_get_context_internal()->req);
}
/* Kept for ABI compatibility only; the context getter can no longer
   be overridden. */
void fuse_set_getcontext_func(struct fuse_context *(*func)(void))
{
	(void) func;
	/* no-op */
}
/* Option-parsing keys that need special handling in fuse_lib_opt_proc(). */
enum {
	KEY_HELP,
};

/* Map an option template onto a member of struct fuse_config. */
#define FUSE_LIB_OPT(t, p, v) { t, offsetof(struct fuse_config, p), v }

/* Options understood by the high-level library.  "debug"/"-d" are
   recorded here and also kept (FUSE_OPT_KEY_KEEP) so lower layers see
   them too. */
static const struct fuse_opt fuse_lib_opts[] = {
	FUSE_OPT_KEY("-h", KEY_HELP),
	FUSE_OPT_KEY("--help", KEY_HELP),
	FUSE_OPT_KEY("debug", FUSE_OPT_KEY_KEEP),
	FUSE_OPT_KEY("-d", FUSE_OPT_KEY_KEEP),
	FUSE_LIB_OPT("debug", debug, 1),
	FUSE_LIB_OPT("-d", debug, 1),
	FUSE_LIB_OPT("umask=", set_mode, 1),
	FUSE_LIB_OPT("umask=%o", umask, 0),
	FUSE_LIB_OPT("uid=", set_uid, 1),
	FUSE_LIB_OPT("uid=%d", uid, 0),
	FUSE_LIB_OPT("gid=", set_gid, 1),
	FUSE_LIB_OPT("gid=%d", gid, 0),
	FUSE_LIB_OPT("noforget", remember, -1),
	FUSE_LIB_OPT("remember=%u", remember, 0),
	FUSE_LIB_OPT("intr", intr, 1),
	FUSE_LIB_OPT("intr_signal=%d", intr_signal, 0),
	FUSE_LIB_OPT("threads=%d", threads, 0),
	FUSE_LIB_OPT("use_ino", use_ino, 1),
	FUSE_OPT_END
};
/* Print the -o options understood by the high-level library to
   stderr; %i is substituted with the default interrupt signal. */
static void fuse_lib_help(void)
{
	fprintf(stderr,
		"    -o umask=M             set file permissions (octal)\n"
		"    -o uid=N               set file owner\n"
		"    -o gid=N               set file group\n"
		"    -o noforget            never forget cached inodes\n"
		"    -o remember=T          remember cached inodes for T seconds (0s)\n"
		"    -o intr                allow requests to be interrupted\n"
		"    -o intr_signal=NUM     signal to send on interrupt (%i)\n"
		"    -o threads=NUM         number of worker threads. 0 = autodetect.\n"
		"                           Negative values autodetect then divide by\n"
		"                           absolute value. default = 0\n"
		"\n", FUSE_DEFAULT_INTR_SIGNAL);
}
  3627. static int fuse_lib_opt_proc(void *data, const char *arg, int key,
  3628. struct fuse_args *outargs)
  3629. {
  3630. (void) arg; (void) outargs;
  3631. if (key == KEY_HELP) {
  3632. struct fuse_config *conf = (struct fuse_config *) data;
  3633. fuse_lib_help();
  3634. conf->help = 1;
  3635. }
  3636. return 1;
  3637. }
  3638. int fuse_is_lib_option(const char *opt)
  3639. {
  3640. return fuse_lowlevel_is_lib_option(opt) ||
  3641. fuse_opt_match(fuse_lib_opts, opt);
  3642. }
  3643. static int fuse_init_intr_signal(int signum, int *installed)
  3644. {
  3645. struct sigaction old_sa;
  3646. if (sigaction(signum, NULL, &old_sa) == -1) {
  3647. perror("fuse: cannot get old signal handler");
  3648. return -1;
  3649. }
  3650. if (old_sa.sa_handler == SIG_DFL) {
  3651. struct sigaction sa;
  3652. memset(&sa, 0, sizeof(struct sigaction));
  3653. sa.sa_handler = fuse_intr_sighandler;
  3654. sigemptyset(&sa.sa_mask);
  3655. if (sigaction(signum, &sa, NULL) == -1) {
  3656. perror("fuse: cannot set interrupt signal handler");
  3657. return -1;
  3658. }
  3659. *installed = 1;
  3660. }
  3661. return 0;
  3662. }
  3663. static void fuse_restore_intr_signal(int signum)
  3664. {
  3665. struct sigaction sa;
  3666. memset(&sa, 0, sizeof(struct sigaction));
  3667. sa.sa_handler = SIG_DFL;
  3668. sigaction(signum, &sa, NULL);
  3669. }
  3670. struct fuse_fs *fuse_fs_new(const struct fuse_operations *op, size_t op_size,
  3671. void *user_data)
  3672. {
  3673. struct fuse_fs *fs;
  3674. if (sizeof(struct fuse_operations) < op_size) {
  3675. fprintf(stderr, "fuse: warning: library too old, some operations may not not work\n");
  3676. op_size = sizeof(struct fuse_operations);
  3677. }
  3678. fs = (struct fuse_fs *) calloc(1, sizeof(struct fuse_fs));
  3679. if (!fs) {
  3680. fprintf(stderr, "fuse: failed to allocate fuse_fs object\n");
  3681. return NULL;
  3682. }
  3683. fs->user_data = user_data;
  3684. if (op)
  3685. memcpy(&fs->op, op, op_size);
  3686. return fs;
  3687. }
  3688. static int node_table_init(struct node_table *t)
  3689. {
  3690. t->size = NODE_TABLE_MIN_SIZE;
  3691. t->array = (struct node **) calloc(1, sizeof(struct node *) * t->size);
  3692. if (t->array == NULL) {
  3693. fprintf(stderr, "fuse: memory allocation failed\n");
  3694. return -1;
  3695. }
  3696. t->use = 0;
  3697. t->split = 0;
  3698. return 0;
  3699. }
  3700. static void *fuse_prune_nodes(void *fuse)
  3701. {
  3702. struct fuse *f = fuse;
  3703. int sleep_time;
  3704. while(1) {
  3705. sleep_time = fuse_clean_cache(f);
  3706. sleep(sleep_time);
  3707. }
  3708. return NULL;
  3709. }
  3710. int fuse_start_cleanup_thread(struct fuse *f)
  3711. {
  3712. if (lru_enabled(f))
  3713. return fuse_start_thread(&f->prune_thread, fuse_prune_nodes, f);
  3714. return 0;
  3715. }
/*
 * Cancel and join the prune thread started by
 * fuse_start_cleanup_thread().
 *
 * NOTE(review): the cancel is issued while holding f->lock — this
 * looks intended to keep the cancellation from landing while the
 * prune thread is inside its f->lock critical section in
 * fuse_clean_cache(); confirm against pthread cancellation-point
 * semantics.
 */
void fuse_stop_cleanup_thread(struct fuse *f)
{
	if (lru_enabled(f)) {
		pthread_mutex_lock(&f->lock);
		pthread_cancel(f->prune_thread);
		pthread_mutex_unlock(&f->lock);
		pthread_join(f->prune_thread, NULL);
	}
}
/*
 * Create and initialize a struct fuse instance: allocate the object,
 * wrap the operations in a fuse_fs, parse library options, create the
 * low-level session, initialize the node/name hash tables, and set up
 * the root node.  On any failure the partially built state is torn
 * down via the goto-cleanup chain and NULL is returned.
 */
struct fuse *fuse_new_common(struct fuse_chan *ch, struct fuse_args *args,
			     const struct fuse_operations *op,
			     size_t op_size, void *user_data)
{
	struct fuse *f;
	struct node *root;
	struct fuse_fs *fs;
	struct fuse_lowlevel_ops llop = fuse_path_ops;

	if (fuse_create_context_key() == -1)
		goto out;

	f = (struct fuse *) calloc(1, sizeof(struct fuse));
	if (f == NULL) {
		fprintf(stderr, "fuse: failed to allocate fuse object\n");
		goto out_delete_context_key;
	}

	fs = fuse_fs_new(op, op_size, user_data);
	if (!fs)
		goto out_free;

	f->fs = fs;

	/* Oh f**k, this is ugly! */
	/* advertise lock support to the kernel only when the filesystem
	   implements it */
	if (!fs->op.lock) {
		llop.getlk = NULL;
		llop.setlk = NULL;
	}

	f->conf.intr_signal = FUSE_DEFAULT_INTR_SIGNAL;
	f->pagesize = getpagesize();
	init_list_head(&f->partial_slabs);
	init_list_head(&f->full_slabs);
	init_list_head(&f->lru_table);

	if (fuse_opt_parse(args, &f->conf, fuse_lib_opts,
			   fuse_lib_opt_proc) == -1)
		goto out_free_fs;

	f->se = fuse_lowlevel_new_common(args, &llop, sizeof(llop), f);
	if (f->se == NULL) {
		goto out_free_fs;
	}

	fuse_session_add_chan(f->se, ch);

	/* Trace topmost layer by default */
	srand(time(NULL));
	f->fs->debug = f->conf.debug;
	f->ctr = 0;
	f->generation = rand64();
	if (node_table_init(&f->name_table) == -1)
		goto out_free_session;

	if (node_table_init(&f->id_table) == -1)
		goto out_free_name_table;

	fuse_mutex_init(&f->lock);

	root = alloc_node(f);
	if (root == NULL) {
		fprintf(stderr, "fuse: memory allocation failed\n");
		goto out_free_id_table;
	}
	if (lru_enabled(f)) {
		struct node_lru *lnode = node_lru(root);
		init_list_head(&lnode->lru);
	}

	strcpy(root->inline_name, "/");
	root->name = root->inline_name;

	if (f->conf.intr &&
	    fuse_init_intr_signal(f->conf.intr_signal,
				  &f->intr_installed) == -1)
		goto out_free_root;

	root->parent = NULL;
	root->nodeid = FUSE_ROOT_ID;
	inc_nlookup(root);
	hash_id(f, root);

	return f;

out_free_root:
	free(root);
out_free_id_table:
	free(f->id_table.array);
out_free_name_table:
	free(f->name_table.array);
out_free_session:
	fuse_session_destroy(f->se);
out_free_fs:
	/* Horrible compatibility hack to stop the destructor from being
	   called on the filesystem without init being called first */
	fs->op.destroy = NULL;
	fuse_fs_destroy(f->fs);
out_free:
	free(f);
out_delete_context_key:
	fuse_delete_context_key();
out:
	return NULL;
}
/* Public constructor: thin wrapper around fuse_new_common(). */
struct fuse *fuse_new(struct fuse_chan *ch, struct fuse_args *args,
		      const struct fuse_operations *op, size_t op_size,
		      void *user_data)
{
	return fuse_new_common(ch, args, op, op_size, user_data);
}
/*
 * Tear down an instance built by fuse_new_common(): restore the
 * interrupt signal handler, release any still-hidden file handles,
 * free every node in the id hash, and destroy the session and the
 * context key.  The two passes over id_table are deliberate — the
 * first may call back into the filesystem (fuse_fs_free_hide), the
 * second frees the nodes.
 */
void fuse_destroy(struct fuse *f)
{
	size_t i;

	if (f->conf.intr && f->intr_installed)
		fuse_restore_intr_signal(f->conf.intr_signal);

	if (f->fs) {
		struct fuse_context_i *c = fuse_get_context_internal();

		/* provide a context so filesystem callbacks below can use
		   fuse_get_context() */
		memset(c, 0, sizeof(*c));
		c->ctx.fuse = f;

		for (i = 0; i < f->id_table.size; i++) {
			struct node *node;

			for (node = f->id_table.array[i]; node != NULL; node = node->id_next)
			{
				if (node->is_hidden)
					fuse_fs_free_hide(f->fs,node->hidden_fh);
			}
		}
	}

	for (i = 0; i < f->id_table.size; i++) {
		struct node *node;
		struct node *next;

		for (node = f->id_table.array[i]; node != NULL; node = next) {
			next = node->id_next;
			free_node(f, node);
			f->id_table.use--;
		}
	}

	assert(list_empty(&f->partial_slabs));
	assert(list_empty(&f->full_slabs));

	free(f->id_table.array);
	free(f->name_table.array);
	pthread_mutex_destroy(&f->lock);
	fuse_session_destroy(f->se);
	free(f);
	fuse_delete_context_key();
}
/* Accessor for the configured worker-thread count ("-o threads=N"). */
int
fuse_config_num_threads(const struct fuse *fuse_)
{
	return fuse_->conf.threads;
}