You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

4818 lines
109 KiB

  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU LGPLv2.
  5. See the file COPYING.LIB
  6. */
  7. /* For pthread_rwlock_t */
  8. #define _GNU_SOURCE
  9. #include "config.h"
  10. #include "fuse_i.h"
  11. #include "fuse_lowlevel.h"
  12. #include "fuse_opt.h"
  13. #include "fuse_misc.h"
  14. #include "fuse_common_compat.h"
  15. #include "fuse_compat.h"
  16. #include "fuse_kernel.h"
  17. #include <stdio.h>
  18. #include <string.h>
  19. #include <stdlib.h>
  20. #include <stddef.h>
  21. #include <stdbool.h>
  22. #include <unistd.h>
  23. #include <time.h>
  24. #include <fcntl.h>
  25. #include <limits.h>
  26. #include <errno.h>
  27. #include <signal.h>
  28. #include <dlfcn.h>
  29. #include <assert.h>
  30. #include <poll.h>
  31. #include <sys/param.h>
  32. #include <sys/uio.h>
  33. #include <sys/time.h>
  34. #include <sys/mman.h>
  35. #include <sys/file.h>
  36. #define FUSE_NODE_SLAB 1
  37. #ifndef MAP_ANONYMOUS
  38. #undef FUSE_NODE_SLAB
  39. #endif
  40. #define FUSE_DEFAULT_INTR_SIGNAL SIGUSR1
  41. #define FUSE_UNKNOWN_INO 0xffffffff
  42. #define OFFSET_MAX 0x7fffffffffffffffLL
  43. #define NODE_TABLE_MIN_SIZE 8192
/* Mount-time configuration parsed from "-o" option strings. */
struct fuse_config {
	unsigned int uid;	/* st_uid forced when set_uid is on */
	unsigned int gid;	/* st_gid forced when set_gid is on */
	unsigned int umask;	/* mode mask applied when set_mode is on */
	double entry_timeout;
	double negative_timeout;
	double attr_timeout;
	double ac_attr_timeout;
	int ac_attr_timeout_set;	/* nonzero if ac_attr_timeout was given */
	int remember;		/* keep forgotten nodes on an LRU (seconds) */
	int nopath;		/* filesystem does not need path strings */
	int debug;
	int hard_remove; /* not used */
	int use_ino;		/* filesystem supplies its own inode numbers */
	int readdir_ino;
	int set_mode;
	int set_uid;
	int set_gid;
	int kernel_cache;
	int auto_cache;
	int intr;		/* enable interrupt support */
	int intr_signal;	/* signal used to interrupt blocked workers */
	int help;
	int threads;
};
/* One filesystem implementation: callbacks plus private data. */
struct fuse_fs {
	struct fuse_operations op;	/* user-supplied operation table */
	void *user_data;		/* opaque pointer handed to callbacks */
	int compat;			/* API compatibility version */
	int debug;
};
/* A loaded filesystem module shared object. */
struct fusemod_so {
	void *handle;	/* presumably a dlopen() handle (dlfcn.h is included) — confirm */
	int ctr;	/* reference count of users of this object */
};
/* One waiter queued for contended tree locks (see queue_path()/wait_path()). */
struct lock_queue_element {
	struct lock_queue_element *next;	/* singly-linked FIFO queue */
	pthread_cond_t cond;			/* signalled on retry/completion */
	fuse_ino_t nodeid1;
	const char *name1;
	char **path1;				/* where to store the built path */
	struct node **wnode1;			/* non-NULL: write-lock the named node */
	fuse_ino_t nodeid2;			/* second path (rename/link) or unused */
	const char *name2;
	char **path2;
	struct node **wnode2;
	int err;				/* result delivered to the waiter */
	bool first_locked : 1;			/* locks for path1 currently held */
	bool second_locked : 1;			/* locks for path2 currently held */
	bool done : 1;				/* request finished; waiter may return */
};
/* Hash table of nodes, resized incrementally by linear hashing. */
struct node_table {
	struct node **array;	/* bucket heads */
	size_t use;		/* number of stored nodes */
	size_t size;		/* number of buckets */
	size_t split;		/* next bucket to split (linear hashing) */
};
/*
 * Recover the address of the enclosing structure from a pointer to one
 * of its members (same idiom as the Linux kernel macro).
 */
#define container_of(ptr, type, member) ({ \
	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
	(type *)( (char *)__mptr - offsetof(type,member) );})

/* Map a list_head pointer back to its containing structure. */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)
/* Minimal circular doubly-linked list node, kernel style. */
struct list_head {
	struct list_head *next;
	struct list_head *prev;
};
/* Header at the start of one page-sized slab of nodes. */
struct node_slab {
	struct list_head list; /* must be the first member */
	struct list_head freelist;	/* free node objects in this slab */
	int used;			/* number of allocated nodes */
};
/* Top-level state for one mounted filesystem. */
struct fuse {
	struct fuse_session *se;		/* low-level session */
	struct node_table name_table;		/* (parent, name) -> node */
	struct node_table id_table;		/* nodeid -> node */
	struct list_head lru_table;		/* forgettable nodes, oldest first */
	fuse_ino_t ctr;				/* last assigned inode number */
	unsigned int generation;		/* bumped when ctr wraps (next_id) */
	unsigned int hidectr;			/* presumably used to build hidden-file names — confirm */
	pthread_mutex_t lock;			/* protects tables, nodes, lockq */
	struct fuse_config conf;
	int intr_installed;
	struct fuse_fs *fs;
	int nullpath_ok;			/* ops tolerate NULL paths */
	int utime_omit_ok;
	struct lock_queue_element *lockq;	/* waiters for contended tree locks */
	int pagesize;				/* slab size used by the node allocator */
	struct list_head partial_slabs;		/* slabs that still have free nodes */
	struct list_head full_slabs;		/* slabs with every node in use */
	pthread_t prune_thread;			/* NOTE(review): presumably prunes remembered nodes — confirm */
};
/* A byte-range lock recorded on a node. */
struct lock {
	int type;		/* presumably F_RDLCK/F_WRLCK/F_UNLCK — confirm at users */
	off_t start;		/* first byte covered */
	off_t end;		/* last byte covered */
	pid_t pid;
	uint64_t owner;		/* lock-owner identifier */
	struct lock *next;	/* next lock on the same node */
};
/*
 * In-memory inode.  Nodes are hashed by id and by (parent, name) and
 * form a tree through 'parent' pointers.
 */
struct node {
	struct node *name_next;		/* name-hash bucket chain */
	struct node *id_next;		/* id-hash bucket chain */
	fuse_ino_t nodeid;
	unsigned int generation;	/* disambiguates reused nodeids */
	int refctr;			/* internal refs (children, hashes) */
	struct node *parent;
	char *name;			/* points at inline_name for short names */
	uint64_t nlookup;		/* kernel lookup count */
	int open_count;
	struct timespec stat_updated;	/* presumably when cached size/mtime were taken — confirm */
	struct timespec mtime;
	off_t size;
	struct lock *locks;		/* byte-range locks on this node */
	uint64_t hidden_fh;
	char is_hidden;
	char cache_valid;
	int treelock;			/* >0: readers; TREELOCK_WRITE: writer */
	char inline_name[32];		/* in-object storage for short names */
};
  163. #define TREELOCK_WRITE -1
  164. #define TREELOCK_WAIT_OFFSET INT_MIN
/* Node extended with LRU linkage; allocated when "remember" is active. */
struct node_lru {
	struct node node;		/* must be first: node pointers are cast */
	struct list_head lru;		/* position in f->lru_table */
	struct timespec forget_time;	/* when nlookup last dropped to 1 */
};
/* State for one open directory handle (opendir .. releasedir). */
struct fuse_dh {
	pthread_mutex_t lock;
	struct fuse *fuse;
	fuse_req_t req;
	char *contents;		/* accumulated directory entry buffer */
	int allocated;
	unsigned len;		/* bytes used in 'contents' */
	unsigned size;		/* bytes allocated for 'contents' */
	unsigned needlen;	/* presumably size requested by the kernel — confirm */
	int filled;
	uint64_t fh;		/* filesystem's own directory handle */
	int error;
	fuse_ino_t nodeid;
};
/* old dir handle */
struct fuse_dirhandle {
	fuse_fill_dir_t filler;	/* entry callback for the compat API */
	void *buf;		/* opaque argument passed through to filler */
};
/* Per-thread fuse context together with the request it belongs to. */
struct fuse_context_i {
	struct fuse_context ctx;
	fuse_req_t req;		/* request currently being processed */
};
  193. static pthread_key_t fuse_context_key;
  194. static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER;
  195. static int fuse_context_ref;
  196. static void init_list_head(struct list_head *list)
  197. {
  198. list->next = list;
  199. list->prev = list;
  200. }
  201. static int list_empty(const struct list_head *head)
  202. {
  203. return head->next == head;
  204. }
  205. static void list_add(struct list_head *new, struct list_head *prev,
  206. struct list_head *next)
  207. {
  208. next->prev = new;
  209. new->next = next;
  210. new->prev = prev;
  211. prev->next = new;
  212. }
  213. static inline void list_add_head(struct list_head *new, struct list_head *head)
  214. {
  215. list_add(new, head, head->next);
  216. }
  217. static inline void list_add_tail(struct list_head *new, struct list_head *head)
  218. {
  219. list_add(new, head->prev, head);
  220. }
  221. static inline void list_del(struct list_head *entry)
  222. {
  223. struct list_head *prev = entry->prev;
  224. struct list_head *next = entry->next;
  225. next->prev = prev;
  226. prev->next = next;
  227. }
  228. static inline int lru_enabled(struct fuse *f)
  229. {
  230. return f->conf.remember > 0;
  231. }
  232. static struct node_lru *node_lru(struct node *node)
  233. {
  234. return (struct node_lru *) node;
  235. }
  236. static size_t get_node_size(struct fuse *f)
  237. {
  238. if (lru_enabled(f))
  239. return sizeof(struct node_lru);
  240. else
  241. return sizeof(struct node);
  242. }
  243. #ifdef FUSE_NODE_SLAB
  244. static struct node_slab *list_to_slab(struct list_head *head)
  245. {
  246. return (struct node_slab *) head;
  247. }
  248. static struct node_slab *node_to_slab(struct fuse *f, struct node *node)
  249. {
  250. return (struct node_slab *) (((uintptr_t) node) & ~((uintptr_t) f->pagesize - 1));
  251. }
/*
 * Map one anonymous page and carve it into node-sized objects.  The
 * struct node_slab header sits at the start of the page and the nodes
 * are packed against the end, so node_to_slab() can recover the header
 * by masking a node address down to the page boundary.  The new slab
 * goes on the partial list.  Returns 0 on success, -1 if mmap fails.
 */
static int alloc_slab(struct fuse *f)
{
	void *mem;
	struct node_slab *slab;
	char *start;
	size_t num;
	size_t i;
	size_t node_size = get_node_size(f);

	mem = mmap(NULL, f->pagesize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED)
		return -1;

	slab = mem;
	init_list_head(&slab->freelist);
	slab->used = 0;
	/* how many whole nodes fit after the header */
	num = (f->pagesize - sizeof(struct node_slab)) / node_size;

	start = (char *) mem + f->pagesize - num * node_size;
	for (i = 0; i < num; i++) {
		struct list_head *n;

		n = (struct list_head *) (start + i * node_size);
		list_add_tail(n, &slab->freelist);
	}
	list_add_tail(&slab->list, &f->partial_slabs);

	return 0;
}
/*
 * Pop one node from the first partial slab, allocating a new slab when
 * none has free space.  A slab whose freelist empties is moved to the
 * full list.  Returns NULL on allocation failure.
 */
static struct node *alloc_node(struct fuse *f)
{
	struct node_slab *slab;
	struct list_head *node;

	if (list_empty(&f->partial_slabs)) {
		int res = alloc_slab(f);
		if (res != 0)
			return NULL;
	}
	slab = list_to_slab(f->partial_slabs.next);
	slab->used++;
	node = slab->freelist.next;
	list_del(node);
	if (list_empty(&slab->freelist)) {
		list_del(&slab->list);
		list_add_tail(&slab->list, &f->full_slabs);
	}
	/* NOTE(review): only sizeof(struct node) is zeroed even when the slab
	   holds struct node_lru objects; the lru fields appear to be set up
	   by the caller (find_node) — confirm before relying on them */
	memset(node, 0, sizeof(struct node));

	return (struct node *) node;
}
  297. static void free_slab(struct fuse *f, struct node_slab *slab)
  298. {
  299. int res;
  300. list_del(&slab->list);
  301. res = munmap(slab, f->pagesize);
  302. if (res == -1)
  303. fprintf(stderr, "fuse warning: munmap(%p) failed\n", slab);
  304. }
/*
 * Return 'node' to its slab's freelist.  A slab that was full becomes
 * partial again; a slab whose last node is released is unmapped.
 */
static void free_node_mem(struct fuse *f, struct node *node)
{
	struct node_slab *slab = node_to_slab(f, node);
	struct list_head *n = (struct list_head *) node;

	slab->used--;
	if (slab->used) {
		if (list_empty(&slab->freelist)) {
			/* was on the full list; move back to partial */
			list_del(&slab->list);
			list_add_tail(&slab->list, &f->partial_slabs);
		}
		list_add_head(n, &slab->freelist);
	} else {
		free_slab(f, slab);
	}
}
  320. #else
  321. static struct node *alloc_node(struct fuse *f)
  322. {
  323. return (struct node *) calloc(1, get_node_size(f));
  324. }
  325. static void free_node_mem(struct fuse *f, struct node *node)
  326. {
  327. (void) f;
  328. free(node);
  329. }
  330. #endif
/*
 * Hash an inode number into the id table using a multiplicative hash
 * (2654435761 is the 32-bit golden-ratio constant).  The table uses
 * linear hashing: buckets below 'split' have already been redistributed
 * for the doubled size and use the full-size hash, the rest still use
 * the half-size ("old") hash.
 */
static size_t id_hash(struct fuse *f, fuse_ino_t ino)
{
	uint64_t hash = ((uint32_t) ino * 2654435761U) % f->id_table.size;
	uint64_t oldhash = hash % (f->id_table.size / 2);

	if (oldhash >= f->id_table.split)
		return oldhash;
	else
		return hash;
}
  340. static struct node *get_node_nocheck(struct fuse *f, fuse_ino_t nodeid)
  341. {
  342. size_t hash = id_hash(f, nodeid);
  343. struct node *node;
  344. for (node = f->id_table.array[hash]; node != NULL; node = node->id_next)
  345. if (node->nodeid == nodeid)
  346. return node;
  347. return NULL;
  348. }
  349. static struct node *get_node(struct fuse *f, fuse_ino_t nodeid)
  350. {
  351. struct node *node = get_node_nocheck(f, nodeid);
  352. if (!node) {
  353. fprintf(stderr, "fuse internal error: node %llu not found\n",
  354. (unsigned long long) nodeid);
  355. abort();
  356. }
  357. return node;
  358. }
  359. static void curr_time(struct timespec *now);
  360. static double diff_timespec(const struct timespec *t1,
  361. const struct timespec *t2);
  362. static void remove_node_lru(struct node *node)
  363. {
  364. struct node_lru *lnode = node_lru(node);
  365. list_del(&lnode->lru);
  366. init_list_head(&lnode->lru);
  367. }
  368. static void set_forget_time(struct fuse *f, struct node *node)
  369. {
  370. struct node_lru *lnode = node_lru(node);
  371. list_del(&lnode->lru);
  372. list_add_tail(&lnode->lru, &f->lru_table);
  373. curr_time(&lnode->forget_time);
  374. }
  375. static void free_node(struct fuse *f, struct node *node)
  376. {
  377. if (node->name != node->inline_name)
  378. free(node->name);
  379. free_node_mem(f, node);
  380. }
  381. static void node_table_reduce(struct node_table *t)
  382. {
  383. size_t newsize = t->size / 2;
  384. void *newarray;
  385. if (newsize < NODE_TABLE_MIN_SIZE)
  386. return;
  387. newarray = realloc(t->array, sizeof(struct node *) * newsize);
  388. if (newarray != NULL)
  389. t->array = newarray;
  390. t->size = newsize;
  391. t->split = t->size / 2;
  392. }
/*
 * Reverse one step of linear hashing on the id table: fold up to eight
 * upper-half buckets back onto the end of their lower-half partners.
 * Once the split reaches zero the table itself is halved via
 * node_table_reduce().  Called when usage drops below a quarter.
 */
static void remerge_id(struct fuse *f)
{
	struct node_table *t = &f->id_table;
	int iter;

	if (t->split == 0)
		node_table_reduce(t);

	for (iter = 8; t->split > 0 && iter; iter--) {
		struct node **upper;

		t->split--;
		upper = &t->array[t->split + t->size / 2];
		if (*upper) {
			struct node **nodep;

			/* walk to the end of the lower chain, then append */
			for (nodep = &t->array[t->split]; *nodep;
			     nodep = &(*nodep)->id_next);

			*nodep = *upper;
			*upper = NULL;
			break;
		}
	}
}
/*
 * Remove 'node' from the id hash chain.  When the load factor falls
 * below 1/4, merge split buckets back (and possibly shrink the table).
 */
static void unhash_id(struct fuse *f, struct node *node)
{
	struct node **nodep = &f->id_table.array[id_hash(f, node->nodeid)];

	for (; *nodep != NULL; nodep = &(*nodep)->id_next)
		if (*nodep == node) {
			*nodep = node->id_next;
			f->id_table.use--;

			if(f->id_table.use < f->id_table.size / 4)
				remerge_id(f);
			return;
		}
}
  425. static int node_table_resize(struct node_table *t)
  426. {
  427. size_t newsize = t->size * 2;
  428. void *newarray;
  429. newarray = realloc(t->array, sizeof(struct node *) * newsize);
  430. if (newarray == NULL)
  431. return -1;
  432. t->array = newarray;
  433. memset(t->array + t->size, 0, t->size * sizeof(struct node *));
  434. t->size = newsize;
  435. t->split = 0;
  436. return 0;
  437. }
/*
 * Advance linear hashing on the id table by one bucket: redistribute
 * the chain at index 'split' between its old position and its
 * full-size hash position.  When every bucket has been split the
 * bucket array is doubled and splitting starts over.
 */
static void rehash_id(struct fuse *f)
{
	struct node_table *t = &f->id_table;
	struct node **nodep;
	struct node **next;
	size_t hash;

	if (t->split == t->size / 2)
		return;

	hash = t->split;
	t->split++;
	for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
		struct node *node = *nodep;
		size_t newhash = id_hash(f, node->nodeid);

		if (newhash != hash) {
			/* unlink here and push onto the new bucket */
			next = nodep;
			*nodep = node->id_next;
			node->id_next = t->array[newhash];
			t->array[newhash] = node;
		} else {
			next = &node->id_next;
		}
	}
	if (t->split == t->size / 2)
		node_table_resize(t);
}
  463. static void hash_id(struct fuse *f, struct node *node)
  464. {
  465. size_t hash = id_hash(f, node->nodeid);
  466. node->id_next = f->id_table.array[hash];
  467. f->id_table.array[hash] = node;
  468. f->id_table.use++;
  469. if (f->id_table.use >= f->id_table.size / 2)
  470. rehash_id(f);
  471. }
/*
 * Hash (parent inode, child name) into the name table with a simple
 * 31-multiplier string hash seeded by the parent id.  The same
 * linear-hashing split scheme as id_hash() applies.
 */
static size_t name_hash(struct fuse *f, fuse_ino_t parent,
			const char *name)
{
	uint64_t hash = parent;
	uint64_t oldhash;

	for (; *name; name++)
		hash = hash * 31 + (unsigned char) *name;

	hash %= f->name_table.size;
	oldhash = hash % (f->name_table.size / 2);
	if (oldhash >= f->name_table.split)
		return oldhash;
	else
		return hash;
}
  486. static void unref_node(struct fuse *f, struct node *node);
/*
 * Reverse one step of linear hashing on the name table (mirror of
 * remerge_id()): fold up to eight upper-half buckets back into their
 * lower-half partners, shrinking the table once fully merged.
 */
static void remerge_name(struct fuse *f)
{
	struct node_table *t = &f->name_table;
	int iter;

	if (t->split == 0)
		node_table_reduce(t);

	for (iter = 8; t->split > 0 && iter; iter--) {
		struct node **upper;

		t->split--;
		upper = &t->array[t->split + t->size / 2];
		if (*upper) {
			struct node **nodep;

			/* append the upper chain to the lower bucket */
			for (nodep = &t->array[t->split]; *nodep;
			     nodep = &(*nodep)->name_next);

			*nodep = *upper;
			*upper = NULL;
			break;
		}
	}
}
/*
 * Detach 'node' from its (parent, name) hash entry: unlink it from the
 * bucket chain, drop the reference it held on its parent and free a
 * heap-allocated name (inline names live inside the node).  Aborts if
 * the node has a name but is missing from the table, since that means
 * the internal state is corrupted.
 */
static void unhash_name(struct fuse *f, struct node *node)
{
	if (node->name) {
		size_t hash = name_hash(f, node->parent->nodeid, node->name);
		struct node **nodep = &f->name_table.array[hash];

		for (; *nodep != NULL; nodep = &(*nodep)->name_next)
			if (*nodep == node) {
				*nodep = node->name_next;
				node->name_next = NULL;
				unref_node(f, node->parent);
				if (node->name != node->inline_name)
					free(node->name);
				node->name = NULL;
				node->parent = NULL;
				f->name_table.use--;

				if (f->name_table.use < f->name_table.size / 4)
					remerge_name(f);
				return;
			}
		fprintf(stderr,
			"fuse internal error: unable to unhash node: %llu\n",
			(unsigned long long) node->nodeid);
		abort();
	}
}
/*
 * Advance linear hashing on the name table by one bucket: split the
 * chain at index 'split' between its old and its full-size position,
 * doubling the table once all buckets are split (see rehash_id()).
 */
static void rehash_name(struct fuse *f)
{
	struct node_table *t = &f->name_table;
	struct node **nodep;
	struct node **next;
	size_t hash;

	if (t->split == t->size / 2)
		return;

	hash = t->split;
	t->split++;
	for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
		struct node *node = *nodep;
		size_t newhash = name_hash(f, node->parent->nodeid, node->name);

		if (newhash != hash) {
			/* unlink here and push onto the new bucket */
			next = nodep;
			*nodep = node->name_next;
			node->name_next = t->array[newhash];
			t->array[newhash] = node;
		} else {
			next = &node->name_next;
		}
	}
	if (t->split == t->size / 2)
		node_table_resize(t);
}
/*
 * Insert 'node' into the name table under (parentid, name) and take a
 * reference on the parent.  Names shorter than the inline buffer are
 * copied into the node to avoid a heap allocation.  Returns 0 on
 * success, -1 when strdup of a long name fails.
 */
static int hash_name(struct fuse *f, struct node *node, fuse_ino_t parentid,
		     const char *name)
{
	size_t hash = name_hash(f, parentid, name);
	struct node *parent = get_node(f, parentid);
	if (strlen(name) < sizeof(node->inline_name)) {
		strcpy(node->inline_name, name);
		node->name = node->inline_name;
	} else {
		node->name = strdup(name);
		if (node->name == NULL)
			return -1;
	}
	parent->refctr ++;
	node->parent = parent;
	node->name_next = f->name_table.array[hash];
	f->name_table.array[hash] = node;
	f->name_table.use++;

	if (f->name_table.use >= f->name_table.size / 2)
		rehash_name(f);

	return 0;
}
/*
 * Destroy a node whose refcount reached zero: unhash its name, take
 * it off the LRU, unhash its id and free the memory.  The node must
 * not hold a tree lock; callers hold f->lock.
 */
static void delete_node(struct fuse *f, struct node *node)
{
	if (f->conf.debug)
		fprintf(stderr, "DELETE: %llu\n",
			(unsigned long long) node->nodeid);

	assert(node->treelock == 0);
	unhash_name(f, node);
	if (lru_enabled(f))
		remove_node_lru(node);
	unhash_id(f, node);
	free_node(f, node);
}
  591. static void unref_node(struct fuse *f, struct node *node)
  592. {
  593. assert(node->refctr > 0);
  594. node->refctr --;
  595. if (!node->refctr)
  596. delete_node(f, node);
  597. }
/*
 * Return the next free inode number.  Ids are kept within 32 bits; on
 * wrap-around the generation counter is bumped so (nodeid, generation)
 * pairs stay unique.  Zero, FUSE_UNKNOWN_INO and ids still in the
 * table are skipped.
 */
static fuse_ino_t next_id(struct fuse *f)
{
	do {
		f->ctr = (f->ctr + 1) & 0xffffffff;
		if (!f->ctr)
			f->generation ++;
	} while (f->ctr == 0 || f->ctr == FUSE_UNKNOWN_INO ||
		 get_node_nocheck(f, f->ctr) != NULL);
	return f->ctr;
}
  608. static struct node *lookup_node(struct fuse *f, fuse_ino_t parent,
  609. const char *name)
  610. {
  611. size_t hash = name_hash(f, parent, name);
  612. struct node *node;
  613. for (node = f->name_table.array[hash]; node != NULL; node = node->name_next)
  614. if (node->parent->nodeid == parent &&
  615. strcmp(node->name, name) == 0)
  616. return node;
  617. return NULL;
  618. }
  619. static void inc_nlookup(struct node *node)
  620. {
  621. if (!node->nlookup)
  622. node->refctr++;
  623. node->nlookup++;
  624. }
/*
 * Look up the child 'name' of 'parent', creating a fresh node when it
 * does not exist yet, and take one lookup reference on the result.  A
 * NULL name refers to 'parent' itself.  Returns NULL on allocation
 * failure.
 */
static struct node *find_node(struct fuse *f, fuse_ino_t parent,
			      const char *name)
{
	struct node *node;

	pthread_mutex_lock(&f->lock);
	if (!name)
		node = get_node(f, parent);
	else
		node = lookup_node(f, parent, name);
	if (node == NULL) {
		node = alloc_node(f);
		if (node == NULL)
			goto out_err;

		node->nodeid = next_id(f);
		node->generation = f->generation;
		/* with "remember", hold one extra lookup so the node
		   survives until pruned (balanced in unlink_node) */
		if (f->conf.remember)
			inc_nlookup(node);

		if (hash_name(f, node, parent, name) == -1) {
			free_node(f, node);
			node = NULL;
			goto out_err;
		}
		hash_id(f, node);
		if (lru_enabled(f)) {
			struct node_lru *lnode = node_lru(node);
			init_list_head(&lnode->lru);
		}
	} else if (lru_enabled(f) && node->nlookup == 1) {
		/* was parked on the forget LRU; it is live again */
		remove_node_lru(node);
	}
	inc_nlookup(node);
out_err:
	pthread_mutex_unlock(&f->lock);
	return node;
}
/*
 * Prepend "/<name>" to a path that is being built backwards from the
 * end of *buf; 's' points at the current start of the partial path.
 * When there is not enough headroom the buffer is repeatedly doubled
 * (saturating near 4 GiB) and the existing path text is shifted to the
 * end of the new buffer.  Returns the new start of the path, or NULL
 * on allocation failure.
 */
static char *add_name(char **buf, unsigned *bufsize, char *s, const char *name)
{
	size_t len = strlen(name);

	if (s - len <= *buf) {
		unsigned pathlen = *bufsize - (s - *buf);
		unsigned newbufsize = *bufsize;
		char *newbuf;

		while (newbufsize < pathlen + len + 1) {
			if (newbufsize >= 0x80000000)
				newbufsize = 0xffffffff;
			else
				newbufsize *= 2;
		}

		newbuf = realloc(*buf, newbufsize);
		if (newbuf == NULL)
			return NULL;

		*buf = newbuf;
		s = newbuf + newbufsize - pathlen;
		memmove(s, newbuf + *bufsize - pathlen, pathlen);
		*bufsize = newbufsize;
	}
	s -= len;
	/* no terminator copied: the tail of the buffer already holds one */
	strncpy(s, name, len);
	s--;
	*s = '/';

	return s;
}
/*
 * Undo the tree locks taken by try_get_path(): release the write lock
 * on 'wnode' (if any), then drop one reader reference on every
 * ancestor from 'nodeid' up to, but not including, 'end' or the root.
 * A count parked at TREELOCK_WAIT_OFFSET (writer waiting) is reset to
 * zero so the queued writer can proceed.
 */
static void unlock_path(struct fuse *f, fuse_ino_t nodeid, struct node *wnode,
			struct node *end)
{
	struct node *node;

	if (wnode) {
		assert(wnode->treelock == TREELOCK_WRITE);
		wnode->treelock = 0;
	}

	for (node = get_node(f, nodeid);
	     node != end && node->nodeid != FUSE_ROOT_ID; node = node->parent) {
		assert(node->treelock != 0);
		assert(node->treelock != TREELOCK_WAIT_OFFSET);
		assert(node->treelock != TREELOCK_WRITE);
		node->treelock--;
		if (node->treelock == TREELOCK_WAIT_OFFSET)
			node->treelock = 0;
	}
}
/*
 * Build the path of 'nodeid' (optionally with final component 'name')
 * by walking parent pointers toward the root, taking one reader tree
 * lock per ancestor when 'need_lock' is set.  If 'wnodep' is given the
 * named node itself is write-locked and returned through it.
 *
 * Returns 0 with *path pointing to a malloc'd string; -ENOMEM on
 * allocation failure; -ENOENT when the node is detached from the
 * tree; -EAGAIN when a lock is contended (caller should queue and
 * retry).  On write-lock contention the target's positive reader
 * count is offset by TREELOCK_WAIT_OFFSET so new readers are refused
 * until the writer gets its turn.
 */
static int try_get_path(struct fuse *f, fuse_ino_t nodeid, const char *name,
			char **path, struct node **wnodep, bool need_lock)
{
	unsigned bufsize = 256;
	char *buf;
	char *s;
	struct node *node;
	struct node *wnode = NULL;
	int err;

	*path = NULL;

	err = -ENOMEM;
	buf = malloc(bufsize);
	if (buf == NULL)
		goto out_err;

	/* the path is built backwards from the end of the buffer */
	s = buf + bufsize - 1;
	*s = '\0';

	if (name != NULL) {
		s = add_name(&buf, &bufsize, s, name);
		err = -ENOMEM;
		if (s == NULL)
			goto out_free;
	}

	if (wnodep) {
		assert(need_lock);
		wnode = lookup_node(f, nodeid, name);
		if (wnode) {
			if (wnode->treelock != 0) {
				/* park readers out; writer is waiting */
				if (wnode->treelock > 0)
					wnode->treelock += TREELOCK_WAIT_OFFSET;
				err = -EAGAIN;
				goto out_free;
			}
			wnode->treelock = TREELOCK_WRITE;
		}
	}

	for (node = get_node(f, nodeid); node->nodeid != FUSE_ROOT_ID;
	     node = node->parent) {
		err = -ENOENT;
		if (node->name == NULL || node->parent == NULL)
			goto out_unlock;

		err = -ENOMEM;
		s = add_name(&buf, &bufsize, s, node->name);
		if (s == NULL)
			goto out_unlock;

		if (need_lock) {
			err = -EAGAIN;
			/* negative: write-locked or writer waiting */
			if (node->treelock < 0)
				goto out_unlock;

			node->treelock++;
		}
	}

	if (s[0])
		memmove(buf, s, bufsize - (s - buf));
	else
		strcpy(buf, "/");

	*path = buf;
	if (wnodep)
		*wnodep = wnode;

	return 0;

out_unlock:
	if (need_lock)
		unlock_path(f, nodeid, wnode, node);
out_free:
	free(buf);

out_err:
	return err;
}
  772. static void queue_element_unlock(struct fuse *f, struct lock_queue_element *qe)
  773. {
  774. struct node *wnode;
  775. if (qe->first_locked) {
  776. wnode = qe->wnode1 ? *qe->wnode1 : NULL;
  777. unlock_path(f, qe->nodeid1, wnode, NULL);
  778. qe->first_locked = false;
  779. }
  780. if (qe->second_locked) {
  781. wnode = qe->wnode2 ? *qe->wnode2 : NULL;
  782. unlock_path(f, qe->nodeid2, wnode, NULL);
  783. qe->second_locked = false;
  784. }
  785. }
/*
 * Try to complete one queued path request.  An element with no path1
 * is only waiting for a node's tree lock to drain (see forget_node()).
 * Otherwise the path lock(s) are retried; on success or a hard error
 * the element is marked done and its waiter signalled.  On -EAGAIN the
 * element stays queued, and only the queue head may keep partial locks.
 */
static void queue_element_wakeup(struct fuse *f, struct lock_queue_element *qe)
{
	int err;
	bool first = (qe == f->lockq);

	if (!qe->path1) {
		/* Just waiting for it to be unlocked */
		if (get_node(f, qe->nodeid1)->treelock == 0)
			pthread_cond_signal(&qe->cond);

		return;
	}

	if (!qe->first_locked) {
		err = try_get_path(f, qe->nodeid1, qe->name1, qe->path1,
				   qe->wnode1, true);
		if (!err)
			qe->first_locked = true;
		else if (err != -EAGAIN)
			goto err_unlock;
	}
	if (!qe->second_locked && qe->path2) {
		err = try_get_path(f, qe->nodeid2, qe->name2, qe->path2,
				   qe->wnode2, true);
		if (!err)
			qe->second_locked = true;
		else if (err != -EAGAIN)
			goto err_unlock;
	}

	if (qe->first_locked && (qe->second_locked || !qe->path2)) {
		err = 0;
		goto done;
	}

	/*
	 * Only let the first element be partially locked otherwise there could
	 * be a deadlock.
	 *
	 * But do allow the first element to be partially locked to prevent
	 * starvation.
	 */
	if (!first)
		queue_element_unlock(f, qe);

	/* keep trying */
	return;

err_unlock:
	queue_element_unlock(f, qe);
done:
	qe->err = err;
	qe->done = true;
	pthread_cond_signal(&qe->cond);
}
  834. static void wake_up_queued(struct fuse *f)
  835. {
  836. struct lock_queue_element *qe;
  837. for (qe = f->lockq; qe != NULL; qe = qe->next)
  838. queue_element_wakeup(f, qe);
  839. }
  840. static void debug_path(struct fuse *f, const char *msg, fuse_ino_t nodeid,
  841. const char *name, bool wr)
  842. {
  843. if (f->conf.debug) {
  844. struct node *wnode = NULL;
  845. if (wr)
  846. wnode = lookup_node(f, nodeid, name);
  847. if (wnode)
  848. fprintf(stderr, "%s %li (w)\n", msg, wnode->nodeid);
  849. else
  850. fprintf(stderr, "%s %li\n", msg, nodeid);
  851. }
  852. }
  853. static void queue_path(struct fuse *f, struct lock_queue_element *qe)
  854. {
  855. struct lock_queue_element **qp;
  856. qe->done = false;
  857. qe->first_locked = false;
  858. qe->second_locked = false;
  859. pthread_cond_init(&qe->cond, NULL);
  860. qe->next = NULL;
  861. for (qp = &f->lockq; *qp != NULL; qp = &(*qp)->next);
  862. *qp = qe;
  863. }
  864. static void dequeue_path(struct fuse *f, struct lock_queue_element *qe)
  865. {
  866. struct lock_queue_element **qp;
  867. pthread_cond_destroy(&qe->cond);
  868. for (qp = &f->lockq; *qp != qe; qp = &(*qp)->next);
  869. *qp = qe->next;
  870. }
  871. static int wait_path(struct fuse *f, struct lock_queue_element *qe)
  872. {
  873. queue_path(f, qe);
  874. do {
  875. pthread_cond_wait(&qe->cond, &f->lock);
  876. } while (!qe->done);
  877. dequeue_path(f, qe);
  878. return qe->err;
  879. }
/*
 * Lock and build the path for (nodeid, name); on contention the caller
 * is queued and sleeps until woken by a lock holder.  Returns 0 or a
 * negative errno; on success *path must be released with free_path*().
 */
static int get_path_common(struct fuse *f, fuse_ino_t nodeid, const char *name,
			   char **path, struct node **wnode)
{
	int err;

	pthread_mutex_lock(&f->lock);
	err = try_get_path(f, nodeid, name, path, wnode, true);
	if (err == -EAGAIN) {
		struct lock_queue_element qe = {
			.nodeid1 = nodeid,
			.name1 = name,
			.path1 = path,
			.wnode1 = wnode,
		};
		debug_path(f, "QUEUE PATH", nodeid, name, !!wnode);
		err = wait_path(f, &qe);
		debug_path(f, "DEQUEUE PATH", nodeid, name, !!wnode);
	}
	pthread_mutex_unlock(&f->lock);

	return err;
}
/* Get the path of 'nodeid', read-locking every ancestor. */
static int get_path(struct fuse *f, fuse_ino_t nodeid, char **path)
{
	return get_path_common(f, nodeid, NULL, path, NULL);
}
  904. static int get_path_nullok(struct fuse *f, fuse_ino_t nodeid, char **path)
  905. {
  906. int err = 0;
  907. if (f->conf.nopath) {
  908. *path = NULL;
  909. } else {
  910. err = get_path_common(f, nodeid, NULL, path, NULL);
  911. if (err == -ENOENT && f->nullpath_ok)
  912. err = 0;
  913. }
  914. return err;
  915. }
/* Like get_path(), with the extra trailing component 'name'. */
static int get_path_name(struct fuse *f, fuse_ino_t nodeid, const char *name,
			 char **path)
{
	return get_path_common(f, nodeid, name, path, NULL);
}
/* Like get_path_name(), but also write-lock the named node. */
static int get_path_wrlock(struct fuse *f, fuse_ino_t nodeid, const char *name,
			   char **path, struct node **wnode)
{
	return get_path_common(f, nodeid, name, path, wnode);
}
/*
 * Lock and build two paths at once (for rename/link).  If the second
 * path cannot be locked, the first is released again so the result is
 * all-or-nothing.
 */
static int try_get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
			 fuse_ino_t nodeid2, const char *name2,
			 char **path1, char **path2,
			 struct node **wnode1, struct node **wnode2)
{
	int err;

	/* FIXME: locking two paths needs deadlock checking */
	err = try_get_path(f, nodeid1, name1, path1, wnode1, true);
	if (!err) {
		err = try_get_path(f, nodeid2, name2, path2, wnode2, true);
		if (err) {
			struct node *wn1 = wnode1 ? *wnode1 : NULL;

			unlock_path(f, nodeid1, wn1, NULL);
			free(*path1);
		}
	}
	return err;
}
/*
 * Locking wrapper around try_get_path2(): on contention queue both
 * requests as a single element and sleep until completed.
 */
static int get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
		     fuse_ino_t nodeid2, const char *name2,
		     char **path1, char **path2,
		     struct node **wnode1, struct node **wnode2)
{
	int err;

	pthread_mutex_lock(&f->lock);
	err = try_get_path2(f, nodeid1, name1, nodeid2, name2,
			    path1, path2, wnode1, wnode2);
	if (err == -EAGAIN) {
		struct lock_queue_element qe = {
			.nodeid1 = nodeid1,
			.name1 = name1,
			.path1 = path1,
			.wnode1 = wnode1,
			.nodeid2 = nodeid2,
			.name2 = name2,
			.path2 = path2,
			.wnode2 = wnode2,
		};

		debug_path(f, "QUEUE PATH1", nodeid1, name1, !!wnode1);
		debug_path(f, " PATH2", nodeid2, name2, !!wnode2);
		err = wait_path(f, &qe);
		debug_path(f, "DEQUEUE PATH1", nodeid1, name1, !!wnode1);
		debug_path(f, " PATH2", nodeid2, name2, !!wnode2);
	}
	pthread_mutex_unlock(&f->lock);

	return err;
}
/*
 * Release the locks taken by get_path_wrlock() (wnode may be NULL),
 * wake any queued waiters, then free the path string.
 */
static void free_path_wrlock(struct fuse *f, fuse_ino_t nodeid,
			     struct node *wnode, char *path)
{
	pthread_mutex_lock(&f->lock);
	unlock_path(f, nodeid, wnode, NULL);
	if (f->lockq)
		wake_up_queued(f);
	pthread_mutex_unlock(&f->lock);
	free(path);
}
/* Release a path from get_path()/get_path_nullok(); a NULL path
   (nopath/nullpath_ok) is a no-op. */
static void free_path(struct fuse *f, fuse_ino_t nodeid, char *path)
{
	if (path)
		free_path_wrlock(f, nodeid, NULL, path);
}
/* Release both paths and all locks taken by get_path2(). */
static void free_path2(struct fuse *f, fuse_ino_t nodeid1, fuse_ino_t nodeid2,
		       struct node *wnode1, struct node *wnode2,
		       char *path1, char *path2)
{
	pthread_mutex_lock(&f->lock);
	unlock_path(f, nodeid1, wnode1, NULL);
	unlock_path(f, nodeid2, wnode2, NULL);
	wake_up_queued(f);
	pthread_mutex_unlock(&f->lock);
	free(path1);
	free(path2);
}
/*
 * Handle a FORGET from the kernel: subtract 'nlookup' references and
 * release the node when none remain.  The root node is never
 * forgotten.  If the node is still tree-locked the thread queues a
 * bare wait element and sleeps until the lock drains, so the node is
 * not destroyed under a lock holder.
 */
static void forget_node(struct fuse *f, fuse_ino_t nodeid, uint64_t nlookup)
{
	struct node *node;

	if (nodeid == FUSE_ROOT_ID)
		return;

	pthread_mutex_lock(&f->lock);
	node = get_node(f, nodeid);

	/*
	 * Node may still be locked due to interrupt idiocy in open,
	 * create and opendir
	 */
	while (node->nlookup == nlookup && node->treelock) {
		struct lock_queue_element qe = {
			.nodeid1 = nodeid,
		};

		debug_path(f, "QUEUE PATH (forget)", nodeid, NULL, false);
		queue_path(f, &qe);

		do {
			pthread_cond_wait(&qe.cond, &f->lock);
		} while (node->nlookup == nlookup && node->treelock);

		dequeue_path(f, &qe);
		debug_path(f, "DEQUEUE_PATH (forget)", nodeid, NULL, false);
	}

	assert(node->nlookup >= nlookup);
	node->nlookup -= nlookup;
	if (!node->nlookup) {
		unref_node(f, node);
	} else if (lru_enabled(f) && node->nlookup == 1) {
		/* only the "remember" reference is left: park on the LRU */
		set_forget_time(f, node);
	}
	pthread_mutex_unlock(&f->lock);
}
/*
 * Remove a node's name after unlink/rmdir.  With "remember" active the
 * extra lookup taken at creation (see find_node) is dropped here so
 * the node can eventually go away.
 */
static void unlink_node(struct fuse *f, struct node *node)
{
	if (f->conf.remember) {
		assert(node->nlookup > 1);
		node->nlookup--;
	}
	unhash_name(f, node);
}
  1040. static void remove_node(struct fuse *f, fuse_ino_t dir, const char *name)
  1041. {
  1042. struct node *node;
  1043. pthread_mutex_lock(&f->lock);
  1044. node = lookup_node(f, dir, name);
  1045. if (node != NULL)
  1046. unlink_node(f, node);
  1047. pthread_mutex_unlock(&f->lock);
  1048. }
/*
 * Move a node in the name hash from (olddir, oldname) to (newdir,
 * newname).  An existing node at the destination is unlinked first.
 * Returns 0 on success or -ENOMEM if rehashing fails.
 *
 * NOTE(review): on hash_name() failure the node is left unhashed
 * (its old entry was already removed) — verify callers tolerate this.
 */
static int rename_node(struct fuse *f, fuse_ino_t olddir, const char *oldname,
		       fuse_ino_t newdir, const char *newname)
{
	struct node *node;
	struct node *newnode;
	int err = 0;
	pthread_mutex_lock(&f->lock);
	node = lookup_node(f, olddir, oldname);
	newnode = lookup_node(f, newdir, newname);
	if (node == NULL)
		goto out;
	if (newnode != NULL)
		unlink_node(f, newnode);
	unhash_name(f, node);
	if (hash_name(f, node, newdir, newname) == -1) {
		err = -ENOMEM;
		goto out;
	}
out:
	pthread_mutex_unlock(&f->lock);
	return err;
}
  1071. static void set_stat(struct fuse *f, fuse_ino_t nodeid, struct stat *stbuf)
  1072. {
  1073. if (!f->conf.use_ino)
  1074. stbuf->st_ino = nodeid;
  1075. if (f->conf.set_mode)
  1076. stbuf->st_mode = (stbuf->st_mode & S_IFMT) |
  1077. (0777 & ~f->conf.umask);
  1078. if (f->conf.set_uid)
  1079. stbuf->st_uid = f->conf.uid;
  1080. if (f->conf.set_gid)
  1081. stbuf->st_gid = f->conf.gid;
  1082. }
  1083. static struct fuse *req_fuse(fuse_req_t req)
  1084. {
  1085. return (struct fuse *) fuse_req_userdata(req);
  1086. }
/*
 * Handler for the interrupt signal: deliberately empty.  Its only
 * purpose is to make blocking syscalls in worker threads return EINTR.
 */
static void fuse_intr_sighandler(int sig)
{
	(void) sig;
	/* Nothing to do */
}
/* Per-request interrupt bookkeeping shared with fuse_interrupt(). */
struct fuse_intr_data {
	pthread_t id;		/* thread processing the request */
	pthread_cond_t cond;	/* signalled when the request finishes */
	int finished;		/* set under f->lock when done */
};
/*
 * INTERRUPT callback: repeatedly signal the thread handling the
 * original request until it marks itself finished.  The 1-second
 * timedwait re-sends the signal in case it was delivered before the
 * worker entered its interruptible syscall.  Self-interrupt is a no-op.
 */
static void fuse_interrupt(fuse_req_t req, void *d_)
{
	struct fuse_intr_data *d = d_;
	struct fuse *f = req_fuse(req);
	if (d->id == pthread_self())
		return;
	pthread_mutex_lock(&f->lock);
	while (!d->finished) {
		struct timeval now;
		struct timespec timeout;
		pthread_kill(d->id, f->conf.intr_signal);
		gettimeofday(&now, NULL);
		timeout.tv_sec = now.tv_sec + 1;
		timeout.tv_nsec = now.tv_usec * 1000;
		pthread_cond_timedwait(&d->cond, &f->lock, &timeout);
	}
	pthread_mutex_unlock(&f->lock);
}
/*
 * Mark the request finished and wake any interrupter waiting in
 * fuse_interrupt().  The flag must be set under f->lock, and the
 * interrupt callback must be deregistered before the condvar is
 * destroyed.
 */
static void fuse_do_finish_interrupt(struct fuse *f, fuse_req_t req,
				     struct fuse_intr_data *d)
{
	pthread_mutex_lock(&f->lock);
	d->finished = 1;
	pthread_cond_broadcast(&d->cond);
	pthread_mutex_unlock(&f->lock);
	fuse_req_interrupt_func(req, NULL, NULL);
	pthread_cond_destroy(&d->cond);
}
/*
 * Initialize the interrupt data for the current thread and register
 * the INTERRUPT callback.  Fields must be fully set up before
 * registration, since the callback may fire immediately.
 */
static void fuse_do_prepare_interrupt(fuse_req_t req, struct fuse_intr_data *d)
{
	d->id = pthread_self();
	pthread_cond_init(&d->cond, NULL);
	d->finished = 0;
	fuse_req_interrupt_func(req, fuse_interrupt, d);
}
  1132. static inline void fuse_finish_interrupt(struct fuse *f, fuse_req_t req,
  1133. struct fuse_intr_data *d)
  1134. {
  1135. if (f->conf.intr)
  1136. fuse_do_finish_interrupt(f, req, d);
  1137. }
  1138. static inline void fuse_prepare_interrupt(struct fuse *f, fuse_req_t req,
  1139. struct fuse_intr_data *d)
  1140. {
  1141. if (f->conf.intr)
  1142. fuse_do_prepare_interrupt(req, d);
  1143. }
  1144. #if !defined(__FreeBSD__) && !defined(__NetBSD__)
/*
 * Dispatch open() according to the API version the filesystem was
 * compiled against.  Version >= 25 uses the modern signature; 2.2 used
 * a smaller fuse_file_info layout, which is round-tripped through a
 * compat copy; older versions took only the open flags.
 */
static int fuse_compat_open(struct fuse_fs *fs, const char *path,
			    struct fuse_file_info *fi)
{
	int err;
	if (!fs->compat || fs->compat >= 25)
		err = fs->op.open(path, fi);
	else if (fs->compat == 22) {
		struct fuse_file_info_compat tmp;
		memcpy(&tmp, fi, sizeof(tmp));
		err = ((struct fuse_operations_compat22 *) &fs->op)->open(path,
									  &tmp);
		memcpy(fi, &tmp, sizeof(tmp));
		fi->fh = tmp.fh;
	} else
		err = ((struct fuse_operations_compat2 *) &fs->op)
			->open(path, fi->flags);
	return err;
}
/*
 * Dispatch release() per API version: >= 22 takes fuse_file_info,
 * older versions took only the open flags.
 */
static int fuse_compat_release(struct fuse_fs *fs, const char *path,
			       struct fuse_file_info *fi)
{
	if (!fs->compat || fs->compat >= 22)
		return fs->op.release(path, fi);
	else
		return ((struct fuse_operations_compat2 *) &fs->op)
			->release(path, fi->flags);
}
/*
 * Dispatch opendir() per API version; pre-25 versions use the 2.2
 * fuse_file_info layout, round-tripped through a compat copy.
 */
static int fuse_compat_opendir(struct fuse_fs *fs, const char *path,
			       struct fuse_file_info *fi)
{
	if (!fs->compat || fs->compat >= 25)
		return fs->op.opendir(path, fi);
	else {
		int err;
		struct fuse_file_info_compat tmp;
		memcpy(&tmp, fi, sizeof(tmp));
		err = ((struct fuse_operations_compat22 *) &fs->op)
			->opendir(path, &tmp);
		memcpy(fi, &tmp, sizeof(tmp));
		fi->fh = tmp.fh;
		return err;
	}
}
  1188. static void convert_statfs_compat(struct fuse_statfs_compat1 *compatbuf,
  1189. struct statvfs *stbuf)
  1190. {
  1191. stbuf->f_bsize = compatbuf->block_size;
  1192. stbuf->f_blocks = compatbuf->blocks;
  1193. stbuf->f_bfree = compatbuf->blocks_free;
  1194. stbuf->f_bavail = compatbuf->blocks_free;
  1195. stbuf->f_files = compatbuf->files;
  1196. stbuf->f_ffree = compatbuf->files_free;
  1197. stbuf->f_namemax = compatbuf->namelen;
  1198. }
  1199. static void convert_statfs_old(struct statfs *oldbuf, struct statvfs *stbuf)
  1200. {
  1201. stbuf->f_bsize = oldbuf->f_bsize;
  1202. stbuf->f_blocks = oldbuf->f_blocks;
  1203. stbuf->f_bfree = oldbuf->f_bfree;
  1204. stbuf->f_bavail = oldbuf->f_bavail;
  1205. stbuf->f_files = oldbuf->f_files;
  1206. stbuf->f_ffree = oldbuf->f_ffree;
  1207. stbuf->f_namemax = oldbuf->f_namelen;
  1208. }
/*
 * Dispatch statfs() per API version.  Version 25 passed "/" instead of
 * the real path; versions 12-24 used struct statfs; version 1.x used
 * its own reply struct.  Old replies are converted to statvfs.
 */
static int fuse_compat_statfs(struct fuse_fs *fs, const char *path,
			      struct statvfs *buf)
{
	int err;
	if (!fs->compat || fs->compat >= 25) {
		err = fs->op.statfs(fs->compat == 25 ? "/" : path, buf);
	} else if (fs->compat > 11) {
		struct statfs oldbuf;
		err = ((struct fuse_operations_compat22 *) &fs->op)
			->statfs("/", &oldbuf);
		if (!err)
			convert_statfs_old(&oldbuf, buf);
	} else {
		struct fuse_statfs_compat1 compatbuf;
		memset(&compatbuf, 0, sizeof(struct fuse_statfs_compat1));
		err = ((struct fuse_operations_compat1 *) &fs->op)
			->statfs(&compatbuf);
		if (!err)
			convert_statfs_compat(&compatbuf, buf);
	}
	return err;
}
  1231. #else /* __FreeBSD__ || __NetBSD__ */
/* BSD build: no compat ABIs supported, call the modern op directly. */
static inline int fuse_compat_open(struct fuse_fs *fs, char *path,
				   struct fuse_file_info *fi)
{
	return fs->op.open(path, fi);
}
/* BSD build: no compat ABIs supported, call the modern op directly. */
static inline int fuse_compat_release(struct fuse_fs *fs, const char *path,
				      struct fuse_file_info *fi)
{
	return fs->op.release(path, fi);
}
/* BSD build: no compat ABIs supported, call the modern op directly. */
static inline int fuse_compat_opendir(struct fuse_fs *fs, const char *path,
				      struct fuse_file_info *fi)
{
	return fs->op.opendir(path, fi);
}
/* BSD build: only honor the v25 quirk of passing "/" as the path. */
static inline int fuse_compat_statfs(struct fuse_fs *fs, const char *path,
				     struct statvfs *buf)
{
	return fs->op.statfs(fs->compat == 25 ? "/" : path, buf);
}
  1252. #endif /* __FreeBSD__ || __NetBSD__ */
  1253. int fuse_fs_getattr(struct fuse_fs *fs, const char *path, struct stat *buf)
  1254. {
  1255. fuse_get_context()->private_data = fs->user_data;
  1256. if (fs->op.getattr) {
  1257. if (fs->debug)
  1258. fprintf(stderr, "getattr %s\n", path);
  1259. return fs->op.getattr(path, buf);
  1260. } else {
  1261. return -ENOSYS;
  1262. }
  1263. }
  1264. int fuse_fs_fgetattr(struct fuse_fs *fs, const char *path, struct stat *buf,
  1265. struct fuse_file_info *fi)
  1266. {
  1267. fuse_get_context()->private_data = fs->user_data;
  1268. if (fs->op.fgetattr) {
  1269. if (fs->debug)
  1270. fprintf(stderr, "fgetattr[%llu] %s\n",
  1271. (unsigned long long) fi->fh, path);
  1272. return fs->op.fgetattr(path, buf, fi);
  1273. } else if (path && fs->op.getattr) {
  1274. if (fs->debug)
  1275. fprintf(stderr, "getattr %s\n", path);
  1276. return fs->op.getattr(path, buf);
  1277. } else {
  1278. return -ENOSYS;
  1279. }
  1280. }
  1281. int
  1282. fuse_fs_rename(struct fuse_fs *fs,
  1283. const char *oldpath,
  1284. const char *newpath)
  1285. {
  1286. fuse_get_context()->private_data = fs->user_data;
  1287. if(fs->op.rename)
  1288. return fs->op.rename(oldpath, newpath);
  1289. return -ENOSYS;
  1290. }
  1291. int
  1292. fuse_fs_prepare_hide(struct fuse_fs *fs_,
  1293. const char *path_,
  1294. uint64_t *fh_,
  1295. int type_)
  1296. {
  1297. fuse_get_context()->private_data = fs_->user_data;
  1298. if(fs_->op.prepare_hide)
  1299. return fs_->op.prepare_hide(path_,fh_,type_);
  1300. return -ENOSYS;
  1301. }
  1302. int
  1303. fuse_fs_free_hide(struct fuse_fs *fs_,
  1304. uint64_t fh_)
  1305. {
  1306. fuse_get_context()->private_data = fs_->user_data;
  1307. if(fs_->op.free_hide)
  1308. return fs_->op.free_hide(fh_);
  1309. return -ENOSYS;
  1310. }
  1311. int fuse_fs_unlink(struct fuse_fs *fs, const char *path)
  1312. {
  1313. fuse_get_context()->private_data = fs->user_data;
  1314. if (fs->op.unlink) {
  1315. if (fs->debug)
  1316. fprintf(stderr, "unlink %s\n", path);
  1317. return fs->op.unlink(path);
  1318. } else {
  1319. return -ENOSYS;
  1320. }
  1321. }
  1322. int fuse_fs_rmdir(struct fuse_fs *fs, const char *path)
  1323. {
  1324. fuse_get_context()->private_data = fs->user_data;
  1325. if (fs->op.rmdir) {
  1326. if (fs->debug)
  1327. fprintf(stderr, "rmdir %s\n", path);
  1328. return fs->op.rmdir(path);
  1329. } else {
  1330. return -ENOSYS;
  1331. }
  1332. }
  1333. int fuse_fs_symlink(struct fuse_fs *fs, const char *linkname, const char *path)
  1334. {
  1335. fuse_get_context()->private_data = fs->user_data;
  1336. if (fs->op.symlink) {
  1337. if (fs->debug)
  1338. fprintf(stderr, "symlink %s %s\n", linkname, path);
  1339. return fs->op.symlink(linkname, path);
  1340. } else {
  1341. return -ENOSYS;
  1342. }
  1343. }
  1344. int fuse_fs_link(struct fuse_fs *fs, const char *oldpath, const char *newpath)
  1345. {
  1346. fuse_get_context()->private_data = fs->user_data;
  1347. if (fs->op.link) {
  1348. if (fs->debug)
  1349. fprintf(stderr, "link %s %s\n", oldpath, newpath);
  1350. return fs->op.link(oldpath, newpath);
  1351. } else {
  1352. return -ENOSYS;
  1353. }
  1354. }
  1355. int fuse_fs_release(struct fuse_fs *fs, const char *path,
  1356. struct fuse_file_info *fi)
  1357. {
  1358. fuse_get_context()->private_data = fs->user_data;
  1359. if (fs->op.release) {
  1360. if (fs->debug)
  1361. fprintf(stderr, "release%s[%llu] flags: 0x%x\n",
  1362. fi->flush ? "+flush" : "",
  1363. (unsigned long long) fi->fh, fi->flags);
  1364. return fuse_compat_release(fs, path, fi);
  1365. } else {
  1366. return 0;
  1367. }
  1368. }
  1369. int fuse_fs_opendir(struct fuse_fs *fs, const char *path,
  1370. struct fuse_file_info *fi)
  1371. {
  1372. fuse_get_context()->private_data = fs->user_data;
  1373. if (fs->op.opendir) {
  1374. int err;
  1375. if (fs->debug)
  1376. fprintf(stderr, "opendir flags: 0x%x %s\n", fi->flags,
  1377. path);
  1378. err = fuse_compat_opendir(fs, path, fi);
  1379. if (fs->debug && !err)
  1380. fprintf(stderr, " opendir[%lli] flags: 0x%x %s\n",
  1381. (unsigned long long) fi->fh, fi->flags, path);
  1382. return err;
  1383. } else {
  1384. return 0;
  1385. }
  1386. }
  1387. int fuse_fs_open(struct fuse_fs *fs, const char *path,
  1388. struct fuse_file_info *fi)
  1389. {
  1390. fuse_get_context()->private_data = fs->user_data;
  1391. if (fs->op.open) {
  1392. int err;
  1393. if (fs->debug)
  1394. fprintf(stderr, "open flags: 0x%x %s\n", fi->flags,
  1395. path);
  1396. err = fuse_compat_open(fs, path, fi);
  1397. if (fs->debug && !err)
  1398. fprintf(stderr, " open[%lli] flags: 0x%x %s\n",
  1399. (unsigned long long) fi->fh, fi->flags, path);
  1400. return err;
  1401. } else {
  1402. return 0;
  1403. }
  1404. }
  1405. static void fuse_free_buf(struct fuse_bufvec *buf)
  1406. {
  1407. if (buf != NULL) {
  1408. size_t i;
  1409. for (i = 0; i < buf->count; i++)
  1410. free(buf->buf[i].mem);
  1411. free(buf);
  1412. }
  1413. }
/*
 * Read into a newly allocated bufvec.  Uses read_buf when available
 * (the filesystem allocates and owns nothing; *bufp is returned to the
 * caller, who must fuse_free_buf() it); otherwise falls back to plain
 * read into a malloc'd buffer wrapped in a single-element bufvec.
 * Returns 0 on success or a negative errno.
 */
int fuse_fs_read_buf(struct fuse_fs *fs, const char *path,
		     struct fuse_bufvec **bufp, size_t size, off_t off,
		     struct fuse_file_info *fi)
{
	fuse_get_context()->private_data = fs->user_data;
	if (fs->op.read || fs->op.read_buf) {
		int res;
		if (fs->debug)
			fprintf(stderr,
				"read[%llu] %zu bytes from %llu flags: 0x%x\n",
				(unsigned long long) fi->fh,
				size, (unsigned long long) off, fi->flags);
		if (fs->op.read_buf) {
			res = fs->op.read_buf(path, bufp, size, off, fi);
		} else {
			/* Fallback: flat read into our own allocation */
			struct fuse_bufvec *buf;
			void *mem;
			buf = malloc(sizeof(struct fuse_bufvec));
			if (buf == NULL)
				return -ENOMEM;
			mem = malloc(size);
			if (mem == NULL) {
				free(buf);
				return -ENOMEM;
			}
			*buf = FUSE_BUFVEC_INIT(size);
			buf->buf[0].mem = mem;
			*bufp = buf;
			res = fs->op.read(path, mem, size, off, fi);
			if (res >= 0)
				buf->buf[0].size = res;
		}
		if (fs->debug && res >= 0)
			fprintf(stderr, "   read[%llu] %zu bytes from %llu\n",
				(unsigned long long) fi->fh,
				fuse_buf_size(*bufp),
				(unsigned long long) off);
		/* Sanity check: filesystem must not return more than asked */
		if (res >= 0 && fuse_buf_size(*bufp) > (int) size)
			fprintf(stderr, "fuse: read too many bytes\n");
		if (res < 0)
			return res;
		return 0;
	} else {
		return -ENOSYS;
	}
}
  1460. int fuse_fs_read(struct fuse_fs *fs, const char *path, char *mem, size_t size,
  1461. off_t off, struct fuse_file_info *fi)
  1462. {
  1463. int res;
  1464. struct fuse_bufvec *buf = NULL;
  1465. res = fuse_fs_read_buf(fs, path, &buf, size, off, fi);
  1466. if (res == 0) {
  1467. struct fuse_bufvec dst = FUSE_BUFVEC_INIT(size);
  1468. dst.buf[0].mem = mem;
  1469. res = fuse_buf_copy(&dst, buf, 0);
  1470. }
  1471. fuse_free_buf(buf);
  1472. return res;
  1473. }
  1474. int fuse_fs_write_buf(struct fuse_fs *fs, const char *path,
  1475. struct fuse_bufvec *buf, off_t off,
  1476. struct fuse_file_info *fi)
  1477. {
  1478. fuse_get_context()->private_data = fs->user_data;
  1479. if (fs->op.write_buf || fs->op.write) {
  1480. int res;
  1481. size_t size = fuse_buf_size(buf);
  1482. assert(buf->idx == 0 && buf->off == 0);
  1483. if (fs->debug)
  1484. fprintf(stderr,
  1485. "write%s[%llu] %zu bytes to %llu flags: 0x%x\n",
  1486. fi->writepage ? "page" : "",
  1487. (unsigned long long) fi->fh,
  1488. size,
  1489. (unsigned long long) off,
  1490. fi->flags);
  1491. if (fs->op.write_buf) {
  1492. res = fs->op.write_buf(path, buf, off, fi);
  1493. } else {
  1494. void *mem = NULL;
  1495. struct fuse_buf *flatbuf;
  1496. struct fuse_bufvec tmp = FUSE_BUFVEC_INIT(size);
  1497. if (buf->count == 1 &&
  1498. !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
  1499. flatbuf = &buf->buf[0];
  1500. } else {
  1501. res = -ENOMEM;
  1502. mem = malloc(size);
  1503. if (mem == NULL)
  1504. goto out;
  1505. tmp.buf[0].mem = mem;
  1506. res = fuse_buf_copy(&tmp, buf, 0);
  1507. if (res <= 0)
  1508. goto out_free;
  1509. tmp.buf[0].size = res;
  1510. flatbuf = &tmp.buf[0];
  1511. }
  1512. res = fs->op.write(path, flatbuf->mem, flatbuf->size,
  1513. off, fi);
  1514. out_free:
  1515. free(mem);
  1516. }
  1517. out:
  1518. if (fs->debug && res >= 0)
  1519. fprintf(stderr, " write%s[%llu] %u bytes to %llu\n",
  1520. fi->writepage ? "page" : "",
  1521. (unsigned long long) fi->fh, res,
  1522. (unsigned long long) off);
  1523. if (res > (int) size)
  1524. fprintf(stderr, "fuse: wrote too many bytes\n");
  1525. return res;
  1526. } else {
  1527. return -ENOSYS;
  1528. }
  1529. }
  1530. int fuse_fs_write(struct fuse_fs *fs, const char *path, const char *mem,
  1531. size_t size, off_t off, struct fuse_file_info *fi)
  1532. {
  1533. struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(size);
  1534. bufv.buf[0].mem = (void *) mem;
  1535. return fuse_fs_write_buf(fs, path, &bufv, off, fi);
  1536. }
  1537. int fuse_fs_fsync(struct fuse_fs *fs, const char *path, int datasync,
  1538. struct fuse_file_info *fi)
  1539. {
  1540. fuse_get_context()->private_data = fs->user_data;
  1541. if (fs->op.fsync) {
  1542. if (fs->debug)
  1543. fprintf(stderr, "fsync[%llu] datasync: %i\n",
  1544. (unsigned long long) fi->fh, datasync);
  1545. return fs->op.fsync(path, datasync, fi);
  1546. } else {
  1547. return -ENOSYS;
  1548. }
  1549. }
  1550. int fuse_fs_fsyncdir(struct fuse_fs *fs, const char *path, int datasync,
  1551. struct fuse_file_info *fi)
  1552. {
  1553. fuse_get_context()->private_data = fs->user_data;
  1554. if (fs->op.fsyncdir) {
  1555. if (fs->debug)
  1556. fprintf(stderr, "fsyncdir[%llu] datasync: %i\n",
  1557. (unsigned long long) fi->fh, datasync);
  1558. return fs->op.fsyncdir(path, datasync, fi);
  1559. } else {
  1560. return -ENOSYS;
  1561. }
  1562. }
  1563. int fuse_fs_flush(struct fuse_fs *fs, const char *path,
  1564. struct fuse_file_info *fi)
  1565. {
  1566. fuse_get_context()->private_data = fs->user_data;
  1567. if (fs->op.flush) {
  1568. if (fs->debug)
  1569. fprintf(stderr, "flush[%llu]\n",
  1570. (unsigned long long) fi->fh);
  1571. return fs->op.flush(path, fi);
  1572. } else {
  1573. return -ENOSYS;
  1574. }
  1575. }
  1576. int fuse_fs_statfs(struct fuse_fs *fs, const char *path, struct statvfs *buf)
  1577. {
  1578. fuse_get_context()->private_data = fs->user_data;
  1579. if (fs->op.statfs) {
  1580. if (fs->debug)
  1581. fprintf(stderr, "statfs %s\n", path);
  1582. return fuse_compat_statfs(fs, path, buf);
  1583. } else {
  1584. buf->f_namemax = 255;
  1585. buf->f_bsize = 512;
  1586. return 0;
  1587. }
  1588. }
  1589. int fuse_fs_releasedir(struct fuse_fs *fs, const char *path,
  1590. struct fuse_file_info *fi)
  1591. {
  1592. fuse_get_context()->private_data = fs->user_data;
  1593. if (fs->op.releasedir) {
  1594. if (fs->debug)
  1595. fprintf(stderr, "releasedir[%llu] flags: 0x%x\n",
  1596. (unsigned long long) fi->fh, fi->flags);
  1597. return fs->op.releasedir(path, fi);
  1598. } else {
  1599. return 0;
  1600. }
  1601. }
  1602. static int fill_dir_old(struct fuse_dirhandle *dh, const char *name, int type,
  1603. ino_t ino)
  1604. {
  1605. int res;
  1606. struct stat stbuf;
  1607. memset(&stbuf, 0, sizeof(stbuf));
  1608. stbuf.st_mode = type << 12;
  1609. stbuf.st_ino = ino;
  1610. res = dh->filler(dh->buf, name, &stbuf, 0);
  1611. return res ? -ENOMEM : 0;
  1612. }
  1613. int fuse_fs_readdir(struct fuse_fs *fs, const char *path, void *buf,
  1614. fuse_fill_dir_t filler, off_t off,
  1615. struct fuse_file_info *fi)
  1616. {
  1617. fuse_get_context()->private_data = fs->user_data;
  1618. if (fs->op.readdir) {
  1619. if (fs->debug)
  1620. fprintf(stderr, "readdir[%llu] from %llu\n",
  1621. (unsigned long long) fi->fh,
  1622. (unsigned long long) off);
  1623. return fs->op.readdir(path, buf, filler, off, fi);
  1624. } else if (fs->op.getdir) {
  1625. struct fuse_dirhandle dh;
  1626. if (fs->debug)
  1627. fprintf(stderr, "getdir[%llu]\n",
  1628. (unsigned long long) fi->fh);
  1629. dh.filler = filler;
  1630. dh.buf = buf;
  1631. return fs->op.getdir(path, &dh, fill_dir_old);
  1632. } else {
  1633. return -ENOSYS;
  1634. }
  1635. }
  1636. int fuse_fs_create(struct fuse_fs *fs, const char *path, mode_t mode,
  1637. struct fuse_file_info *fi)
  1638. {
  1639. fuse_get_context()->private_data = fs->user_data;
  1640. if (fs->op.create) {
  1641. int err;
  1642. if (fs->debug)
  1643. fprintf(stderr,
  1644. "create flags: 0x%x %s 0%o umask=0%03o\n",
  1645. fi->flags, path, mode,
  1646. fuse_get_context()->umask);
  1647. err = fs->op.create(path, mode, fi);
  1648. if (fs->debug && !err)
  1649. fprintf(stderr, " create[%llu] flags: 0x%x %s\n",
  1650. (unsigned long long) fi->fh, fi->flags, path);
  1651. return err;
  1652. } else {
  1653. return -ENOSYS;
  1654. }
  1655. }
  1656. int fuse_fs_lock(struct fuse_fs *fs, const char *path,
  1657. struct fuse_file_info *fi, int cmd, struct flock *lock)
  1658. {
  1659. fuse_get_context()->private_data = fs->user_data;
  1660. if (fs->op.lock) {
  1661. if (fs->debug)
  1662. fprintf(stderr, "lock[%llu] %s %s start: %llu len: %llu pid: %llu\n",
  1663. (unsigned long long) fi->fh,
  1664. (cmd == F_GETLK ? "F_GETLK" :
  1665. (cmd == F_SETLK ? "F_SETLK" :
  1666. (cmd == F_SETLKW ? "F_SETLKW" : "???"))),
  1667. (lock->l_type == F_RDLCK ? "F_RDLCK" :
  1668. (lock->l_type == F_WRLCK ? "F_WRLCK" :
  1669. (lock->l_type == F_UNLCK ? "F_UNLCK" :
  1670. "???"))),
  1671. (unsigned long long) lock->l_start,
  1672. (unsigned long long) lock->l_len,
  1673. (unsigned long long) lock->l_pid);
  1674. return fs->op.lock(path, fi, cmd, lock);
  1675. } else {
  1676. return -ENOSYS;
  1677. }
  1678. }
  1679. int fuse_fs_flock(struct fuse_fs *fs, const char *path,
  1680. struct fuse_file_info *fi, int op)
  1681. {
  1682. fuse_get_context()->private_data = fs->user_data;
  1683. if (fs->op.flock) {
  1684. if (fs->debug) {
  1685. int xop = op & ~LOCK_NB;
  1686. fprintf(stderr, "lock[%llu] %s%s\n",
  1687. (unsigned long long) fi->fh,
  1688. xop == LOCK_SH ? "LOCK_SH" :
  1689. (xop == LOCK_EX ? "LOCK_EX" :
  1690. (xop == LOCK_UN ? "LOCK_UN" : "???")),
  1691. (op & LOCK_NB) ? "|LOCK_NB" : "");
  1692. }
  1693. return fs->op.flock(path, fi, op);
  1694. } else {
  1695. return -ENOSYS;
  1696. }
  1697. }
  1698. int fuse_fs_chown(struct fuse_fs *fs, const char *path, uid_t uid, gid_t gid)
  1699. {
  1700. fuse_get_context()->private_data = fs->user_data;
  1701. if (fs->op.chown) {
  1702. if (fs->debug)
  1703. fprintf(stderr, "chown %s %lu %lu\n", path,
  1704. (unsigned long) uid, (unsigned long) gid);
  1705. return fs->op.chown(path, uid, gid);
  1706. } else {
  1707. return -ENOSYS;
  1708. }
  1709. }
  1710. int
  1711. fuse_fs_fchown(struct fuse_fs *fs_,
  1712. const struct fuse_file_info *ffi_,
  1713. const uid_t uid_,
  1714. const gid_t gid_)
  1715. {
  1716. fuse_get_context()->private_data = fs_->user_data;
  1717. if(fs_->op.fchown)
  1718. return fs_->op.fchown(ffi_,uid_,gid_);
  1719. return -ENOSYS;
  1720. }
  1721. int fuse_fs_truncate(struct fuse_fs *fs, const char *path, off_t size)
  1722. {
  1723. fuse_get_context()->private_data = fs->user_data;
  1724. if (fs->op.truncate) {
  1725. if (fs->debug)
  1726. fprintf(stderr, "truncate %s %llu\n", path,
  1727. (unsigned long long) size);
  1728. return fs->op.truncate(path, size);
  1729. } else {
  1730. return -ENOSYS;
  1731. }
  1732. }
  1733. int fuse_fs_ftruncate(struct fuse_fs *fs, const char *path, off_t size,
  1734. struct fuse_file_info *fi)
  1735. {
  1736. fuse_get_context()->private_data = fs->user_data;
  1737. if (fs->op.ftruncate) {
  1738. if (fs->debug)
  1739. fprintf(stderr, "ftruncate[%llu] %llu\n",
  1740. (unsigned long long) fi->fh,
  1741. (unsigned long long) size);
  1742. return fs->op.ftruncate(path, size, fi);
  1743. } else if (path && fs->op.truncate) {
  1744. if (fs->debug)
  1745. fprintf(stderr, "truncate %s %llu\n", path,
  1746. (unsigned long long) size);
  1747. return fs->op.truncate(path, size);
  1748. } else {
  1749. return -ENOSYS;
  1750. }
  1751. }
  1752. int fuse_fs_utimens(struct fuse_fs *fs, const char *path,
  1753. const struct timespec tv[2])
  1754. {
  1755. fuse_get_context()->private_data = fs->user_data;
  1756. if (fs->op.utimens) {
  1757. if (fs->debug)
  1758. fprintf(stderr, "utimens %s %li.%09lu %li.%09lu\n",
  1759. path, tv[0].tv_sec, tv[0].tv_nsec,
  1760. tv[1].tv_sec, tv[1].tv_nsec);
  1761. return fs->op.utimens(path, tv);
  1762. } else if(fs->op.utime) {
  1763. struct utimbuf buf;
  1764. if (fs->debug)
  1765. fprintf(stderr, "utime %s %li %li\n", path,
  1766. tv[0].tv_sec, tv[1].tv_sec);
  1767. buf.actime = tv[0].tv_sec;
  1768. buf.modtime = tv[1].tv_sec;
  1769. return fs->op.utime(path, &buf);
  1770. } else {
  1771. return -ENOSYS;
  1772. }
  1773. }
  1774. int
  1775. fuse_fs_futimens(struct fuse_fs *fs_,
  1776. const struct fuse_file_info *ffi_,
  1777. const struct timespec tv_[2])
  1778. {
  1779. fuse_get_context()->private_data = fs_->user_data;
  1780. if(fs_->op.futimens)
  1781. return fs_->op.futimens(ffi_,tv_);
  1782. return -ENOSYS;
  1783. }
  1784. int fuse_fs_access(struct fuse_fs *fs, const char *path, int mask)
  1785. {
  1786. fuse_get_context()->private_data = fs->user_data;
  1787. if (fs->op.access) {
  1788. if (fs->debug)
  1789. fprintf(stderr, "access %s 0%o\n", path, mask);
  1790. return fs->op.access(path, mask);
  1791. } else {
  1792. return -ENOSYS;
  1793. }
  1794. }
  1795. int fuse_fs_readlink(struct fuse_fs *fs, const char *path, char *buf,
  1796. size_t len)
  1797. {
  1798. fuse_get_context()->private_data = fs->user_data;
  1799. if (fs->op.readlink) {
  1800. if (fs->debug)
  1801. fprintf(stderr, "readlink %s %lu\n", path,
  1802. (unsigned long) len);
  1803. return fs->op.readlink(path, buf, len);
  1804. } else {
  1805. return -ENOSYS;
  1806. }
  1807. }
  1808. int fuse_fs_mknod(struct fuse_fs *fs, const char *path, mode_t mode,
  1809. dev_t rdev)
  1810. {
  1811. fuse_get_context()->private_data = fs->user_data;
  1812. if (fs->op.mknod) {
  1813. if (fs->debug)
  1814. fprintf(stderr, "mknod %s 0%o 0x%llx umask=0%03o\n",
  1815. path, mode, (unsigned long long) rdev,
  1816. fuse_get_context()->umask);
  1817. return fs->op.mknod(path, mode, rdev);
  1818. } else {
  1819. return -ENOSYS;
  1820. }
  1821. }
  1822. int fuse_fs_mkdir(struct fuse_fs *fs, const char *path, mode_t mode)
  1823. {
  1824. fuse_get_context()->private_data = fs->user_data;
  1825. if (fs->op.mkdir) {
  1826. if (fs->debug)
  1827. fprintf(stderr, "mkdir %s 0%o umask=0%03o\n",
  1828. path, mode, fuse_get_context()->umask);
  1829. return fs->op.mkdir(path, mode);
  1830. } else {
  1831. return -ENOSYS;
  1832. }
  1833. }
  1834. int fuse_fs_setxattr(struct fuse_fs *fs, const char *path, const char *name,
  1835. const char *value, size_t size, int flags)
  1836. {
  1837. fuse_get_context()->private_data = fs->user_data;
  1838. if (fs->op.setxattr) {
  1839. if (fs->debug)
  1840. fprintf(stderr, "setxattr %s %s %lu 0x%x\n",
  1841. path, name, (unsigned long) size, flags);
  1842. return fs->op.setxattr(path, name, value, size, flags);
  1843. } else {
  1844. return -ENOSYS;
  1845. }
  1846. }
  1847. int fuse_fs_getxattr(struct fuse_fs *fs, const char *path, const char *name,
  1848. char *value, size_t size)
  1849. {
  1850. fuse_get_context()->private_data = fs->user_data;
  1851. if (fs->op.getxattr) {
  1852. if (fs->debug)
  1853. fprintf(stderr, "getxattr %s %s %lu\n",
  1854. path, name, (unsigned long) size);
  1855. return fs->op.getxattr(path, name, value, size);
  1856. } else {
  1857. return -ENOSYS;
  1858. }
  1859. }
  1860. int fuse_fs_listxattr(struct fuse_fs *fs, const char *path, char *list,
  1861. size_t size)
  1862. {
  1863. fuse_get_context()->private_data = fs->user_data;
  1864. if (fs->op.listxattr) {
  1865. if (fs->debug)
  1866. fprintf(stderr, "listxattr %s %lu\n",
  1867. path, (unsigned long) size);
  1868. return fs->op.listxattr(path, list, size);
  1869. } else {
  1870. return -ENOSYS;
  1871. }
  1872. }
  1873. int fuse_fs_bmap(struct fuse_fs *fs, const char *path, size_t blocksize,
  1874. uint64_t *idx)
  1875. {
  1876. fuse_get_context()->private_data = fs->user_data;
  1877. if (fs->op.bmap) {
  1878. if (fs->debug)
  1879. fprintf(stderr, "bmap %s blocksize: %lu index: %llu\n",
  1880. path, (unsigned long) blocksize,
  1881. (unsigned long long) *idx);
  1882. return fs->op.bmap(path, blocksize, idx);
  1883. } else {
  1884. return -ENOSYS;
  1885. }
  1886. }
  1887. int fuse_fs_removexattr(struct fuse_fs *fs, const char *path, const char *name)
  1888. {
  1889. fuse_get_context()->private_data = fs->user_data;
  1890. if (fs->op.removexattr) {
  1891. if (fs->debug)
  1892. fprintf(stderr, "removexattr %s %s\n", path, name);
  1893. return fs->op.removexattr(path, name);
  1894. } else {
  1895. return -ENOSYS;
  1896. }
  1897. }
  1898. int fuse_fs_ioctl(struct fuse_fs *fs, const char *path, int cmd, void *arg,
  1899. struct fuse_file_info *fi, unsigned int flags,
  1900. void *data, uint32_t *out_size)
  1901. {
  1902. fuse_get_context()->private_data = fs->user_data;
  1903. if (fs->op.ioctl) {
  1904. if (fs->debug)
  1905. fprintf(stderr, "ioctl[%llu] 0x%x flags: 0x%x\n",
  1906. (unsigned long long) fi->fh, cmd, flags);
  1907. return fs->op.ioctl(path, cmd, arg, fi, flags, data, out_size);
  1908. } else
  1909. return -ENOSYS;
  1910. }
  1911. int fuse_fs_poll(struct fuse_fs *fs, const char *path,
  1912. struct fuse_file_info *fi, struct fuse_pollhandle *ph,
  1913. unsigned *reventsp)
  1914. {
  1915. fuse_get_context()->private_data = fs->user_data;
  1916. if (fs->op.poll) {
  1917. int res;
  1918. if (fs->debug)
  1919. fprintf(stderr, "poll[%llu] ph: %p\n",
  1920. (unsigned long long) fi->fh, ph);
  1921. res = fs->op.poll(path, fi, ph, reventsp);
  1922. if (fs->debug && !res)
  1923. fprintf(stderr, " poll[%llu] revents: 0x%x\n",
  1924. (unsigned long long) fi->fh, *reventsp);
  1925. return res;
  1926. } else
  1927. return -ENOSYS;
  1928. }
  1929. int fuse_fs_fallocate(struct fuse_fs *fs, const char *path, int mode,
  1930. off_t offset, off_t length, struct fuse_file_info *fi)
  1931. {
  1932. fuse_get_context()->private_data = fs->user_data;
  1933. if (fs->op.fallocate) {
  1934. if (fs->debug)
  1935. fprintf(stderr, "fallocate %s mode %x, offset: %llu, length: %llu\n",
  1936. path,
  1937. mode,
  1938. (unsigned long long) offset,
  1939. (unsigned long long) length);
  1940. return fs->op.fallocate(path, mode, offset, length, fi);
  1941. } else
  1942. return -ENOSYS;
  1943. }
  1944. static
  1945. int
  1946. node_open_and_visible(const struct node *node_)
  1947. {
  1948. return ((node_ != NULL) &&
  1949. (node_->open_count > 0) &&
  1950. (node_->is_hidden == 0));
  1951. }
  1952. static int mtime_eq(const struct stat *stbuf, const struct timespec *ts)
  1953. {
  1954. return stbuf->st_mtime == ts->tv_sec &&
  1955. ST_MTIM_NSEC(stbuf) == ts->tv_nsec;
  1956. }
#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC CLOCK_REALTIME
#endif
/*
 * Read the current time, preferring the monotonic clock.  If the
 * kernel rejects CLOCK_MONOTONIC (EINVAL), permanently fall back to
 * CLOCK_REALTIME via the static clockid.  Any other failure aborts.
 */
static void curr_time(struct timespec *now)
{
	static clockid_t clockid = CLOCK_MONOTONIC;
	int res = clock_gettime(clockid, now);
	if (res == -1 && errno == EINVAL) {
		clockid = CLOCK_REALTIME;
		res = clock_gettime(clockid, now);
	}
	if (res == -1) {
		perror("fuse: clock_gettime");
		abort();
	}
}
  1973. static void update_stat(struct node *node, const struct stat *stbuf)
  1974. {
  1975. if (node->cache_valid && (!mtime_eq(stbuf, &node->mtime) ||
  1976. stbuf->st_size != node->size))
  1977. node->cache_valid = 0;
  1978. node->mtime.tv_sec = stbuf->st_mtime;
  1979. node->mtime.tv_nsec = ST_MTIM_NSEC(stbuf);
  1980. node->size = stbuf->st_size;
  1981. curr_time(&node->stat_updated);
  1982. }
/*
 * Stat `path` (via fgetattr when a file handle is given) and, on
 * success, find-or-create the node named `name` under `nodeid`,
 * filling the kernel entry reply with its ids, the configured
 * timeouts, and the (possibly overridden) attributes.  Returns 0 or
 * a negative errno.
 */
static int lookup_path(struct fuse *f, fuse_ino_t nodeid,
		       const char *name, const char *path,
		       struct fuse_entry_param *e, struct fuse_file_info *fi)
{
	int res;
	memset(e, 0, sizeof(struct fuse_entry_param));
	if (fi)
		res = fuse_fs_fgetattr(f->fs, path, &e->attr, fi);
	else
		res = fuse_fs_getattr(f->fs, path, &e->attr);
	if (res == 0) {
		struct node *node;
		node = find_node(f, nodeid, name);
		if (node == NULL)
			res = -ENOMEM;
		else {
			e->ino = node->nodeid;
			e->generation = node->generation;
			e->entry_timeout = f->conf.entry_timeout;
			e->attr_timeout = f->conf.attr_timeout;
			if (f->conf.auto_cache) {
				/* keep the cached size/mtime in sync */
				pthread_mutex_lock(&f->lock);
				update_stat(node, &e->attr);
				pthread_mutex_unlock(&f->lock);
			}
			set_stat(f, e->ino, &e->attr);
			if (f->conf.debug)
				fprintf(stderr, "   NODEID: %lu\n",
					(unsigned long) e->ino);
		}
	}
	return res;
}
  2016. static struct fuse_context_i *fuse_get_context_internal(void)
  2017. {
  2018. struct fuse_context_i *c;
  2019. c = (struct fuse_context_i *) pthread_getspecific(fuse_context_key);
  2020. if (c == NULL) {
  2021. c = (struct fuse_context_i *)
  2022. calloc(1, sizeof(struct fuse_context_i));
  2023. if (c == NULL) {
  2024. /* This is hard to deal with properly, so just
  2025. abort. If memory is so low that the
  2026. context cannot be allocated, there's not
  2027. much hope for the filesystem anyway */
  2028. fprintf(stderr, "fuse: failed to allocate thread specific data\n");
  2029. abort();
  2030. }
  2031. pthread_setspecific(fuse_context_key, c);
  2032. }
  2033. return c;
  2034. }
  2035. static void fuse_freecontext(void *data)
  2036. {
  2037. free(data);
  2038. }
  2039. static int fuse_create_context_key(void)
  2040. {
  2041. int err = 0;
  2042. pthread_mutex_lock(&fuse_context_lock);
  2043. if (!fuse_context_ref) {
  2044. err = pthread_key_create(&fuse_context_key, fuse_freecontext);
  2045. if (err) {
  2046. fprintf(stderr, "fuse: failed to create thread specific key: %s\n",
  2047. strerror(err));
  2048. pthread_mutex_unlock(&fuse_context_lock);
  2049. return -1;
  2050. }
  2051. }
  2052. fuse_context_ref++;
  2053. pthread_mutex_unlock(&fuse_context_lock);
  2054. return 0;
  2055. }
  2056. static void fuse_delete_context_key(void)
  2057. {
  2058. pthread_mutex_lock(&fuse_context_lock);
  2059. fuse_context_ref--;
  2060. if (!fuse_context_ref) {
  2061. free(pthread_getspecific(fuse_context_key));
  2062. pthread_key_delete(fuse_context_key);
  2063. }
  2064. pthread_mutex_unlock(&fuse_context_lock);
  2065. }
  2066. static struct fuse *req_fuse_prepare(fuse_req_t req)
  2067. {
  2068. struct fuse_context_i *c = fuse_get_context_internal();
  2069. const struct fuse_ctx *ctx = fuse_req_ctx(req);
  2070. c->req = req;
  2071. c->ctx.fuse = req_fuse(req);
  2072. c->ctx.uid = ctx->uid;
  2073. c->ctx.gid = ctx->gid;
  2074. c->ctx.pid = ctx->pid;
  2075. c->ctx.umask = ctx->umask;
  2076. return c->ctx.fuse;
  2077. }
  2078. static inline void reply_err(fuse_req_t req, int err)
  2079. {
  2080. /* fuse_reply_err() uses non-negated errno values */
  2081. fuse_reply_err(req, -err);
  2082. }
  2083. static void reply_entry(fuse_req_t req, const struct fuse_entry_param *e,
  2084. int err)
  2085. {
  2086. if (!err) {
  2087. struct fuse *f = req_fuse(req);
  2088. if (fuse_reply_entry(req, e) == -ENOENT) {
  2089. /* Skip forget for negative result */
  2090. if (e->ino != 0)
  2091. forget_node(f, e->ino, 1);
  2092. }
  2093. } else
  2094. reply_err(req, err);
  2095. }
  2096. void fuse_fs_init(struct fuse_fs *fs, struct fuse_conn_info *conn)
  2097. {
  2098. fuse_get_context()->private_data = fs->user_data;
  2099. if (!fs->op.write_buf)
  2100. conn->want &= ~FUSE_CAP_SPLICE_READ;
  2101. if (!fs->op.lock)
  2102. conn->want &= ~FUSE_CAP_POSIX_LOCKS;
  2103. if (!fs->op.flock)
  2104. conn->want &= ~FUSE_CAP_FLOCK_LOCKS;
  2105. if (fs->op.init)
  2106. fs->user_data = fs->op.init(conn);
  2107. }
  2108. static void fuse_lib_init(void *data, struct fuse_conn_info *conn)
  2109. {
  2110. struct fuse *f = (struct fuse *) data;
  2111. struct fuse_context_i *c = fuse_get_context_internal();
  2112. memset(c, 0, sizeof(*c));
  2113. c->ctx.fuse = f;
  2114. conn->want |= FUSE_CAP_EXPORT_SUPPORT;
  2115. fuse_fs_init(f->fs, conn);
  2116. }
  2117. void fuse_fs_destroy(struct fuse_fs *fs)
  2118. {
  2119. fuse_get_context()->private_data = fs->user_data;
  2120. if (fs->op.destroy)
  2121. fs->op.destroy(fs->user_data);
  2122. free(fs);
  2123. }
  2124. static void fuse_lib_destroy(void *data)
  2125. {
  2126. struct fuse *f = (struct fuse *) data;
  2127. struct fuse_context_i *c = fuse_get_context_internal();
  2128. memset(c, 0, sizeof(*c));
  2129. c->ctx.fuse = f;
  2130. fuse_fs_destroy(f->fs);
  2131. f->fs = NULL;
  2132. }
/* LOOKUP request: resolve `name` under `parent` and reply with an
   entry.  "." and ".." get special handling (the kernel can send
   them because FUSE_CAP_EXPORT_SUPPORT is advertised). */
static void fuse_lib_lookup(fuse_req_t req, fuse_ino_t parent,
			    const char *name)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_entry_param e;
	char *path;
	int err;
	struct node *dot = NULL;

	if (name[0] == '.') {
		int len = strlen(name);

		if (len == 1 || (name[1] == '.' && len == 2)) {
			pthread_mutex_lock(&f->lock);
			if (len == 1) {
				if (f->conf.debug)
					fprintf(stderr, "LOOKUP-DOT\n");
				dot = get_node_nocheck(f, parent);
				if (dot == NULL) {
					pthread_mutex_unlock(&f->lock);
					reply_entry(req, &e, -ESTALE);
					return;
				}
				/* pin the node so it stays alive until
				   the lookup below completes */
				dot->refctr++;
			} else {
				if (f->conf.debug)
					fprintf(stderr, "LOOKUP-DOTDOT\n");
				/* ".." means: look up the parent itself */
				parent = get_node(f, parent)->parent->nodeid;
			}
			pthread_mutex_unlock(&f->lock);
			/* NULL name makes get_path_name/lookup_path
			   operate on `parent` itself */
			name = NULL;
		}
	}

	err = get_path_name(f, parent, name, &path);
	if (!err) {
		struct fuse_intr_data d;
		if (f->conf.debug)
			fprintf(stderr, "LOOKUP %s\n", path);
		fuse_prepare_interrupt(f, req, &d);
		err = lookup_path(f, parent, name, path, &e, NULL);
		if (err == -ENOENT && f->conf.negative_timeout != 0.0) {
			/* cache the negative result: ino == 0 entry */
			e.ino = 0;
			e.entry_timeout = f->conf.negative_timeout;
			err = 0;
		}
		fuse_finish_interrupt(f, req, &d);
		free_path(f, parent, path);
	}
	if (dot) {
		pthread_mutex_lock(&f->lock);
		unref_node(f, dot);
		pthread_mutex_unlock(&f->lock);
	}
	reply_entry(req, &e, err);
}
  2186. static void do_forget(struct fuse *f, fuse_ino_t ino, uint64_t nlookup)
  2187. {
  2188. if (f->conf.debug)
  2189. fprintf(stderr, "FORGET %llu/%llu\n", (unsigned long long)ino,
  2190. (unsigned long long) nlookup);
  2191. forget_node(f, ino, nlookup);
  2192. }
  2193. static void fuse_lib_forget(fuse_req_t req, fuse_ino_t ino,
  2194. unsigned long nlookup)
  2195. {
  2196. do_forget(req_fuse(req), ino, nlookup);
  2197. fuse_reply_none(req);
  2198. }
  2199. static void fuse_lib_forget_multi(fuse_req_t req, size_t count,
  2200. struct fuse_forget_data *forgets)
  2201. {
  2202. struct fuse *f = req_fuse(req);
  2203. size_t i;
  2204. for (i = 0; i < count; i++)
  2205. do_forget(f, forgets[i].ino, forgets[i].nlookup);
  2206. fuse_reply_none(req);
  2207. }
/* GETATTR request.  When no file info was supplied but the node has
   been hidden (unlinked while open), fall back to the saved hidden
   file handle so the attributes can still be fetched via fgetattr. */
static void fuse_lib_getattr(fuse_req_t req, fuse_ino_t ino,
			     struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct stat buf;
	char *path;
	int err;
	struct node *node;
	struct fuse_file_info ffi = {0};

	if (fi == NULL) {
		pthread_mutex_lock(&f->lock);
		node = get_node(f, ino);
		if (node->is_hidden) {
			/* use the fh saved when the node was hidden */
			fi = &ffi;
			fi->fh = node->hidden_fh;
		}
		pthread_mutex_unlock(&f->lock);
	}

	memset(&buf, 0, sizeof(buf));

	/* a real path is only required when we must call getattr();
	   the fgetattr path tolerates a NULL path */
	err = (((fi == NULL) || (f->fs->op.fgetattr == NULL)) ?
	       get_path(f, ino, &path) :
	       get_path_nullok(f, ino, &path));
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = ((fi == NULL) ?
		       fuse_fs_getattr(f->fs, path, &buf) :
		       fuse_fs_fgetattr(f->fs, path, &buf, fi));
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	if (!err) {
		pthread_mutex_lock(&f->lock);
		node = get_node(f, ino);
		if (f->conf.auto_cache)
			update_stat(node, &buf);
		pthread_mutex_unlock(&f->lock);
		set_stat(f, ino, &buf);
		fuse_reply_attr(req, &buf, f->conf.attr_timeout);
	} else
		reply_err(req, err);
}
  2252. int fuse_fs_chmod(struct fuse_fs *fs, const char *path, mode_t mode)
  2253. {
  2254. fuse_get_context()->private_data = fs->user_data;
  2255. if (fs->op.chmod)
  2256. return fs->op.chmod(path, mode);
  2257. else
  2258. return -ENOSYS;
  2259. }
  2260. int
  2261. fuse_fs_fchmod(struct fuse_fs *fs_,
  2262. const struct fuse_file_info *ffi_,
  2263. const mode_t mode_)
  2264. {
  2265. fuse_get_context()->private_data = fs_->user_data;
  2266. if(fs_->op.fchmod)
  2267. return fs_->op.fchmod(ffi_,mode_);
  2268. return -ENOSYS;
  2269. }
/* SETATTR request.  `valid` is a bitmask of FUSE_SET_ATTR_* flags
   selecting which attributes to change; each maps to a call in the
   chmod/chown/truncate/utimens family.  When no fi was supplied but
   the node is hidden (unlinked-but-open), operate through the saved
   hidden file handle.  On success the attributes are re-read and
   returned to the kernel. */
static void fuse_lib_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
			     int valid, struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct stat buf;
	char *path;
	int err;
	struct node *node;
	struct fuse_file_info ffi = {0};

	if (fi == NULL) {
		pthread_mutex_lock(&f->lock);
		node = get_node(f, ino);
		if (node->is_hidden) {
			/* use the fh saved when the node was hidden */
			fi = &ffi;
			fi->fh = node->hidden_fh;
		}
		pthread_mutex_unlock(&f->lock);
	}

	memset(&buf, 0, sizeof(buf));
	err = ((fi == NULL) ?
	       get_path(f, ino, &path) :
	       get_path_nullok(f, ino, &path));
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = 0;
		if (!err && (valid & FUSE_SET_ATTR_MODE))
			err = ((fi == NULL) ?
			       fuse_fs_chmod(f->fs, path, attr->st_mode) :
			       fuse_fs_fchmod(f->fs, fi, attr->st_mode));
		if (!err && (valid & (FUSE_SET_ATTR_UID | FUSE_SET_ATTR_GID)))
		{
			/* -1 means "leave unchanged" for chown */
			uid_t uid = (valid & FUSE_SET_ATTR_UID) ?
				attr->st_uid : (uid_t) -1;
			gid_t gid = (valid & FUSE_SET_ATTR_GID) ?
				attr->st_gid : (gid_t) -1;
			err = ((fi == NULL) ?
			       fuse_fs_chown(f->fs, path, uid, gid) :
			       fuse_fs_fchown(f->fs, fi, uid, gid));
		}
		if (!err && (valid & FUSE_SET_ATTR_SIZE))
		{
			err = ((fi == NULL) ?
			       fuse_fs_truncate(f->fs, path, attr->st_size) :
			       fuse_fs_ftruncate(f->fs, path, attr->st_size, fi));
		}
#ifdef HAVE_UTIMENSAT
		/* Nanosecond-precision path: UTIME_OMIT leaves a time
		   untouched, UTIME_NOW samples the current time. */
		if (!err && f->utime_omit_ok &&
		    (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME))) {
			struct timespec tv[2];
			tv[0].tv_sec = 0;
			tv[1].tv_sec = 0;
			tv[0].tv_nsec = UTIME_OMIT;
			tv[1].tv_nsec = UTIME_OMIT;
			if (valid & FUSE_SET_ATTR_ATIME_NOW)
				tv[0].tv_nsec = UTIME_NOW;
			else if (valid & FUSE_SET_ATTR_ATIME)
				tv[0] = attr->st_atim;
			if (valid & FUSE_SET_ATTR_MTIME_NOW)
				tv[1].tv_nsec = UTIME_NOW;
			else if (valid & FUSE_SET_ATTR_MTIME)
				tv[1] = attr->st_mtim;
			err = ((fi == NULL) ?
			       fuse_fs_utimens(f->fs, path, tv) :
			       fuse_fs_futimens(f->fs, fi, tv));
		} else
#endif
		/* fallback path: only handles setting both times at once */
		if (!err &&
		    (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) ==
		    (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) {
			struct timespec tv[2];
			tv[0].tv_sec = attr->st_atime;
			tv[0].tv_nsec = ST_ATIM_NSEC(attr);
			tv[1].tv_sec = attr->st_mtime;
			tv[1].tv_nsec = ST_MTIM_NSEC(attr);
			err = ((fi == NULL) ?
			       fuse_fs_utimens(f->fs, path, tv) :
			       fuse_fs_futimens(f->fs, fi, tv));
		}
		/* re-read the now-current attributes for the reply */
		if (!err)
			err = ((fi == NULL) ?
			       fuse_fs_getattr(f->fs, path, &buf) :
			       fuse_fs_fgetattr(f->fs, path, &buf, fi));
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	if (!err) {
		if (f->conf.auto_cache) {
			pthread_mutex_lock(&f->lock);
			update_stat(get_node(f, ino), &buf);
			pthread_mutex_unlock(&f->lock);
		}
		set_stat(f, ino, &buf);
		fuse_reply_attr(req, &buf, f->conf.attr_timeout);
	} else {
		reply_err(req, err);
	}
}
  2370. static void fuse_lib_access(fuse_req_t req, fuse_ino_t ino, int mask)
  2371. {
  2372. struct fuse *f = req_fuse_prepare(req);
  2373. char *path;
  2374. int err;
  2375. err = get_path(f, ino, &path);
  2376. if (!err) {
  2377. struct fuse_intr_data d;
  2378. fuse_prepare_interrupt(f, req, &d);
  2379. err = fuse_fs_access(f->fs, path, mask);
  2380. fuse_finish_interrupt(f, req, &d);
  2381. free_path(f, ino, path);
  2382. }
  2383. reply_err(req, err);
  2384. }
  2385. static void fuse_lib_readlink(fuse_req_t req, fuse_ino_t ino)
  2386. {
  2387. struct fuse *f = req_fuse_prepare(req);
  2388. char linkname[PATH_MAX + 1];
  2389. char *path;
  2390. int err;
  2391. err = get_path(f, ino, &path);
  2392. if (!err) {
  2393. struct fuse_intr_data d;
  2394. fuse_prepare_interrupt(f, req, &d);
  2395. err = fuse_fs_readlink(f->fs, path, linkname, sizeof(linkname));
  2396. fuse_finish_interrupt(f, req, &d);
  2397. free_path(f, ino, path);
  2398. }
  2399. if (!err) {
  2400. linkname[PATH_MAX] = '\0';
  2401. fuse_reply_readlink(req, linkname);
  2402. } else
  2403. reply_err(req, err);
  2404. }
  2405. static void fuse_lib_mknod(fuse_req_t req, fuse_ino_t parent, const char *name,
  2406. mode_t mode, dev_t rdev)
  2407. {
  2408. struct fuse *f = req_fuse_prepare(req);
  2409. struct fuse_entry_param e;
  2410. char *path;
  2411. int err;
  2412. err = get_path_name(f, parent, name, &path);
  2413. if (!err) {
  2414. struct fuse_intr_data d;
  2415. fuse_prepare_interrupt(f, req, &d);
  2416. err = -ENOSYS;
  2417. if (S_ISREG(mode)) {
  2418. struct fuse_file_info fi;
  2419. memset(&fi, 0, sizeof(fi));
  2420. fi.flags = O_CREAT | O_EXCL | O_WRONLY;
  2421. err = fuse_fs_create(f->fs, path, mode, &fi);
  2422. if (!err) {
  2423. err = lookup_path(f, parent, name, path, &e,
  2424. &fi);
  2425. fuse_fs_release(f->fs, path, &fi);
  2426. }
  2427. }
  2428. if (err == -ENOSYS) {
  2429. err = fuse_fs_mknod(f->fs, path, mode, rdev);
  2430. if (!err)
  2431. err = lookup_path(f, parent, name, path, &e,
  2432. NULL);
  2433. }
  2434. fuse_finish_interrupt(f, req, &d);
  2435. free_path(f, parent, path);
  2436. }
  2437. reply_entry(req, &e, err);
  2438. }
  2439. static void fuse_lib_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name,
  2440. mode_t mode)
  2441. {
  2442. struct fuse *f = req_fuse_prepare(req);
  2443. struct fuse_entry_param e;
  2444. char *path;
  2445. int err;
  2446. err = get_path_name(f, parent, name, &path);
  2447. if (!err) {
  2448. struct fuse_intr_data d;
  2449. fuse_prepare_interrupt(f, req, &d);
  2450. err = fuse_fs_mkdir(f->fs, path, mode);
  2451. if (!err)
  2452. err = lookup_path(f, parent, name, path, &e, NULL);
  2453. fuse_finish_interrupt(f, req, &d);
  2454. free_path(f, parent, path);
  2455. }
  2456. reply_entry(req, &e, err);
  2457. }
/* UNLINK request.  If the target is still open, first ask the
   filesystem to "hide" it (keep the data reachable through
   hidden_fh) so open handles keep working after the name is gone;
   hidden nodes keep their tree entry until the last release. */
static
void
fuse_lib_unlink(fuse_req_t req,
		fuse_ino_t parent,
		const char *name)
{
	int err;
	char *path;
	struct fuse *f;
	struct node *wnode;
	struct fuse_intr_data d;

	f = req_fuse_prepare(req);
	err = get_path_wrlock(f, parent, name, &path, &wnode);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		if (node_open_and_visible(wnode)) {
			err = fuse_fs_prepare_hide(f->fs, path,
						   &wnode->hidden_fh, 0);
			if (!err)
				wnode->is_hidden = 1;
		}
		err = fuse_fs_unlink(f->fs, path);
		/* keep the node alive while hidden; otherwise drop it */
		if (!err && !wnode->is_hidden)
			remove_node(f, parent, name);
		fuse_finish_interrupt(f, req, &d);
		free_path_wrlock(f, parent, wnode, path);
	}
	reply_err(req, err);
}
  2488. static void fuse_lib_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name)
  2489. {
  2490. struct fuse *f = req_fuse_prepare(req);
  2491. struct node *wnode;
  2492. char *path;
  2493. int err;
  2494. err = get_path_wrlock(f, parent, name, &path, &wnode);
  2495. if (!err) {
  2496. struct fuse_intr_data d;
  2497. fuse_prepare_interrupt(f, req, &d);
  2498. err = fuse_fs_rmdir(f->fs, path);
  2499. fuse_finish_interrupt(f, req, &d);
  2500. if (!err)
  2501. remove_node(f, parent, name);
  2502. free_path_wrlock(f, parent, wnode, path);
  2503. }
  2504. reply_err(req, err);
  2505. }
  2506. static void fuse_lib_symlink(fuse_req_t req, const char *linkname,
  2507. fuse_ino_t parent, const char *name)
  2508. {
  2509. struct fuse *f = req_fuse_prepare(req);
  2510. struct fuse_entry_param e;
  2511. char *path;
  2512. int err;
  2513. err = get_path_name(f, parent, name, &path);
  2514. if (!err) {
  2515. struct fuse_intr_data d;
  2516. fuse_prepare_interrupt(f, req, &d);
  2517. err = fuse_fs_symlink(f->fs, linkname, path);
  2518. if (!err)
  2519. err = lookup_path(f, parent, name, path, &e, NULL);
  2520. fuse_finish_interrupt(f, req, &d);
  2521. free_path(f, parent, path);
  2522. }
  2523. reply_entry(req, &e, err);
  2524. }
/* RENAME request.  If the destination exists and is open, hide it
   first so its open handles survive being overwritten.  On success
   the internal node tree is updated to mirror the rename. */
static
void
fuse_lib_rename(fuse_req_t req,
		fuse_ino_t olddir,
		const char *oldname,
		fuse_ino_t newdir,
		const char *newname)
{
	int err;
	struct fuse *f;
	char *oldpath;
	char *newpath;
	struct node *wnode1;
	struct node *wnode2;
	struct fuse_intr_data d;

	f = req_fuse_prepare(req);
	err = get_path2(f, olddir, oldname, newdir, newname,
			&oldpath, &newpath, &wnode1, &wnode2);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		if (node_open_and_visible(wnode2)) {
			/* destination is open: preserve its data via a
			   hidden handle before it gets clobbered */
			err = fuse_fs_prepare_hide(f->fs, newpath,
						   &wnode2->hidden_fh, 1);
			if (!err)
				wnode2->is_hidden = 1;
		}
		err = fuse_fs_rename(f->fs, oldpath, newpath);
		if (!err)
			err = rename_node(f, olddir, oldname, newdir, newname);
		fuse_finish_interrupt(f, req, &d);
		free_path2(f, olddir, newdir, wnode1, wnode2,
			   oldpath, newpath);
	}
	reply_err(req, err);
}
  2560. static void fuse_lib_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent,
  2561. const char *newname)
  2562. {
  2563. struct fuse *f = req_fuse_prepare(req);
  2564. struct fuse_entry_param e;
  2565. char *oldpath;
  2566. char *newpath;
  2567. int err;
  2568. err = get_path2(f, ino, NULL, newparent, newname,
  2569. &oldpath, &newpath, NULL, NULL);
  2570. if (!err) {
  2571. struct fuse_intr_data d;
  2572. fuse_prepare_interrupt(f, req, &d);
  2573. err = fuse_fs_link(f->fs, oldpath, newpath);
  2574. if (!err)
  2575. err = lookup_path(f, newparent, newname, newpath,
  2576. &e, NULL);
  2577. fuse_finish_interrupt(f, req, &d);
  2578. free_path2(f, ino, newparent, NULL, NULL, oldpath, newpath);
  2579. }
  2580. reply_entry(req, &e, err);
  2581. }
/* Common RELEASE bookkeeping: call the filesystem's release, drop
   the node's open count, and if this was the last open handle of a
   hidden (unlinked-while-open) node, free the hidden file handle. */
static void fuse_do_release(struct fuse *f, fuse_ino_t ino, const char *path,
			    struct fuse_file_info *fi)
{
	struct node *node;
	uint64_t fh;
	int was_hidden;
	const char *compatpath;

	fh = 0;
	/* filesystems that cannot handle a NULL path get "-" instead */
	if (path != NULL || f->nullpath_ok || f->conf.nopath)
		compatpath = path;
	else
		compatpath = "-";

	fuse_fs_release(f->fs, compatpath, fi);

	pthread_mutex_lock(&f->lock);
	node = get_node(f, ino);
	assert(node->open_count > 0);
	node->open_count--;
	was_hidden = 0;
	if (node->is_hidden && (node->open_count == 0)) {
		was_hidden = 1;
		node->is_hidden = 0;
		fh = node->hidden_fh;
	}
	pthread_mutex_unlock(&f->lock);

	/* free the hidden handle outside the lock */
	if (was_hidden)
		fuse_fs_free_hide(f->fs, fh);
}
/* CREATE request: create+open in one step.  On success the node's
   open count is bumped; if the kernel aborted the request (-ENOENT
   from fuse_reply_create) the open is rolled back with a release
   and a forget. */
static void fuse_lib_create(fuse_req_t req, fuse_ino_t parent,
			    const char *name, mode_t mode,
			    struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_entry_param e;
	char *path;
	int err;

	err = get_path_name(f, parent, name, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_create(f->fs, path, mode, fi);
		if (!err) {
			err = lookup_path(f, parent, name, path, &e, fi);
			if (err)
				fuse_fs_release(f->fs, path, fi);
			else if (!S_ISREG(e.attr.st_mode)) {
				/* created something that is not a regular
				   file: undo and report I/O error */
				err = -EIO;
				fuse_fs_release(f->fs, path, fi);
				forget_node(f, e.ino, 1);
			} else {
				if (f->conf.kernel_cache)
					fi->keep_cache = 1;
			}
		}
		fuse_finish_interrupt(f, req, &d);
	}
	if (!err) {
		pthread_mutex_lock(&f->lock);
		struct node *n = get_node(f, e.ino);
		n->open_count++;
		pthread_mutex_unlock(&f->lock);
		if (fuse_reply_create(req, &e, fi) == -ENOENT) {
			/* The open syscall was interrupted, so it
			   must be cancelled */
			fuse_do_release(f, e.ino, path, fi);
			forget_node(f, e.ino, 1);
		}
	} else {
		reply_err(req, err);
	}
	free_path(f, parent, path);
}
  2653. static double diff_timespec(const struct timespec *t1,
  2654. const struct timespec *t2)
  2655. {
  2656. return (t1->tv_sec - t2->tv_sec) +
  2657. ((double) t1->tv_nsec - (double) t2->tv_nsec) / 1000000000.0;
  2658. }
/* auto_cache support for open: decide whether the kernel may keep
   its page cache for this file.  If the cached attributes are older
   than ac_attr_timeout, re-stat and let update_stat() invalidate on
   change.  Note the lock is dropped around the fgetattr call. */
static void open_auto_cache(struct fuse *f, fuse_ino_t ino, const char *path,
			    struct fuse_file_info *fi)
{
	struct node *node;

	pthread_mutex_lock(&f->lock);
	node = get_node(f, ino);
	if (node->cache_valid) {
		struct timespec now;

		curr_time(&now);
		if (diff_timespec(&now, &node->stat_updated) >
		    f->conf.ac_attr_timeout) {
			struct stat stbuf;
			int err;
			pthread_mutex_unlock(&f->lock);
			err = fuse_fs_fgetattr(f->fs, path, &stbuf, fi);
			pthread_mutex_lock(&f->lock);
			if (!err)
				/* clears cache_valid if size/mtime changed */
				update_stat(node, &stbuf);
			else
				node->cache_valid = 0;
		}
	}
	if (node->cache_valid)
		fi->keep_cache = 1;
	/* from here on the cache reflects this open */
	node->cache_valid = 1;
	pthread_mutex_unlock(&f->lock);
}
/* OPEN request.  Applies the kernel_cache/auto_cache policies, bumps
   the node's open count, and rolls the open back if the kernel has
   already given up on the request. */
static void fuse_lib_open(fuse_req_t req, fuse_ino_t ino,
			  struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	char *path;
	int err;

	err = get_path(f, ino, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_open(f->fs, path, fi);
		if (!err) {
			if (f->conf.kernel_cache)
				fi->keep_cache = 1;
			if (f->conf.auto_cache)
				open_auto_cache(f, ino, path, fi);
		}
		fuse_finish_interrupt(f, req, &d);
	}
	if (!err) {
		pthread_mutex_lock(&f->lock);
		struct node *n = get_node(f, ino);
		n->open_count++;
		pthread_mutex_unlock(&f->lock);
		if (fuse_reply_open(req, fi) == -ENOENT) {
			/* The open syscall was interrupted, so it
			   must be cancelled */
			fuse_do_release(f, ino, path, fi);
		}
	} else
		reply_err(req, err);
	free_path(f, ino, path);
}
  2719. static void fuse_lib_read(fuse_req_t req, fuse_ino_t ino, size_t size,
  2720. off_t off, struct fuse_file_info *fi)
  2721. {
  2722. struct fuse *f = req_fuse_prepare(req);
  2723. struct fuse_bufvec *buf = NULL;
  2724. char *path;
  2725. int res;
  2726. res = get_path_nullok(f, ino, &path);
  2727. if (res == 0) {
  2728. struct fuse_intr_data d;
  2729. fuse_prepare_interrupt(f, req, &d);
  2730. res = fuse_fs_read_buf(f->fs, path, &buf, size, off, fi);
  2731. fuse_finish_interrupt(f, req, &d);
  2732. free_path(f, ino, path);
  2733. }
  2734. if (res == 0)
  2735. fuse_reply_data(req, buf, FUSE_BUF_SPLICE_MOVE);
  2736. else
  2737. reply_err(req, res);
  2738. fuse_free_buf(buf);
  2739. }
  2740. static void fuse_lib_write_buf(fuse_req_t req, fuse_ino_t ino,
  2741. struct fuse_bufvec *buf, off_t off,
  2742. struct fuse_file_info *fi)
  2743. {
  2744. struct fuse *f = req_fuse_prepare(req);
  2745. char *path;
  2746. int res;
  2747. res = get_path_nullok(f, ino, &path);
  2748. if (res == 0) {
  2749. struct fuse_intr_data d;
  2750. fuse_prepare_interrupt(f, req, &d);
  2751. res = fuse_fs_write_buf(f->fs, path, buf, off, fi);
  2752. fuse_finish_interrupt(f, req, &d);
  2753. free_path(f, ino, path);
  2754. }
  2755. if (res >= 0)
  2756. fuse_reply_write(req, res);
  2757. else
  2758. reply_err(req, res);
  2759. }
  2760. static void fuse_lib_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
  2761. struct fuse_file_info *fi)
  2762. {
  2763. struct fuse *f = req_fuse_prepare(req);
  2764. char *path;
  2765. int err;
  2766. err = get_path_nullok(f, ino, &path);
  2767. if (!err) {
  2768. struct fuse_intr_data d;
  2769. fuse_prepare_interrupt(f, req, &d);
  2770. err = fuse_fs_fsync(f->fs, path, datasync, fi);
  2771. fuse_finish_interrupt(f, req, &d);
  2772. free_path(f, ino, path);
  2773. }
  2774. reply_err(req, err);
  2775. }
  2776. static struct fuse_dh *get_dirhandle(const struct fuse_file_info *llfi,
  2777. struct fuse_file_info *fi)
  2778. {
  2779. struct fuse_dh *dh = (struct fuse_dh *) (uintptr_t) llfi->fh;
  2780. memset(fi, 0, sizeof(struct fuse_file_info));
  2781. fi->fh = dh->fh;
  2782. fi->fh_old = dh->fh;
  2783. return dh;
  2784. }
/* OPENDIR request.  Allocates a fuse_dh directory handle that will
   buffer directory entries for later READDIR calls; the handle
   pointer travels to the kernel through llfi->fh. */
static void fuse_lib_opendir(fuse_req_t req, fuse_ino_t ino,
			     struct fuse_file_info *llfi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_dh *dh;
	struct fuse_file_info fi;
	char *path;
	int err;

	dh = (struct fuse_dh *) malloc(sizeof(struct fuse_dh));
	if (dh == NULL) {
		reply_err(req, -ENOMEM);
		return;
	}
	memset(dh, 0, sizeof(struct fuse_dh));
	dh->fuse = f;
	dh->contents = NULL;
	dh->len = 0;
	dh->filled = 0;
	dh->nodeid = ino;
	fuse_mutex_init(&dh->lock);

	llfi->fh = (uintptr_t) dh;

	memset(&fi, 0, sizeof(fi));
	fi.flags = llfi->flags;

	err = get_path(f, ino, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_opendir(f->fs, path, &fi);
		fuse_finish_interrupt(f, req, &d);
		/* remember the filesystem's fh for later readdir calls */
		dh->fh = fi.fh;
	}
	if (!err) {
		if (fuse_reply_open(req, llfi) == -ENOENT) {
			/* The opendir syscall was interrupted, so it
			   must be cancelled */
			fuse_fs_releasedir(f->fs, path, &fi);
			pthread_mutex_destroy(&dh->lock);
			free(dh);
		}
	} else {
		reply_err(req, err);
		pthread_mutex_destroy(&dh->lock);
		free(dh);
	}
	free_path(f, ino, path);
}
  2831. static int extend_contents(struct fuse_dh *dh, unsigned minsize)
  2832. {
  2833. if (minsize > dh->size) {
  2834. char *newptr;
  2835. unsigned newsize = dh->size;
  2836. if (!newsize)
  2837. newsize = 1024;
  2838. while (newsize < minsize) {
  2839. if (newsize >= 0x80000000)
  2840. newsize = 0xffffffff;
  2841. else
  2842. newsize *= 2;
  2843. }
  2844. newptr = (char *) realloc(dh->contents, newsize);
  2845. if (!newptr) {
  2846. dh->error = -ENOMEM;
  2847. return -1;
  2848. }
  2849. dh->contents = newptr;
  2850. dh->size = newsize;
  2851. }
  2852. return 0;
  2853. }
/* Directory filler callback handed to the filesystem's readdir.
   Two modes, selected by `off`:
     off != 0: entries carry real offsets; emit at most needlen
               bytes and stop enumerating (return 1) once the reply
               window is full.
     off == 0: the whole directory is buffered; each entry's offset
               is simply the end of the previous one.
   Returns 0 to continue, 1 to stop. */
static int fill_dir(void *dh_, const char *name, const struct stat *statp,
		    off_t off)
{
	struct fuse_dh *dh = (struct fuse_dh *) dh_;
	struct stat stbuf;
	size_t newlen;

	if (statp)
		stbuf = *statp;
	else {
		memset(&stbuf, 0, sizeof(stbuf));
		stbuf.st_ino = FUSE_UNKNOWN_INO;
	}

	if (!dh->fuse->conf.use_ino) {
		stbuf.st_ino = FUSE_UNKNOWN_INO;
		if (dh->fuse->conf.readdir_ino) {
			/* report our internal node ids instead */
			struct node *node;
			pthread_mutex_lock(&dh->fuse->lock);
			node = lookup_node(dh->fuse, dh->nodeid, name);
			if (node)
				stbuf.st_ino = (ino_t) node->nodeid;
			pthread_mutex_unlock(&dh->fuse->lock);
		}
	}

	if (off) {
		if (extend_contents(dh, dh->needlen) == -1)
			return 1;

		/* direct mode: this buffer holds one reply only */
		dh->filled = 0;
		newlen = dh->len +
			fuse_add_direntry(dh->req, dh->contents + dh->len,
					  dh->needlen - dh->len, name,
					  &stbuf, off);
		if (newlen > dh->needlen)
			return 1;
	} else {
		/* first call computes the entry's encoded size only */
		newlen = dh->len +
			fuse_add_direntry(dh->req, NULL, 0, name, NULL, 0);
		if (extend_contents(dh, newlen) == -1)
			return 1;

		fuse_add_direntry(dh->req, dh->contents + dh->len,
				  dh->size - dh->len, name, &stbuf, newlen);
	}
	dh->len = newlen;
	return 0;
}
  2898. static int readdir_fill(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  2899. size_t size, off_t off, struct fuse_dh *dh,
  2900. struct fuse_file_info *fi)
  2901. {
  2902. char *path;
  2903. int err;
  2904. if (f->fs->op.readdir)
  2905. err = get_path_nullok(f, ino, &path);
  2906. else
  2907. err = get_path(f, ino, &path);
  2908. if (!err) {
  2909. struct fuse_intr_data d;
  2910. dh->len = 0;
  2911. dh->error = 0;
  2912. dh->needlen = size;
  2913. dh->filled = 1;
  2914. dh->req = req;
  2915. fuse_prepare_interrupt(f, req, &d);
  2916. err = fuse_fs_readdir(f->fs, path, dh, fill_dir, off, fi);
  2917. fuse_finish_interrupt(f, req, &d);
  2918. dh->req = NULL;
  2919. if (!err)
  2920. err = dh->error;
  2921. if (err)
  2922. dh->filled = 0;
  2923. free_path(f, ino, path);
  2924. }
  2925. return err;
  2926. }
/* READDIR request.  Serves a window [off, off+size) out of the
   buffered directory contents, refilling first on rewind (off == 0)
   or when the buffer is not marked filled. */
static void fuse_lib_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
			     off_t off, struct fuse_file_info *llfi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_file_info fi;
	struct fuse_dh *dh = get_dirhandle(llfi, &fi);

	pthread_mutex_lock(&dh->lock);
	/* According to SUS, directory contents need to be refreshed on
	   rewinddir() */
	if (!off)
		dh->filled = 0;

	if (!dh->filled) {
		int err = readdir_fill(f, req, ino, size, off, dh, &fi);
		if (err) {
			reply_err(req, err);
			goto out;
		}
	}
	if (dh->filled) {
		/* clamp the reply window to the buffered length */
		if (off < dh->len) {
			if (off + size > dh->len)
				size = dh->len - off;
		} else
			size = 0;
	} else {
		/* direct mode: fill_dir wrote exactly one reply's worth */
		size = dh->len;
		off = 0;
	}
	fuse_reply_buf(req, dh->contents + off, size);
out:
	pthread_mutex_unlock(&dh->lock);
}
/* RELEASEDIR request.  Frees the fuse_dh handle; the empty
   lock/unlock pair makes sure no concurrent readdir still holds the
   handle's mutex before it is destroyed. */
static void fuse_lib_releasedir(fuse_req_t req, fuse_ino_t ino,
				struct fuse_file_info *llfi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_file_info fi;
	struct fuse_dh *dh = get_dirhandle(llfi, &fi);
	char *path;
	const char *compatpath;

	get_path_nullok(f, ino, &path);
	/* filesystems that cannot handle a NULL path get "-" instead */
	if (path != NULL || f->nullpath_ok || f->conf.nopath)
		compatpath = path;
	else
		compatpath = "-";

	fuse_prepare_interrupt(f, req, &d);
	fuse_fs_releasedir(f->fs, compatpath, &fi);
	fuse_finish_interrupt(f, req, &d);
	free_path(f, ino, path);

	/* wait for any in-flight readdir to drop the lock */
	pthread_mutex_lock(&dh->lock);
	pthread_mutex_unlock(&dh->lock);
	pthread_mutex_destroy(&dh->lock);
	free(dh->contents);
	free(dh);
	reply_err(req, 0);
}
  2984. static void fuse_lib_fsyncdir(fuse_req_t req, fuse_ino_t ino, int datasync,
  2985. struct fuse_file_info *llfi)
  2986. {
  2987. struct fuse *f = req_fuse_prepare(req);
  2988. struct fuse_file_info fi;
  2989. char *path;
  2990. int err;
  2991. get_dirhandle(llfi, &fi);
  2992. err = get_path_nullok(f, ino, &path);
  2993. if (!err) {
  2994. struct fuse_intr_data d;
  2995. fuse_prepare_interrupt(f, req, &d);
  2996. err = fuse_fs_fsyncdir(f->fs, path, datasync, &fi);
  2997. fuse_finish_interrupt(f, req, &d);
  2998. free_path(f, ino, path);
  2999. }
  3000. reply_err(req, err);
  3001. }
  3002. static void fuse_lib_statfs(fuse_req_t req, fuse_ino_t ino)
  3003. {
  3004. struct fuse *f = req_fuse_prepare(req);
  3005. struct statvfs buf;
  3006. char *path = NULL;
  3007. int err = 0;
  3008. memset(&buf, 0, sizeof(buf));
  3009. if (ino)
  3010. err = get_path(f, ino, &path);
  3011. if (!err) {
  3012. struct fuse_intr_data d;
  3013. fuse_prepare_interrupt(f, req, &d);
  3014. err = fuse_fs_statfs(f->fs, path ? path : "/", &buf);
  3015. fuse_finish_interrupt(f, req, &d);
  3016. free_path(f, ino, path);
  3017. }
  3018. if (!err)
  3019. fuse_reply_statfs(req, &buf);
  3020. else
  3021. reply_err(req, err);
  3022. }
  3023. static void fuse_lib_setxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
  3024. const char *value, size_t size, int flags)
  3025. {
  3026. struct fuse *f = req_fuse_prepare(req);
  3027. char *path;
  3028. int err;
  3029. err = get_path(f, ino, &path);
  3030. if (!err) {
  3031. struct fuse_intr_data d;
  3032. fuse_prepare_interrupt(f, req, &d);
  3033. err = fuse_fs_setxattr(f->fs, path, name, value, size, flags);
  3034. fuse_finish_interrupt(f, req, &d);
  3035. free_path(f, ino, path);
  3036. }
  3037. reply_err(req, err);
  3038. }
  3039. static int common_getxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  3040. const char *name, char *value, size_t size)
  3041. {
  3042. int err;
  3043. char *path;
  3044. err = get_path(f, ino, &path);
  3045. if (!err) {
  3046. struct fuse_intr_data d;
  3047. fuse_prepare_interrupt(f, req, &d);
  3048. err = fuse_fs_getxattr(f->fs, path, name, value, size);
  3049. fuse_finish_interrupt(f, req, &d);
  3050. free_path(f, ino, path);
  3051. }
  3052. return err;
  3053. }
  3054. static void fuse_lib_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
  3055. size_t size)
  3056. {
  3057. struct fuse *f = req_fuse_prepare(req);
  3058. int res;
  3059. if (size) {
  3060. char *value = (char *) malloc(size);
  3061. if (value == NULL) {
  3062. reply_err(req, -ENOMEM);
  3063. return;
  3064. }
  3065. res = common_getxattr(f, req, ino, name, value, size);
  3066. if (res > 0)
  3067. fuse_reply_buf(req, value, res);
  3068. else
  3069. reply_err(req, res);
  3070. free(value);
  3071. } else {
  3072. res = common_getxattr(f, req, ino, name, NULL, 0);
  3073. if (res >= 0)
  3074. fuse_reply_xattr(req, res);
  3075. else
  3076. reply_err(req, res);
  3077. }
  3078. }
  3079. static int common_listxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  3080. char *list, size_t size)
  3081. {
  3082. char *path;
  3083. int err;
  3084. err = get_path(f, ino, &path);
  3085. if (!err) {
  3086. struct fuse_intr_data d;
  3087. fuse_prepare_interrupt(f, req, &d);
  3088. err = fuse_fs_listxattr(f->fs, path, list, size);
  3089. fuse_finish_interrupt(f, req, &d);
  3090. free_path(f, ino, path);
  3091. }
  3092. return err;
  3093. }
  3094. static void fuse_lib_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
  3095. {
  3096. struct fuse *f = req_fuse_prepare(req);
  3097. int res;
  3098. if (size) {
  3099. char *list = (char *) malloc(size);
  3100. if (list == NULL) {
  3101. reply_err(req, -ENOMEM);
  3102. return;
  3103. }
  3104. res = common_listxattr(f, req, ino, list, size);
  3105. if (res > 0)
  3106. fuse_reply_buf(req, list, res);
  3107. else
  3108. reply_err(req, res);
  3109. free(list);
  3110. } else {
  3111. res = common_listxattr(f, req, ino, NULL, 0);
  3112. if (res >= 0)
  3113. fuse_reply_xattr(req, res);
  3114. else
  3115. reply_err(req, res);
  3116. }
  3117. }
  3118. static void fuse_lib_removexattr(fuse_req_t req, fuse_ino_t ino,
  3119. const char *name)
  3120. {
  3121. struct fuse *f = req_fuse_prepare(req);
  3122. char *path;
  3123. int err;
  3124. err = get_path(f, ino, &path);
  3125. if (!err) {
  3126. struct fuse_intr_data d;
  3127. fuse_prepare_interrupt(f, req, &d);
  3128. err = fuse_fs_removexattr(f->fs, path, name);
  3129. fuse_finish_interrupt(f, req, &d);
  3130. free_path(f, ino, path);
  3131. }
  3132. reply_err(req, err);
  3133. }
  3134. static struct lock *locks_conflict(struct node *node, const struct lock *lock)
  3135. {
  3136. struct lock *l;
  3137. for (l = node->locks; l; l = l->next)
  3138. if (l->owner != lock->owner &&
  3139. lock->start <= l->end && l->start <= lock->end &&
  3140. (l->type == F_WRLCK || lock->type == F_WRLCK))
  3141. break;
  3142. return l;
  3143. }
  3144. static void delete_lock(struct lock **lockp)
  3145. {
  3146. struct lock *l = *lockp;
  3147. *lockp = l->next;
  3148. free(l);
  3149. }
  3150. static void insert_lock(struct lock **pos, struct lock *lock)
  3151. {
  3152. lock->next = *pos;
  3153. *pos = lock;
  3154. }
/*
 * Insert/merge a POSIX record lock into a node's lock list (kept in
 * byte-range order).  Handles every splice case: coalescing adjacent
 * or overlapping locks of the same type, trimming or splitting an
 * existing lock of a different type, and removing ranges covered by an
 * F_UNLCK.  Returns 0 on success or -ENOLCK if the scratch nodes
 * cannot be allocated.
 */
static int locks_insert(struct node *node, struct lock *lock)
{
	struct lock **lp;
	struct lock *newl1 = NULL;
	struct lock *newl2 = NULL;

	/* A full-range unlock can only shrink the list and needs no
	   scratch nodes; anything else may insert one lock and split
	   another, so pre-allocate two nodes up front. */
	if (lock->type != F_UNLCK || lock->start != 0 ||
	    lock->end != OFFSET_MAX) {
		newl1 = malloc(sizeof(struct lock));
		newl2 = malloc(sizeof(struct lock));

		if (!newl1 || !newl2) {
			free(newl1);
			free(newl2);
			return -ENOLCK;
		}
	}

	for (lp = &node->locks; *lp;) {
		struct lock *l = *lp;
		if (l->owner != lock->owner)
			goto skip;	/* only merge same-owner locks */
		if (lock->type == l->type) {
			/* Same type: merge overlapping/adjacent ranges. */
			if (l->end < lock->start - 1)
				goto skip;
			if (lock->end < l->start - 1)
				break;	/* list is ordered; we're past it */
			if (l->start <= lock->start && lock->end <= l->end)
				goto out;	/* already fully covered */
			if (l->start < lock->start)
				lock->start = l->start;
			if (lock->end < l->end)
				lock->end = l->end;
			goto delete;	/* absorbed l into lock */
		} else {
			/* Different type: the new range overrides overlap. */
			if (l->end < lock->start)
				goto skip;
			if (lock->end < l->start)
				break;
			if (lock->start <= l->start && l->end <= lock->end)
				goto delete;	/* l fully replaced */
			if (l->end <= lock->end) {
				l->end = lock->start - 1;	/* trim tail */
				goto skip;
			}
			if (lock->start <= l->start) {
				l->start = lock->end + 1;	/* trim head */
				break;
			}
			/* lock lies strictly inside l: split l in two. */
			*newl2 = *l;
			newl2->start = lock->end + 1;
			l->end = lock->start - 1;
			insert_lock(&l->next, newl2);
			newl2 = NULL;
		}
	skip:
		lp = &l->next;
		continue;
	delete:
		delete_lock(lp);
	}
	if (lock->type != F_UNLCK) {
		*newl1 = *lock;
		insert_lock(lp, newl1);
		newl1 = NULL;
	}
out:
	/* Free whichever scratch nodes were not consumed. */
	free(newl1);
	free(newl2);
	return 0;
}
  3223. static void flock_to_lock(struct flock *flock, struct lock *lock)
  3224. {
  3225. memset(lock, 0, sizeof(struct lock));
  3226. lock->type = flock->l_type;
  3227. lock->start = flock->l_start;
  3228. lock->end =
  3229. flock->l_len ? flock->l_start + flock->l_len - 1 : OFFSET_MAX;
  3230. lock->pid = flock->l_pid;
  3231. }
  3232. static void lock_to_flock(struct lock *lock, struct flock *flock)
  3233. {
  3234. flock->l_type = lock->type;
  3235. flock->l_start = lock->start;
  3236. flock->l_len =
  3237. (lock->end == OFFSET_MAX) ? 0 : lock->end - lock->start + 1;
  3238. flock->l_pid = lock->pid;
  3239. }
/*
 * Common FLUSH handling: call the filesystem's flush() and also drop
 * all POSIX record locks held by this lock owner (the kernel sends
 * FLUSH on every close(), which is when record locks must be released).
 * Returns flush()'s result, except that -ENOSYS is masked when the
 * filesystem implements lock().
 */
static int fuse_flush_common(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
			     const char *path, struct fuse_file_info *fi)
{
	struct fuse_intr_data d;
	struct flock lock;
	struct lock l;
	int err;
	int errlock;

	fuse_prepare_interrupt(f, req, &d);
	memset(&lock, 0, sizeof(lock));
	lock.l_type = F_UNLCK;
	lock.l_whence = SEEK_SET;
	err = fuse_fs_flush(f->fs, path, fi);
	/* Full-range unlock for this owner, as required on close(). */
	errlock = fuse_fs_lock(f->fs, path, fi, F_SETLK, &lock);
	fuse_finish_interrupt(f, req, &d);

	if (errlock != -ENOSYS) {
		/* Mirror the unlock in our local lock bookkeeping. */
		flock_to_lock(&lock, &l);
		l.owner = fi->lock_owner;
		pthread_mutex_lock(&f->lock);
		locks_insert(get_node(f, ino), &l);
		pthread_mutex_unlock(&f->lock);

		/* if op.lock() is defined FLUSH is needed regardless
		   of op.flush() */
		if (err == -ENOSYS)
			err = 0;
	}
	return err;
}
  3268. static void fuse_lib_release(fuse_req_t req, fuse_ino_t ino,
  3269. struct fuse_file_info *fi)
  3270. {
  3271. struct fuse *f = req_fuse_prepare(req);
  3272. struct fuse_intr_data d;
  3273. char *path;
  3274. int err = 0;
  3275. get_path_nullok(f, ino, &path);
  3276. if (fi->flush) {
  3277. err = fuse_flush_common(f, req, ino, path, fi);
  3278. if (err == -ENOSYS)
  3279. err = 0;
  3280. }
  3281. fuse_prepare_interrupt(f, req, &d);
  3282. fuse_do_release(f, ino, path, fi);
  3283. fuse_finish_interrupt(f, req, &d);
  3284. free_path(f, ino, path);
  3285. reply_err(req, err);
  3286. }
  3287. static void fuse_lib_flush(fuse_req_t req, fuse_ino_t ino,
  3288. struct fuse_file_info *fi)
  3289. {
  3290. struct fuse *f = req_fuse_prepare(req);
  3291. char *path;
  3292. int err;
  3293. get_path_nullok(f, ino, &path);
  3294. err = fuse_flush_common(f, req, ino, path, fi);
  3295. free_path(f, ino, path);
  3296. reply_err(req, err);
  3297. }
  3298. static int fuse_lock_common(fuse_req_t req, fuse_ino_t ino,
  3299. struct fuse_file_info *fi, struct flock *lock,
  3300. int cmd)
  3301. {
  3302. struct fuse *f = req_fuse_prepare(req);
  3303. char *path;
  3304. int err;
  3305. err = get_path_nullok(f, ino, &path);
  3306. if (!err) {
  3307. struct fuse_intr_data d;
  3308. fuse_prepare_interrupt(f, req, &d);
  3309. err = fuse_fs_lock(f->fs, path, fi, cmd, lock);
  3310. fuse_finish_interrupt(f, req, &d);
  3311. free_path(f, ino, path);
  3312. }
  3313. return err;
  3314. }
/*
 * GETLK: first test the request against our locally tracked locks;
 * only when there is no local conflict is the query forwarded to the
 * filesystem.  On a local conflict the blocking lock is reported back
 * in *lock, matching fcntl(F_GETLK) semantics.
 */
static void fuse_lib_getlk(fuse_req_t req, fuse_ino_t ino,
			   struct fuse_file_info *fi, struct flock *lock)
{
	int err;
	struct lock l;
	struct lock *conflict;
	struct fuse *f = req_fuse(req);

	flock_to_lock(lock, &l);
	l.owner = fi->lock_owner;
	pthread_mutex_lock(&f->lock);
	conflict = locks_conflict(get_node(f, ino), &l);
	if (conflict)
		lock_to_flock(conflict, lock);	/* describe the blocker */
	pthread_mutex_unlock(&f->lock);
	if (!conflict)
		err = fuse_lock_common(req, ino, fi, lock, F_GETLK);
	else
		err = 0;
	if (!err)
		fuse_reply_lock(req, lock);
	else
		reply_err(req, err);
}
  3338. static void fuse_lib_setlk(fuse_req_t req, fuse_ino_t ino,
  3339. struct fuse_file_info *fi, struct flock *lock,
  3340. int sleep)
  3341. {
  3342. int err = fuse_lock_common(req, ino, fi, lock,
  3343. sleep ? F_SETLKW : F_SETLK);
  3344. if (!err) {
  3345. struct fuse *f = req_fuse(req);
  3346. struct lock l;
  3347. flock_to_lock(lock, &l);
  3348. l.owner = fi->lock_owner;
  3349. pthread_mutex_lock(&f->lock);
  3350. locks_insert(get_node(f, ino), &l);
  3351. pthread_mutex_unlock(&f->lock);
  3352. }
  3353. reply_err(req, err);
  3354. }
  3355. static void fuse_lib_flock(fuse_req_t req, fuse_ino_t ino,
  3356. struct fuse_file_info *fi, int op)
  3357. {
  3358. struct fuse *f = req_fuse_prepare(req);
  3359. char *path;
  3360. int err;
  3361. err = get_path_nullok(f, ino, &path);
  3362. if (err == 0) {
  3363. struct fuse_intr_data d;
  3364. fuse_prepare_interrupt(f, req, &d);
  3365. err = fuse_fs_flock(f->fs, path, fi, op);
  3366. fuse_finish_interrupt(f, req, &d);
  3367. free_path(f, ino, path);
  3368. }
  3369. reply_err(req, err);
  3370. }
  3371. static void fuse_lib_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
  3372. uint64_t idx)
  3373. {
  3374. struct fuse *f = req_fuse_prepare(req);
  3375. struct fuse_intr_data d;
  3376. char *path;
  3377. int err;
  3378. err = get_path(f, ino, &path);
  3379. if (!err) {
  3380. fuse_prepare_interrupt(f, req, &d);
  3381. err = fuse_fs_bmap(f->fs, path, blocksize, &idx);
  3382. fuse_finish_interrupt(f, req, &d);
  3383. free_path(f, ino, path);
  3384. }
  3385. if (!err)
  3386. fuse_reply_bmap(req, idx);
  3387. else
  3388. reply_err(req, err);
  3389. }
/*
 * IOCTL: the high-level API only supports "restricted" ioctls, where
 * the kernel has already transferred in_bufsz bytes in and will copy
 * out_bufsz bytes back; FUSE_IOCTL_UNRESTRICTED is rejected with
 * -EPERM.  A single buffer is used for both directions.
 */
static void fuse_lib_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd, void *arg,
			   struct fuse_file_info *llfi, unsigned int flags,
			   const void *in_buf, uint32_t in_bufsz,
			   uint32_t out_bufsz_)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_file_info fi;
	char *path, *out_buf = NULL;
	int err;
	uint32_t out_bufsz = out_bufsz_;

	err = -EPERM;
	if (flags & FUSE_IOCTL_UNRESTRICTED)
		goto err;

	if (flags & FUSE_IOCTL_DIR)
		get_dirhandle(llfi, &fi);
	else
		fi = *llfi;

	if (out_bufsz) {
		err = -ENOMEM;
		out_buf = malloc(out_bufsz);
		if (!out_buf)
			goto err;
	}

	/* Restricted ioctls use one in/out buffer of matching size. */
	assert(!in_bufsz || !out_bufsz || in_bufsz == out_bufsz);
	if (out_buf)
		memcpy(out_buf, in_buf, in_bufsz);

	err = get_path_nullok(f, ino, &path);
	if (err)
		goto err;

	fuse_prepare_interrupt(f, req, &d);
	/* Pass out_buf when it exists, else the (input-only) in_buf. */
	err = fuse_fs_ioctl(f->fs, path, cmd, arg, &fi, flags,
			    out_buf ?: (void *)in_buf, &out_bufsz);
	fuse_finish_interrupt(f, req, &d);
	free_path(f, ino, path);

	fuse_reply_ioctl(req, err, out_buf, out_bufsz);
	goto out;
err:
	reply_err(req, err);
out:
	free(out_buf);
}
  3432. static void fuse_lib_poll(fuse_req_t req, fuse_ino_t ino,
  3433. struct fuse_file_info *fi, struct fuse_pollhandle *ph)
  3434. {
  3435. struct fuse *f = req_fuse_prepare(req);
  3436. struct fuse_intr_data d;
  3437. char *path;
  3438. int err;
  3439. unsigned revents = 0;
  3440. err = get_path_nullok(f, ino, &path);
  3441. if (!err) {
  3442. fuse_prepare_interrupt(f, req, &d);
  3443. err = fuse_fs_poll(f->fs, path, fi, ph, &revents);
  3444. fuse_finish_interrupt(f, req, &d);
  3445. free_path(f, ino, path);
  3446. }
  3447. if (!err)
  3448. fuse_reply_poll(req, revents);
  3449. else
  3450. reply_err(req, err);
  3451. }
  3452. static void fuse_lib_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
  3453. off_t offset, off_t length, struct fuse_file_info *fi)
  3454. {
  3455. struct fuse *f = req_fuse_prepare(req);
  3456. struct fuse_intr_data d;
  3457. char *path;
  3458. int err;
  3459. err = get_path_nullok(f, ino, &path);
  3460. if (!err) {
  3461. fuse_prepare_interrupt(f, req, &d);
  3462. err = fuse_fs_fallocate(f->fs, path, mode, offset, length, fi);
  3463. fuse_finish_interrupt(f, req, &d);
  3464. free_path(f, ino, path);
  3465. }
  3466. reply_err(req, err);
  3467. }
  3468. static int clean_delay(struct fuse *f)
  3469. {
  3470. /*
  3471. * This is calculating the delay between clean runs. To
  3472. * reduce the number of cleans we are doing them 10 times
  3473. * within the remember window.
  3474. */
  3475. int min_sleep = 60;
  3476. int max_sleep = 3600;
  3477. int sleep_time = f->conf.remember / 10;
  3478. if (sleep_time > max_sleep)
  3479. return max_sleep;
  3480. if (sleep_time < min_sleep)
  3481. return min_sleep;
  3482. return sleep_time;
  3483. }
/*
 * One pass over the LRU list of remembered (forgotten-by-kernel)
 * nodes: evict every node whose forget_time is older than the
 * "remember" window.  The list is ordered oldest-first, so the scan
 * stops at the first still-fresh node.  Returns the number of seconds
 * until the next pass should run.
 */
int fuse_clean_cache(struct fuse *f)
{
	struct node_lru *lnode;
	struct list_head *curr, *next;
	struct node *node;
	struct timespec now;

	pthread_mutex_lock(&f->lock);

	curr_time(&now);

	for (curr = f->lru_table.next; curr != &f->lru_table; curr = next) {
		double age;

		/* Save next before unref_node() can free curr's node. */
		next = curr->next;
		lnode = list_entry(curr, struct node_lru, lru);
		node = &lnode->node;

		age = diff_timespec(&now, &lnode->forget_time);
		if (age <= f->conf.remember)
			break;	/* everything after this is newer */

		assert(node->nlookup == 1);

		/* Don't forget active directories */
		if (node->refctr > 1)
			continue;

		node->nlookup = 0;
		unhash_name(f, node);
		unref_node(f, node);
	}
	pthread_mutex_unlock(&f->lock);
	return clean_delay(f);
}
/* Low-level operation table: routes every kernel request to the
   path-based (high-level) handlers implemented above. */
static struct fuse_lowlevel_ops fuse_path_ops = {
	.init = fuse_lib_init,
	.destroy = fuse_lib_destroy,
	.lookup = fuse_lib_lookup,
	.forget = fuse_lib_forget,
	.forget_multi = fuse_lib_forget_multi,
	.getattr = fuse_lib_getattr,
	.setattr = fuse_lib_setattr,
	.access = fuse_lib_access,
	.readlink = fuse_lib_readlink,
	.mknod = fuse_lib_mknod,
	.mkdir = fuse_lib_mkdir,
	.unlink = fuse_lib_unlink,
	.rmdir = fuse_lib_rmdir,
	.symlink = fuse_lib_symlink,
	.rename = fuse_lib_rename,
	.link = fuse_lib_link,
	.create = fuse_lib_create,
	.open = fuse_lib_open,
	.read = fuse_lib_read,
	.write_buf = fuse_lib_write_buf,
	.flush = fuse_lib_flush,
	.release = fuse_lib_release,
	.fsync = fuse_lib_fsync,
	.opendir = fuse_lib_opendir,
	.readdir = fuse_lib_readdir,
	.releasedir = fuse_lib_releasedir,
	.fsyncdir = fuse_lib_fsyncdir,
	.statfs = fuse_lib_statfs,
	.setxattr = fuse_lib_setxattr,
	.getxattr = fuse_lib_getxattr,
	.listxattr = fuse_lib_listxattr,
	.removexattr = fuse_lib_removexattr,
	.getlk = fuse_lib_getlk,
	.setlk = fuse_lib_setlk,
	.flock = fuse_lib_flock,
	.bmap = fuse_lib_bmap,
	.ioctl = fuse_lib_ioctl,
	.poll = fuse_lib_poll,
	.fallocate = fuse_lib_fallocate,
};
  3552. int fuse_notify_poll(struct fuse_pollhandle *ph)
  3553. {
  3554. return fuse_lowlevel_notify_poll(ph);
  3555. }
  3556. static void free_cmd(struct fuse_cmd *cmd)
  3557. {
  3558. free(cmd->buf);
  3559. free(cmd);
  3560. }
  3561. void fuse_process_cmd(struct fuse *f, struct fuse_cmd *cmd)
  3562. {
  3563. fuse_session_process(f->se, cmd->buf, cmd->buflen, cmd->ch);
  3564. free_cmd(cmd);
  3565. }
  3566. int fuse_exited(struct fuse *f)
  3567. {
  3568. return fuse_session_exited(f->se);
  3569. }
  3570. struct fuse_session *fuse_get_session(struct fuse *f)
  3571. {
  3572. return f->se;
  3573. }
  3574. static struct fuse_cmd *fuse_alloc_cmd(size_t bufsize)
  3575. {
  3576. struct fuse_cmd *cmd = (struct fuse_cmd *) malloc(sizeof(*cmd));
  3577. if (cmd == NULL) {
  3578. fprintf(stderr, "fuse: failed to allocate cmd\n");
  3579. return NULL;
  3580. }
  3581. cmd->buf = (char *) malloc(bufsize);
  3582. if (cmd->buf == NULL) {
  3583. fprintf(stderr, "fuse: failed to allocate read buffer\n");
  3584. free(cmd);
  3585. return NULL;
  3586. }
  3587. return cmd;
  3588. }
/*
 * Read one raw request from the channel into a freshly allocated
 * fuse_cmd.  Returns NULL on allocation failure, on a transient
 * receive error (EINTR/EAGAIN), or at end of session; any other
 * receive error also terminates the session via fuse_exit().
 */
struct fuse_cmd *fuse_read_cmd(struct fuse *f)
{
	struct fuse_chan *ch = fuse_session_next_chan(f->se, NULL);
	size_t bufsize = fuse_chan_bufsize(ch);
	struct fuse_cmd *cmd = fuse_alloc_cmd(bufsize);
	if (cmd != NULL) {
		int res = fuse_chan_recv(&ch, cmd->buf, bufsize);
		if (res <= 0) {
			free_cmd(cmd);
			if (res < 0 && res != -EINTR && res != -EAGAIN)
				fuse_exit(f);
			return NULL;
		}
		cmd->buflen = res;
		cmd->ch = ch;	/* remember which channel to reply on */
	}
	return cmd;
}
  3607. static int fuse_session_loop_remember(struct fuse *f)
  3608. {
  3609. struct fuse_session *se = f->se;
  3610. int res = 0;
  3611. struct timespec now;
  3612. time_t next_clean;
  3613. struct fuse_chan *ch = fuse_session_next_chan(se, NULL);
  3614. size_t bufsize = fuse_chan_bufsize(ch);
  3615. char *buf = (char *) malloc(bufsize);
  3616. struct pollfd fds = {
  3617. .fd = fuse_chan_fd(ch),
  3618. .events = POLLIN
  3619. };
  3620. if (!buf) {
  3621. fprintf(stderr, "fuse: failed to allocate read buffer\n");
  3622. return -1;
  3623. }
  3624. curr_time(&now);
  3625. next_clean = now.tv_sec;
  3626. while (!fuse_session_exited(se)) {
  3627. struct fuse_chan *tmpch = ch;
  3628. struct fuse_buf fbuf = {
  3629. .mem = buf,
  3630. .size = bufsize,
  3631. };
  3632. unsigned timeout;
  3633. curr_time(&now);
  3634. if (now.tv_sec < next_clean)
  3635. timeout = next_clean - now.tv_sec;
  3636. else
  3637. timeout = 0;
  3638. res = poll(&fds, 1, timeout * 1000);
  3639. if (res == -1) {
  3640. if (errno == -EINTR)
  3641. continue;
  3642. else
  3643. break;
  3644. } else if (res > 0) {
  3645. res = fuse_session_receive_buf(se, &fbuf, &tmpch);
  3646. if (res == -EINTR)
  3647. continue;
  3648. if (res <= 0)
  3649. break;
  3650. fuse_session_process_buf(se, &fbuf, tmpch);
  3651. } else {
  3652. timeout = fuse_clean_cache(f);
  3653. curr_time(&now);
  3654. next_clean = now.tv_sec + timeout;
  3655. }
  3656. }
  3657. free(buf);
  3658. fuse_session_reset(se);
  3659. return res < 0 ? -1 : 0;
  3660. }
  3661. int fuse_loop(struct fuse *f)
  3662. {
  3663. if (!f)
  3664. return -1;
  3665. if (lru_enabled(f))
  3666. return fuse_session_loop_remember(f);
  3667. return fuse_session_loop(f->se);
  3668. }
  3669. int fuse_invalidate(struct fuse *f, const char *path)
  3670. {
  3671. (void) f;
  3672. (void) path;
  3673. return -EINVAL;
  3674. }
  3675. void fuse_exit(struct fuse *f)
  3676. {
  3677. fuse_session_exit(f->se);
  3678. }
  3679. struct fuse_context *fuse_get_context(void)
  3680. {
  3681. return &fuse_get_context_internal()->ctx;
  3682. }
  3683. /*
  3684. * The size of fuse_context got extended, so need to be careful about
  3685. * incompatibility (i.e. a new binary cannot work with an old
  3686. * library).
  3687. */
struct fuse_context *fuse_get_context_compat22(void);
struct fuse_context *fuse_get_context_compat22(void)
{
	/* Identical implementation to fuse_get_context(); only the
	   exported symbol version differs (FUSE_2.2 ABI). */
	return &fuse_get_context_internal()->ctx;
}
FUSE_SYMVER(".symver fuse_get_context_compat22,fuse_get_context@FUSE_2.2");
  3694. int fuse_getgroups(int size, gid_t list[])
  3695. {
  3696. fuse_req_t req = fuse_get_context_internal()->req;
  3697. return fuse_req_getgroups(req, size, list);
  3698. }
  3699. int fuse_interrupted(void)
  3700. {
  3701. return fuse_req_interrupted(fuse_get_context_internal()->req);
  3702. }
/* Obsolete interface kept for ABI compatibility; context handling is
   internal now, so this intentionally does nothing. */
void fuse_set_getcontext_func(struct fuse_context *(*func)(void))
{
	(void) func;
	/* no-op */
}
/* Keys recognized by fuse_lib_opt_proc(). */
enum {
	KEY_HELP,	/* -h / --help seen on the command line */
};
/* Option table for the high-level library; each FUSE_LIB_OPT entry
   stores its value into the matching struct fuse_config field. */
#define FUSE_LIB_OPT(t, p, v) { t, offsetof(struct fuse_config, p), v }

static const struct fuse_opt fuse_lib_opts[] = {
	FUSE_OPT_KEY("-h", KEY_HELP),
	FUSE_OPT_KEY("--help", KEY_HELP),
	/* debug options are KEEP so lower layers also see them */
	FUSE_OPT_KEY("debug", FUSE_OPT_KEY_KEEP),
	FUSE_OPT_KEY("-d", FUSE_OPT_KEY_KEEP),
	FUSE_LIB_OPT("debug", debug, 1),
	FUSE_LIB_OPT("-d", debug, 1),
	FUSE_LIB_OPT("hard_remove", hard_remove, 1),
	FUSE_LIB_OPT("use_ino", use_ino, 1),
	FUSE_LIB_OPT("readdir_ino", readdir_ino, 1),
	FUSE_LIB_OPT("kernel_cache", kernel_cache, 1),
	FUSE_LIB_OPT("auto_cache", auto_cache, 1),
	FUSE_LIB_OPT("noauto_cache", auto_cache, 0),
	/* "opt=" entries set the matching *_set/set_* flag; the "opt=%X"
	   twins parse the value itself */
	FUSE_LIB_OPT("umask=", set_mode, 1),
	FUSE_LIB_OPT("umask=%o", umask, 0),
	FUSE_LIB_OPT("uid=", set_uid, 1),
	FUSE_LIB_OPT("uid=%d", uid, 0),
	FUSE_LIB_OPT("gid=", set_gid, 1),
	FUSE_LIB_OPT("gid=%d", gid, 0),
	FUSE_LIB_OPT("entry_timeout=%lf", entry_timeout, 0),
	FUSE_LIB_OPT("attr_timeout=%lf", attr_timeout, 0),
	FUSE_LIB_OPT("ac_attr_timeout=%lf", ac_attr_timeout, 0),
	FUSE_LIB_OPT("ac_attr_timeout=", ac_attr_timeout_set, 1),
	FUSE_LIB_OPT("negative_timeout=%lf", negative_timeout, 0),
	FUSE_LIB_OPT("noforget", remember, -1),
	FUSE_LIB_OPT("remember=%u", remember, 0),
	FUSE_LIB_OPT("nopath", nopath, 1),
	FUSE_LIB_OPT("intr", intr, 1),
	FUSE_LIB_OPT("intr_signal=%d", intr_signal, 0),
	FUSE_LIB_OPT("threads=%d", threads, 0),
	FUSE_OPT_END
};
/* Print the high-level library's mount-option help text to stderr. */
static void fuse_lib_help(void)
{
	fprintf(stderr,
"    -o use_ino             let filesystem set inode numbers\n"
"    -o readdir_ino         try to fill in d_ino in readdir\n"
"    -o kernel_cache        cache files in kernel\n"
"    -o [no]auto_cache      enable caching based on modification times (off)\n"
"    -o umask=M             set file permissions (octal)\n"
"    -o uid=N               set file owner\n"
"    -o gid=N               set file group\n"
"    -o entry_timeout=T     cache timeout for names (1.0s)\n"
"    -o negative_timeout=T  cache timeout for deleted names (0.0s)\n"
"    -o attr_timeout=T      cache timeout for attributes (1.0s)\n"
"    -o ac_attr_timeout=T   auto cache timeout for attributes (attr_timeout)\n"
"    -o noforget            never forget cached inodes\n"
"    -o remember=T          remember cached inodes for T seconds (0s)\n"
"    -o nopath              don't supply path if not necessary\n"
"    -o intr                allow requests to be interrupted\n"
"    -o intr_signal=NUM     signal to send on interrupt (%i)\n"
"    -o threads=NUM         number of worker threads. 0 = autodetect.\n"
"                           Negative values autodetect then divide by\n"
"                           absolute value. default = 0\n"
"\n", FUSE_DEFAULT_INTR_SIGNAL);
}
  3768. static int fuse_lib_opt_proc(void *data, const char *arg, int key,
  3769. struct fuse_args *outargs)
  3770. {
  3771. (void) arg; (void) outargs;
  3772. if (key == KEY_HELP) {
  3773. struct fuse_config *conf = (struct fuse_config *) data;
  3774. fuse_lib_help();
  3775. conf->help = 1;
  3776. }
  3777. return 1;
  3778. }
  3779. int fuse_is_lib_option(const char *opt)
  3780. {
  3781. return fuse_lowlevel_is_lib_option(opt) ||
  3782. fuse_opt_match(fuse_lib_opts, opt);
  3783. }
  3784. static int fuse_init_intr_signal(int signum, int *installed)
  3785. {
  3786. struct sigaction old_sa;
  3787. if (sigaction(signum, NULL, &old_sa) == -1) {
  3788. perror("fuse: cannot get old signal handler");
  3789. return -1;
  3790. }
  3791. if (old_sa.sa_handler == SIG_DFL) {
  3792. struct sigaction sa;
  3793. memset(&sa, 0, sizeof(struct sigaction));
  3794. sa.sa_handler = fuse_intr_sighandler;
  3795. sigemptyset(&sa.sa_mask);
  3796. if (sigaction(signum, &sa, NULL) == -1) {
  3797. perror("fuse: cannot set interrupt signal handler");
  3798. return -1;
  3799. }
  3800. *installed = 1;
  3801. }
  3802. return 0;
  3803. }
  3804. static void fuse_restore_intr_signal(int signum)
  3805. {
  3806. struct sigaction sa;
  3807. memset(&sa, 0, sizeof(struct sigaction));
  3808. sa.sa_handler = SIG_DFL;
  3809. sigaction(signum, &sa, NULL);
  3810. }
  3811. struct fuse_fs *fuse_fs_new(const struct fuse_operations *op, size_t op_size,
  3812. void *user_data)
  3813. {
  3814. struct fuse_fs *fs;
  3815. if (sizeof(struct fuse_operations) < op_size) {
  3816. fprintf(stderr, "fuse: warning: library too old, some operations may not not work\n");
  3817. op_size = sizeof(struct fuse_operations);
  3818. }
  3819. fs = (struct fuse_fs *) calloc(1, sizeof(struct fuse_fs));
  3820. if (!fs) {
  3821. fprintf(stderr, "fuse: failed to allocate fuse_fs object\n");
  3822. return NULL;
  3823. }
  3824. fs->user_data = user_data;
  3825. if (op)
  3826. memcpy(&fs->op, op, op_size);
  3827. return fs;
  3828. }
  3829. static int node_table_init(struct node_table *t)
  3830. {
  3831. t->size = NODE_TABLE_MIN_SIZE;
  3832. t->array = (struct node **) calloc(1, sizeof(struct node *) * t->size);
  3833. if (t->array == NULL) {
  3834. fprintf(stderr, "fuse: memory allocation failed\n");
  3835. return -1;
  3836. }
  3837. t->use = 0;
  3838. t->split = 0;
  3839. return 0;
  3840. }
  3841. static void *fuse_prune_nodes(void *fuse)
  3842. {
  3843. struct fuse *f = fuse;
  3844. int sleep_time;
  3845. while(1) {
  3846. sleep_time = fuse_clean_cache(f);
  3847. sleep(sleep_time);
  3848. }
  3849. return NULL;
  3850. }
  3851. int fuse_start_cleanup_thread(struct fuse *f)
  3852. {
  3853. if (lru_enabled(f))
  3854. return fuse_start_thread(&f->prune_thread, fuse_prune_nodes, f);
  3855. return 0;
  3856. }
/* Cancel and join the prune thread started by
   fuse_start_cleanup_thread(). */
void fuse_stop_cleanup_thread(struct fuse *f)
{
	if (lru_enabled(f)) {
		/* Hold f->lock so the thread is not cancelled while it
		   is inside fuse_clean_cache() holding the same lock,
		   which would leave the mutex locked forever. */
		pthread_mutex_lock(&f->lock);
		pthread_cancel(f->prune_thread);
		pthread_mutex_unlock(&f->lock);
		pthread_join(f->prune_thread, NULL);
	}
}
/*
 * Construct a struct fuse: wrap the user's operations, parse library
 * options, create the low-level session, and set up the node tables
 * and the root node.  On any failure the labels at the bottom unwind
 * exactly what was built so far (in reverse order) and NULL is
 * returned.  'compat' selects an older API level (<= 25 triggers
 * argument translation).
 */
struct fuse *fuse_new_common(struct fuse_chan *ch, struct fuse_args *args,
			     const struct fuse_operations *op,
			     size_t op_size, void *user_data, int compat)
{
	struct fuse *f;
	struct node *root;
	struct fuse_fs *fs;
	struct fuse_lowlevel_ops llop = fuse_path_ops;

	if (fuse_create_context_key() == -1)
		goto out;

	f = (struct fuse *) calloc(1, sizeof(struct fuse));
	if (f == NULL) {
		fprintf(stderr, "fuse: failed to allocate fuse object\n");
		goto out_delete_context_key;
	}

	fs = fuse_fs_new(op, op_size, user_data);
	if (!fs)
		goto out_free;

	fs->compat = compat;
	f->fs = fs;
	f->nullpath_ok = fs->op.flag_nullpath_ok;
	f->conf.nopath = fs->op.flag_nopath;
	f->utime_omit_ok = fs->op.flag_utime_omit_ok;

	/* Oh f**k, this is ugly! */
	/* Advertise lock support to the kernel only if the filesystem
	   actually implements op.lock(). */
	if (!fs->op.lock) {
		llop.getlk = NULL;
		llop.setlk = NULL;
	}

	/* Defaults; may be overridden by option parsing below. */
	f->conf.entry_timeout = 1.0;
	f->conf.attr_timeout = 1.0;
	f->conf.negative_timeout = 0.0;
	f->conf.intr_signal = FUSE_DEFAULT_INTR_SIGNAL;
	f->pagesize = getpagesize();
	init_list_head(&f->partial_slabs);
	init_list_head(&f->full_slabs);
	init_list_head(&f->lru_table);

	if (fuse_opt_parse(args, &f->conf, fuse_lib_opts,
			   fuse_lib_opt_proc) == -1)
		goto out_free_fs;

	if (!f->conf.ac_attr_timeout_set)
		f->conf.ac_attr_timeout = f->conf.attr_timeout;

#if defined(__FreeBSD__) || defined(__NetBSD__)
	/*
	 * In FreeBSD, we always use these settings as inode numbers
	 * are needed to make getcwd(3) work.
	 */
	f->conf.readdir_ino = 1;
#endif

	if (compat && compat <= 25) {
		if (fuse_sync_compat_args(args) == -1)
			goto out_free_fs;
	}

	f->se = fuse_lowlevel_new_common(args, &llop, sizeof(llop), f);
	if (f->se == NULL) {
		goto out_free_fs;
	}

	fuse_session_add_chan(f->se, ch);

	if (f->conf.debug) {
		fprintf(stderr, "nullpath_ok: %i\n", f->nullpath_ok);
		fprintf(stderr, "nopath: %i\n", f->conf.nopath);
		fprintf(stderr, "utime_omit_ok: %i\n", f->utime_omit_ok);
	}

	/* Trace topmost layer by default */
	f->fs->debug = f->conf.debug;
	f->ctr = 0;
	f->generation = 0;
	if (node_table_init(&f->name_table) == -1)
		goto out_free_session;

	if (node_table_init(&f->id_table) == -1)
		goto out_free_name_table;

	fuse_mutex_init(&f->lock);

	root = alloc_node(f);
	if (root == NULL) {
		fprintf(stderr, "fuse: memory allocation failed\n");
		goto out_free_id_table;
	}
	if (lru_enabled(f)) {
		struct node_lru *lnode = node_lru(root);
		init_list_head(&lnode->lru);
	}

	strcpy(root->inline_name, "/");
	root->name = root->inline_name;

	if (f->conf.intr &&
	    fuse_init_intr_signal(f->conf.intr_signal,
				  &f->intr_installed) == -1)
		goto out_free_root;

	root->parent = NULL;
	root->nodeid = FUSE_ROOT_ID;
	inc_nlookup(root);
	hash_id(f, root);

	return f;

out_free_root:
	free(root);
out_free_id_table:
	free(f->id_table.array);
out_free_name_table:
	free(f->name_table.array);
out_free_session:
	fuse_session_destroy(f->se);
out_free_fs:
	/* Horrible compatibility hack to stop the destructor from being
	   called on the filesystem without init being called first */
	fs->op.destroy = NULL;
	fuse_fs_destroy(f->fs);
out_free:
	free(f);
out_delete_context_key:
	fuse_delete_context_key();
out:
	return NULL;
}
  3977. struct fuse *fuse_new(struct fuse_chan *ch, struct fuse_args *args,
  3978. const struct fuse_operations *op, size_t op_size,
  3979. void *user_data)
  3980. {
  3981. return fuse_new_common(ch, args, op, op_size, user_data, 0);
  3982. }
  3983. void fuse_destroy(struct fuse *f)
  3984. {
  3985. size_t i;
  3986. if (f->conf.intr && f->intr_installed)
  3987. fuse_restore_intr_signal(f->conf.intr_signal);
  3988. if (f->fs) {
  3989. struct fuse_context_i *c = fuse_get_context_internal();
  3990. memset(c, 0, sizeof(*c));
  3991. c->ctx.fuse = f;
  3992. for (i = 0; i < f->id_table.size; i++) {
  3993. struct node *node;
  3994. for (node = f->id_table.array[i]; node != NULL; node = node->id_next)
  3995. {
  3996. if (node->is_hidden)
  3997. fuse_fs_free_hide(f->fs,node->hidden_fh);
  3998. }
  3999. }
  4000. }
  4001. for (i = 0; i < f->id_table.size; i++) {
  4002. struct node *node;
  4003. struct node *next;
  4004. for (node = f->id_table.array[i]; node != NULL; node = next) {
  4005. next = node->id_next;
  4006. free_node(f, node);
  4007. f->id_table.use--;
  4008. }
  4009. }
  4010. assert(list_empty(&f->partial_slabs));
  4011. assert(list_empty(&f->full_slabs));
  4012. free(f->id_table.array);
  4013. free(f->name_table.array);
  4014. pthread_mutex_destroy(&f->lock);
  4015. fuse_session_destroy(f->se);
  4016. free(f);
  4017. fuse_delete_context_key();
  4018. }
  4019. static struct fuse *fuse_new_common_compat25(int fd, struct fuse_args *args,
  4020. const struct fuse_operations *op,
  4021. size_t op_size, int compat)
  4022. {
  4023. struct fuse *f = NULL;
  4024. struct fuse_chan *ch = fuse_kern_chan_new(fd);
  4025. if (ch)
  4026. f = fuse_new_common(ch, args, op, op_size, NULL, compat);
  4027. return f;
  4028. }
  4029. #if !defined(__FreeBSD__) && !defined(__NetBSD__)
  4030. static struct fuse *fuse_new_common_compat(int fd, const char *opts,
  4031. const struct fuse_operations *op,
  4032. size_t op_size, int compat)
  4033. {
  4034. struct fuse *f;
  4035. struct fuse_args args = FUSE_ARGS_INIT(0, NULL);
  4036. if (fuse_opt_add_arg(&args, "") == -1)
  4037. return NULL;
  4038. if (opts &&
  4039. (fuse_opt_add_arg(&args, "-o") == -1 ||
  4040. fuse_opt_add_arg(&args, opts) == -1)) {
  4041. fuse_opt_free_args(&args);
  4042. return NULL;
  4043. }
  4044. f = fuse_new_common_compat25(fd, &args, op, op_size, compat);
  4045. fuse_opt_free_args(&args);
  4046. return f;
  4047. }
  4048. struct fuse *fuse_new_compat22(int fd, const char *opts,
  4049. const struct fuse_operations_compat22 *op,
  4050. size_t op_size)
  4051. {
  4052. return fuse_new_common_compat(fd, opts, (struct fuse_operations *) op,
  4053. op_size, 22);
  4054. }
  4055. struct fuse *fuse_new_compat2(int fd, const char *opts,
  4056. const struct fuse_operations_compat2 *op)
  4057. {
  4058. return fuse_new_common_compat(fd, opts, (struct fuse_operations *) op,
  4059. sizeof(struct fuse_operations_compat2),
  4060. 21);
  4061. }
  4062. struct fuse *fuse_new_compat1(int fd, int flags,
  4063. const struct fuse_operations_compat1 *op)
  4064. {
  4065. const char *opts = NULL;
  4066. if (flags & FUSE_DEBUG_COMPAT1)
  4067. opts = "debug";
  4068. return fuse_new_common_compat(fd, opts, (struct fuse_operations *) op,
  4069. sizeof(struct fuse_operations_compat1),
  4070. 11);
  4071. }
  4072. FUSE_SYMVER(".symver fuse_exited,__fuse_exited@");
  4073. FUSE_SYMVER(".symver fuse_process_cmd,__fuse_process_cmd@");
  4074. FUSE_SYMVER(".symver fuse_read_cmd,__fuse_read_cmd@");
  4075. FUSE_SYMVER(".symver fuse_set_getcontext_func,__fuse_set_getcontext_func@");
  4076. FUSE_SYMVER(".symver fuse_new_compat2,fuse_new@");
  4077. FUSE_SYMVER(".symver fuse_new_compat22,fuse_new@FUSE_2.2");
  4078. #endif /* __FreeBSD__ || __NetBSD__ */
  4079. struct fuse *fuse_new_compat25(int fd, struct fuse_args *args,
  4080. const struct fuse_operations_compat25 *op,
  4081. size_t op_size)
  4082. {
  4083. return fuse_new_common_compat25(fd, args, (struct fuse_operations *) op,
  4084. op_size, 25);
  4085. }
  4086. FUSE_SYMVER(".symver fuse_new_compat25,fuse_new@FUSE_2.5");
  4087. int
  4088. fuse_config_num_threads(const struct fuse *fuse_)
  4089. {
  4090. return fuse_->conf.threads;
  4091. }
  4092. void
  4093. fuse_config_set_entry_timeout(struct fuse *fuse_,
  4094. const double entry_timeout_)
  4095. {
  4096. fuse_->conf.entry_timeout = entry_timeout_;
  4097. }
  4098. double
  4099. fuse_config_get_entry_timeout(const struct fuse *fuse_)
  4100. {
  4101. return fuse_->conf.entry_timeout;
  4102. }
  4103. void
  4104. fuse_config_set_negative_entry_timeout(struct fuse *fuse_,
  4105. const double entry_timeout_)
  4106. {
  4107. fuse_->conf.negative_timeout = entry_timeout_;
  4108. }
  4109. double
  4110. fuse_config_get_negative_entry_timeout(const struct fuse *fuse_)
  4111. {
  4112. return fuse_->conf.negative_timeout;
  4113. }
  4114. void
  4115. fuse_config_set_attr_timeout(struct fuse *fuse_,
  4116. const double attr_timeout_)
  4117. {
  4118. fuse_->conf.attr_timeout = attr_timeout_;
  4119. }
  4120. double
  4121. fuse_config_get_attr_timeout(const struct fuse *fuse_)
  4122. {
  4123. return fuse_->conf.attr_timeout;
  4124. }