You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

4902 lines
110 KiB

  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU LGPLv2.
  5. See the file COPYING.LIB
  6. */
  7. /* For pthread_rwlock_t */
  8. #define _GNU_SOURCE
  9. #include "config.h"
  10. #include "fuse_i.h"
  11. #include "fuse_lowlevel.h"
  12. #include "fuse_opt.h"
  13. #include "fuse_misc.h"
  14. #include "fuse_common_compat.h"
  15. #include "fuse_compat.h"
  16. #include "fuse_kernel.h"
  17. #include <stdio.h>
  18. #include <string.h>
  19. #include <stdlib.h>
  20. #include <stddef.h>
  21. #include <stdbool.h>
  22. #include <unistd.h>
  23. #include <time.h>
  24. #include <fcntl.h>
  25. #include <limits.h>
  26. #include <errno.h>
  27. #include <signal.h>
  28. #include <dlfcn.h>
  29. #include <assert.h>
  30. #include <poll.h>
  31. #include <sys/param.h>
  32. #include <sys/uio.h>
  33. #include <sys/time.h>
  34. #include <sys/mman.h>
  35. #include <sys/file.h>
  36. #define FUSE_NODE_SLAB 1
  37. #ifndef MAP_ANONYMOUS
  38. #undef FUSE_NODE_SLAB
  39. #endif
  40. #define FUSE_DEFAULT_INTR_SIGNAL SIGUSR1
  41. #define FUSE_UNKNOWN_INO 0xffffffff
  42. #define OFFSET_MAX 0x7fffffffffffffffLL
  43. #define NODE_TABLE_MIN_SIZE 8192
/* Run-time configuration, filled in from mount options. */
struct fuse_config {
	unsigned int uid;
	unsigned int gid;
	unsigned int umask;
	double entry_timeout;
	double negative_timeout;
	double attr_timeout;
	double ac_attr_timeout;
	int ac_attr_timeout_set;
	int remember;		/* > 0 enables the forget LRU (see lru_enabled()) */
	int nopath;		/* skip path resolution where possible */
	int debug;		/* verbose tracing to stderr */
	int hard_remove;
	int use_ino;
	int readdir_ino;
	int set_mode;
	int set_uid;
	int set_gid;
	int direct_io;
	int kernel_cache;
	int auto_cache;
	int intr;
	int intr_signal;	/* presumably the interrupt signal number;
				   FUSE_DEFAULT_INTR_SIGNAL suggests SIGUSR1 default */
	int help;
	char *modules;
};
/* A filesystem implementation: operation table plus module/user state. */
struct fuse_fs {
	struct fuse_operations op;	/* user-supplied callbacks */
	struct fuse_module *m;		/* owning module, if any */
	void *user_data;
	int compat;		/* compat API version — see fuse_compat.h (not shown here) */
	int debug;
};
/* A dlopen()ed module shared object and its registration count. */
struct fusemod_so {
	void *handle;	/* handle returned by dlopen() */
	int ctr;	/* modules registered from this .so (checked in
			   fuse_load_so_name()) */
};
/*
 * One waiter queued on the tree lock (see queue_path()/wait_path()).
 * Holds the arguments of up to two pending try_get_path() requests so
 * that wake_up_queued() can retry them on behalf of the sleeper.
 */
struct lock_queue_element {
	struct lock_queue_element *next;	/* singly linked queue (f->lockq) */
	pthread_cond_t cond;	/* signalled when progress was made */
	fuse_ino_t nodeid1;
	const char *name1;
	char **path1;		/* NULL: just waiting for the node to unlock
				   (see queue_element_wakeup()) */
	struct node **wnode1;
	fuse_ino_t nodeid2;
	const char *name2;
	char **path2;
	struct node **wnode2;
	int err;		/* result reported back to the waiter */
	bool first_locked : 1;
	bool second_locked : 1;
	bool done : 1;		/* err is valid; waiter may dequeue */
};
/*
 * Hash table that grows and shrinks incrementally, splitting or
 * merging one bucket at a time (see rehash_id()/remerge_id()).
 */
struct node_table {
	struct node **array;	/* bucket heads */
	size_t use;		/* number of stored nodes */
	size_t size;		/* number of buckets */
	size_t split;		/* next bucket to split during incremental grow */
};
  103. #define container_of(ptr, type, member) ({ \
  104. const typeof( ((type *)0)->member ) *__mptr = (ptr); \
  105. (type *)( (char *)__mptr - offsetof(type,member) );})
  106. #define list_entry(ptr, type, member) \
  107. container_of(ptr, type, member)
/* Minimal circular doubly-linked list, kernel style. */
struct list_head {
	struct list_head *next;
	struct list_head *prev;
};
/* Header at the start of each page-sized slab of node slots. */
struct node_slab {
	struct list_head list; /* must be the first member */
	struct list_head freelist;	/* unused slots in this slab */
	int used;		/* number of allocated slots */
};
/* Top-level state of one mounted filesystem. */
struct fuse {
	struct fuse_session *se;
	struct node_table name_table;	/* (parent, name) -> node */
	struct node_table id_table;	/* nodeid -> node */
	struct list_head lru_table;	/* forget-time ordered nodes (remember mode) */
	fuse_ino_t ctr;		/* last assigned node id (see next_id()) */
	unsigned int generation;	/* bumped each time ctr wraps */
	unsigned int hidectr;
	pthread_mutex_t lock;	/* guards the tables and node state in this file */
	struct fuse_config conf;
	int intr_installed;
	struct fuse_fs *fs;
	int nullpath_ok;
	int utime_omit_ok;
	struct lock_queue_element *lockq;	/* tree-lock wait queue */
	int pagesize;		/* slab size for the node allocator */
	struct list_head partial_slabs;	/* slabs with free slots */
	struct list_head full_slabs;	/* completely used slabs */
	pthread_t prune_thread;
};
/* A recorded byte-range lock held on a node. */
struct lock {
	int type;
	off_t start;
	off_t end;
	pid_t pid;
	uint64_t owner;
	struct lock *next;	/* next lock on the same node */
};
/* In-memory inode; manipulated under f->lock throughout this file. */
struct node {
	struct node *name_next;	/* name_table hash chain */
	struct node *id_next;	/* id_table hash chain */
	fuse_ino_t nodeid;
	unsigned int generation;
	int refctr;
	struct node *parent;
	char *name;		/* points at inline_name when it fits */
	uint64_t nlookup;	/* lookup count, balanced by forget_node() */
	int open_count;
	struct timespec stat_updated;
	struct timespec mtime;
	off_t size;
	struct lock *locks;
	unsigned int is_hidden : 1;
	unsigned int cache_valid : 1;
	int treelock;		/* > 0: reader count; TREELOCK_WRITE (-1): writer;
				   <= TREELOCK_WAIT_OFFSET: readers with a writer
				   waiting (see try_get_path()/unlock_path()) */
	char inline_name[32];	/* short names stored inline (see hash_name()) */
};
  164. #define TREELOCK_WRITE -1
  165. #define TREELOCK_WAIT_OFFSET INT_MIN
/* Node variant with LRU linkage, used when conf.remember > 0. */
struct node_lru {
	struct node node;	/* must be first: node_lru() casts from node */
	struct list_head lru;
	struct timespec forget_time;
};
/* State of an open directory handle (readdir). */
struct fuse_dh {
	pthread_mutex_t lock;
	struct fuse *fuse;
	fuse_req_t req;
	char *contents;		/* accumulated directory entries */
	int allocated;
	unsigned len;
	unsigned size;
	unsigned needlen;
	int filled;
	uint64_t fh;		/* filesystem's own file handle */
	int error;
	fuse_ino_t nodeid;
};
/* Old (compat API) dir handle: adapts the old filler to the new one. */
struct fuse_dirhandle {
	fuse_fill_dir_t filler;
	void *buf;
};
/* fuse_context together with the request it belongs to; presumably
   stored per thread via fuse_context_key — confirm against the getter. */
struct fuse_context_i {
	struct fuse_context ctx;
	fuse_req_t req;
};
  194. static pthread_key_t fuse_context_key;
  195. static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER;
  196. static int fuse_context_ref;
  197. static struct fusemod_so *fuse_current_so;
  198. static struct fuse_module *fuse_modules;
/*
 * Load a module shared object by name.  While dlopen() runs, the
 * global fuse_current_so points at the new record so that registration
 * code triggered by loading the .so can bump so->ctr (presumably a
 * constructor in the .so — verify against the module macros).
 * Returns 0 on success, -1 on error.
 */
static int fuse_load_so_name(const char *soname)
{
	struct fusemod_so *so;

	so = calloc(1, sizeof(struct fusemod_so));
	if (!so) {
		fprintf(stderr, "fuse: memory allocation failed\n");
		return -1;
	}

	fuse_current_so = so;
	so->handle = dlopen(soname, RTLD_NOW);
	fuse_current_so = NULL;
	if (!so->handle) {
		fprintf(stderr, "fuse: %s\n", dlerror());
		goto err;
	}
	if (!so->ctr) {
		fprintf(stderr, "fuse: %s did not register any modules\n",
			soname);
		goto err;
	}
	return 0;

err:
	if (so->handle)
		dlclose(so->handle);
	free(so);
	return -1;
}
  226. static int fuse_load_so_module(const char *module)
  227. {
  228. int res;
  229. char *soname = malloc(strlen(module) + 64);
  230. if (!soname) {
  231. fprintf(stderr, "fuse: memory allocation failed\n");
  232. return -1;
  233. }
  234. sprintf(soname, "libfusemod_%s.so", module);
  235. res = fuse_load_so_name(soname);
  236. free(soname);
  237. return res;
  238. }
  239. static struct fuse_module *fuse_find_module(const char *module)
  240. {
  241. struct fuse_module *m;
  242. for (m = fuse_modules; m; m = m->next) {
  243. if (strcmp(module, m->name) == 0) {
  244. m->ctr++;
  245. break;
  246. }
  247. }
  248. return m;
  249. }
  250. static struct fuse_module *fuse_get_module(const char *module)
  251. {
  252. struct fuse_module *m;
  253. pthread_mutex_lock(&fuse_context_lock);
  254. m = fuse_find_module(module);
  255. if (!m) {
  256. int err = fuse_load_so_module(module);
  257. if (!err)
  258. m = fuse_find_module(module);
  259. }
  260. pthread_mutex_unlock(&fuse_context_lock);
  261. return m;
  262. }
/*
 * Drop a reference on a module.  When the last module reference of a
 * dlopen()ed .so goes away, unlink every module that came from that
 * .so from the global list and dlclose() it.
 */
static void fuse_put_module(struct fuse_module *m)
{
	pthread_mutex_lock(&fuse_context_lock);
	assert(m->ctr > 0);
	m->ctr--;
	if (!m->ctr && m->so) {
		struct fusemod_so *so = m->so;
		assert(so->ctr > 0);
		so->ctr--;
		if (!so->ctr) {
			struct fuse_module **mp;
			/* unlink-while-iterating: only advance when the
			   current entry is kept */
			for (mp = &fuse_modules; *mp;) {
				if ((*mp)->so == so)
					*mp = (*mp)->next;
				else
					mp = &(*mp)->next;
			}
			dlclose(so->handle);
			free(so);
		}
	}
	pthread_mutex_unlock(&fuse_context_lock);
}
  286. static void init_list_head(struct list_head *list)
  287. {
  288. list->next = list;
  289. list->prev = list;
  290. }
  291. static int list_empty(const struct list_head *head)
  292. {
  293. return head->next == head;
  294. }
  295. static void list_add(struct list_head *new, struct list_head *prev,
  296. struct list_head *next)
  297. {
  298. next->prev = new;
  299. new->next = next;
  300. new->prev = prev;
  301. prev->next = new;
  302. }
/* Insert right after the head (stack order). */
static inline void list_add_head(struct list_head *new, struct list_head *head)
{
	list_add(new, head, head->next);
}
/* Insert right before the head (queue order). */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	list_add(new, head->prev, head);
}
  311. static inline void list_del(struct list_head *entry)
  312. {
  313. struct list_head *prev = entry->prev;
  314. struct list_head *next = entry->next;
  315. next->prev = prev;
  316. prev->next = next;
  317. }
/* The forget LRU exists only when the "remember" option is positive. */
static inline int lru_enabled(struct fuse *f)
{
	return f->conf.remember > 0;
}
/* Downcast: valid because struct node is the first member of node_lru
   and nodes are allocated at node_lru size when the LRU is enabled. */
static struct node_lru *node_lru(struct node *node)
{
	return (struct node_lru *) node;
}
  326. static size_t get_node_size(struct fuse *f)
  327. {
  328. if (lru_enabled(f))
  329. return sizeof(struct node_lru);
  330. else
  331. return sizeof(struct node);
  332. }
  333. #ifdef FUSE_NODE_SLAB
/* Recover the slab from its list linkage (list is the first member). */
static struct node_slab *list_to_slab(struct list_head *head)
{
	return (struct node_slab *) head;
}
/* Round the node address down to the start of its page, where the slab
   header lives.  Assumes f->pagesize is a power of two — verify where
   pagesize is initialized. */
static struct node_slab *node_to_slab(struct fuse *f, struct node *node)
{
	return (struct node_slab *) (((uintptr_t) node) & ~((uintptr_t) f->pagesize - 1));
}
/*
 * mmap() one page and carve it into node-sized slots.  The header sits
 * at the start of the page and slots are packed at its end, so
 * node_to_slab() can find the header by masking a slot address.
 * Returns 0 on success, -1 on mmap failure.
 */
static int alloc_slab(struct fuse *f)
{
	void *mem;
	struct node_slab *slab;
	char *start;
	size_t num;
	size_t i;
	size_t node_size = get_node_size(f);

	mem = mmap(NULL, f->pagesize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED)
		return -1;

	slab = mem;
	init_list_head(&slab->freelist);
	slab->used = 0;
	num = (f->pagesize - sizeof(struct node_slab)) / node_size;

	start = (char *) mem + f->pagesize - num * node_size;
	for (i = 0; i < num; i++) {
		struct list_head *n;

		n = (struct list_head *) (start + i * node_size);
		list_add_tail(n, &slab->freelist);
	}
	/* a fresh slab has only free slots -> goes on the partial list */
	list_add_tail(&slab->list, &f->partial_slabs);
	return 0;
}
/*
 * Take a free slot from a partial slab, allocating a new slab if none
 * is available.  Returns NULL on allocation failure.
 */
static struct node *alloc_node(struct fuse *f)
{
	struct node_slab *slab;
	struct list_head *node;

	if (list_empty(&f->partial_slabs)) {
		int res = alloc_slab(f);
		if (res != 0)
			return NULL;
	}
	slab = list_to_slab(f->partial_slabs.next);
	slab->used++;
	node = slab->freelist.next;
	list_del(node);
	if (list_empty(&slab->freelist)) {
		/* no free slots left: move slab to the full list */
		list_del(&slab->list);
		list_add_tail(&slab->list, &f->full_slabs);
	}
	/* NOTE(review): only sizeof(struct node) is zeroed even when the
	   slot is a larger struct node_lru; the lru members appear to be
	   initialized by callers (see find_node()) — verify */
	memset(node, 0, sizeof(struct node));

	return (struct node *) node;
}
  387. static void free_slab(struct fuse *f, struct node_slab *slab)
  388. {
  389. int res;
  390. list_del(&slab->list);
  391. res = munmap(slab, f->pagesize);
  392. if (res == -1)
  393. fprintf(stderr, "fuse warning: munmap(%p) failed\n", slab);
  394. }
/*
 * Return a node slot to its slab; the slab itself is released when its
 * last slot is freed.
 */
static void free_node_mem(struct fuse *f, struct node *node)
{
	struct node_slab *slab = node_to_slab(f, node);
	struct list_head *n = (struct list_head *) node;

	slab->used--;
	if (slab->used) {
		if (list_empty(&slab->freelist)) {
			/* was full, has a free slot again */
			list_del(&slab->list);
			list_add_tail(&slab->list, &f->partial_slabs);
		}
		list_add_head(n, &slab->freelist);
	} else {
		free_slab(f, slab);
	}
}
  410. #else
  411. static struct node *alloc_node(struct fuse *f)
  412. {
  413. return (struct node *) calloc(1, get_node_size(f));
  414. }
/* Non-slab counterpart of the slab free_node_mem(). */
static void free_node_mem(struct fuse *f, struct node *node)
{
	(void) f;
	free(node);
}
  420. #endif
/*
 * Bucket index for a node id, using Knuth's multiplicative constant.
 * The table is resized one bucket at a time: buckets below "split"
 * have already been rehashed for the doubled size, the rest still use
 * the old (size / 2) modulus.
 */
static size_t id_hash(struct fuse *f, fuse_ino_t ino)
{
	uint64_t hash = ((uint32_t) ino * 2654435761U) % f->id_table.size;
	uint64_t oldhash = hash % (f->id_table.size / 2);

	if (oldhash >= f->id_table.split)
		return oldhash;
	else
		return hash;
}
  430. static struct node *get_node_nocheck(struct fuse *f, fuse_ino_t nodeid)
  431. {
  432. size_t hash = id_hash(f, nodeid);
  433. struct node *node;
  434. for (node = f->id_table.array[hash]; node != NULL; node = node->id_next)
  435. if (node->nodeid == nodeid)
  436. return node;
  437. return NULL;
  438. }
  439. static struct node *get_node(struct fuse *f, fuse_ino_t nodeid)
  440. {
  441. struct node *node = get_node_nocheck(f, nodeid);
  442. if (!node) {
  443. fprintf(stderr, "fuse internal error: node %llu not found\n",
  444. (unsigned long long) nodeid);
  445. abort();
  446. }
  447. return node;
  448. }
  449. static void curr_time(struct timespec *now);
  450. static double diff_timespec(const struct timespec *t1,
  451. const struct timespec *t2);
  452. static void remove_node_lru(struct node *node)
  453. {
  454. struct node_lru *lnode = node_lru(node);
  455. list_del(&lnode->lru);
  456. init_list_head(&lnode->lru);
  457. }
/* Move the node to the LRU tail and stamp the time it was forgotten. */
static void set_forget_time(struct fuse *f, struct node *node)
{
	struct node_lru *lnode = node_lru(node);

	list_del(&lnode->lru);
	list_add_tail(&lnode->lru, &f->lru_table);
	curr_time(&lnode->forget_time);
}
/* Free a node; its name may be heap allocated or stored inline. */
static void free_node(struct fuse *f, struct node *node)
{
	if (node->name != node->inline_name)
		free(node->name);
	free_node_mem(f, node);
}
/*
 * Halve the hash table, but never below NODE_TABLE_MIN_SIZE.  If the
 * shrinking realloc fails the old (larger) array is kept, which is
 * still safe because only the recorded size shrinks.
 */
static void node_table_reduce(struct node_table *t)
{
	size_t newsize = t->size / 2;
	void *newarray;

	if (newsize < NODE_TABLE_MIN_SIZE)
		return;

	newarray = realloc(t->array, sizeof(struct node *) * newsize);
	if (newarray != NULL)
		t->array = newarray;

	t->size = newsize;
	t->split = t->size / 2;
}
/*
 * Undo up to 8 bucket splits: move the contents of an upper bucket
 * back into its lower twin.  Called when the table is underused; once
 * all splits are undone (split == 0) the table itself is halved.
 */
static void remerge_id(struct fuse *f)
{
	struct node_table *t = &f->id_table;
	int iter;

	if (t->split == 0)
		node_table_reduce(t);

	for (iter = 8; t->split > 0 && iter; iter--) {
		struct node **upper;

		t->split--;
		upper = &t->array[t->split + t->size / 2];
		if (*upper) {
			struct node **nodep;

			/* append the upper chain to the lower chain */
			for (nodep = &t->array[t->split]; *nodep;
			     nodep = &(*nodep)->id_next);

			*nodep = *upper;
			*upper = NULL;
			break;
		}
	}
}
/*
 * Remove a node from the id hash; start merging buckets when the
 * table drops below one quarter occupancy.
 */
static void unhash_id(struct fuse *f, struct node *node)
{
	struct node **nodep = &f->id_table.array[id_hash(f, node->nodeid)];

	for (; *nodep != NULL; nodep = &(*nodep)->id_next)
		if (*nodep == node) {
			*nodep = node->id_next;
			f->id_table.use--;

			if(f->id_table.use < f->id_table.size / 4)
				remerge_id(f);
			return;
		}
}
/*
 * Double the bucket array (new half zeroed) and restart incremental
 * splitting from bucket 0.  Returns 0 on success, -1 if realloc fails
 * (the table is left unchanged).
 */
static int node_table_resize(struct node_table *t)
{
	size_t newsize = t->size * 2;
	void *newarray;

	newarray = realloc(t->array, sizeof(struct node *) * newsize);
	if (newarray == NULL)
		return -1;

	t->array = newarray;
	memset(t->array + t->size, 0, t->size * sizeof(struct node *));
	t->size = newsize;
	t->split = 0;

	return 0;
}
/*
 * One incremental grow step: rehash the chain of bucket "split" and
 * move entries whose hash changes under the doubled size.  When every
 * bucket has been split, actually double the table.
 */
static void rehash_id(struct fuse *f)
{
	struct node_table *t = &f->id_table;
	struct node **nodep;
	struct node **next;
	size_t hash;

	if (t->split == t->size / 2)
		return;

	hash = t->split;
	t->split++;

	for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
		struct node *node = *nodep;
		size_t newhash = id_hash(f, node->nodeid);

		if (newhash != hash) {
			/* moved away: re-examine the same link next time */
			next = nodep;
			*nodep = node->id_next;
			node->id_next = t->array[newhash];
			t->array[newhash] = node;
		} else {
			next = &node->id_next;
		}
	}
	if (t->split == t->size / 2)
		node_table_resize(t);
}
/*
 * Insert a node into the id hash; split one more bucket once the
 * table reaches half occupancy.
 */
static void hash_id(struct fuse *f, struct node *node)
{
	size_t hash = id_hash(f, node->nodeid);
	node->id_next = f->id_table.array[hash];
	f->id_table.array[hash] = node;
	f->id_table.use++;

	if (f->id_table.use >= f->id_table.size / 2)
		rehash_id(f);
}
/*
 * Bucket index for (parent, name), same split-aware scheme as
 * id_hash(): buckets below "split" use the doubled table size.
 */
static size_t name_hash(struct fuse *f, fuse_ino_t parent,
			const char *name)
{
	uint64_t hash = parent;
	uint64_t oldhash;

	for (; *name; name++)
		hash = hash * 31 + (unsigned char) *name;

	hash %= f->name_table.size;
	oldhash = hash % (f->name_table.size / 2);
	if (oldhash >= f->name_table.split)
		return oldhash;
	else
		return hash;
}
  576. static void unref_node(struct fuse *f, struct node *node);
/*
 * Name-table counterpart of remerge_id(): undo up to 8 bucket splits,
 * halving the table first when all splits are already undone.
 */
static void remerge_name(struct fuse *f)
{
	struct node_table *t = &f->name_table;
	int iter;

	if (t->split == 0)
		node_table_reduce(t);

	for (iter = 8; t->split > 0 && iter; iter--) {
		struct node **upper;

		t->split--;
		upper = &t->array[t->split + t->size / 2];
		if (*upper) {
			struct node **nodep;

			/* append the upper chain to the lower chain */
			for (nodep = &t->array[t->split]; *nodep;
			     nodep = &(*nodep)->name_next);

			*nodep = *upper;
			*upper = NULL;
			break;
		}
	}
}
/*
 * Remove a node from the name hash, drop its reference on the parent
 * and detach it from the tree.  A named node that cannot be found in
 * its bucket indicates corrupted state: abort.
 */
static void unhash_name(struct fuse *f, struct node *node)
{
	if (node->name) {
		size_t hash = name_hash(f, node->parent->nodeid, node->name);
		struct node **nodep = &f->name_table.array[hash];

		for (; *nodep != NULL; nodep = &(*nodep)->name_next)
			if (*nodep == node) {
				*nodep = node->name_next;
				node->name_next = NULL;
				unref_node(f, node->parent);
				if (node->name != node->inline_name)
					free(node->name);
				node->name = NULL;
				node->parent = NULL;
				f->name_table.use--;

				if (f->name_table.use < f->name_table.size / 4)
					remerge_name(f);
				return;
			}
		fprintf(stderr,
			"fuse internal error: unable to unhash node: %llu\n",
			(unsigned long long) node->nodeid);
		abort();
	}
}
/*
 * Name-table counterpart of rehash_id(): split one bucket, moving
 * entries whose hash changes, and double the table when done.
 */
static void rehash_name(struct fuse *f)
{
	struct node_table *t = &f->name_table;
	struct node **nodep;
	struct node **next;
	size_t hash;

	if (t->split == t->size / 2)
		return;

	hash = t->split;
	t->split++;

	for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
		struct node *node = *nodep;
		size_t newhash = name_hash(f, node->parent->nodeid, node->name);

		if (newhash != hash) {
			/* moved away: re-examine the same link next time */
			next = nodep;
			*nodep = node->name_next;
			node->name_next = t->array[newhash];
			t->array[newhash] = node;
		} else {
			next = &node->name_next;
		}
	}
	if (t->split == t->size / 2)
		node_table_resize(t);
}
/*
 * Link a node under (parentid, name).  Short names are stored in the
 * node's inline buffer, longer ones are strdup()ed.  Takes a reference
 * on the parent.  Returns 0 on success, -1 on allocation failure.
 */
static int hash_name(struct fuse *f, struct node *node, fuse_ino_t parentid,
		     const char *name)
{
	size_t hash = name_hash(f, parentid, name);
	struct node *parent = get_node(f, parentid);
	if (strlen(name) < sizeof(node->inline_name)) {
		strcpy(node->inline_name, name);
		node->name = node->inline_name;
	} else {
		node->name = strdup(name);
		if (node->name == NULL)
			return -1;
	}
	parent->refctr ++;
	node->parent = parent;
	node->name_next = f->name_table.array[hash];
	f->name_table.array[hash] = node;
	f->name_table.use++;

	if (f->name_table.use >= f->name_table.size / 2)
		rehash_name(f);

	return 0;
}
/*
 * Fully remove a node: unhash it from both tables, take it off the
 * LRU (if enabled) and free it.  The node must not be tree-locked;
 * called with f->lock held (see unref_node() callers) — verify.
 */
static void delete_node(struct fuse *f, struct node *node)
{
	if (f->conf.debug)
		fprintf(stderr, "DELETE: %llu\n",
			(unsigned long long) node->nodeid);

	assert(node->treelock == 0);
	unhash_name(f, node);
	if (lru_enabled(f))
		remove_node_lru(node);
	unhash_id(f, node);
	free_node(f, node);
}
/* Drop one reference; delete the node when the count reaches zero. */
static void unref_node(struct fuse *f, struct node *node)
{
	assert(node->refctr > 0);
	node->refctr --;
	if (!node->refctr)
		delete_node(f, node);
}
/*
 * Produce the next unused node id.  Ids are kept to 32 bits and wrap;
 * the generation counter is bumped on wrap so (id, generation) pairs
 * stay distinct.  Skips 0, FUSE_UNKNOWN_INO and ids still in use.
 */
static fuse_ino_t next_id(struct fuse *f)
{
	do {
		f->ctr = (f->ctr + 1) & 0xffffffff;
		if (!f->ctr)
			f->generation ++;
	} while (f->ctr == 0 || f->ctr == FUSE_UNKNOWN_INO ||
		 get_node_nocheck(f, f->ctr) != NULL);
	return f->ctr;
}
  698. static struct node *lookup_node(struct fuse *f, fuse_ino_t parent,
  699. const char *name)
  700. {
  701. size_t hash = name_hash(f, parent, name);
  702. struct node *node;
  703. for (node = f->name_table.array[hash]; node != NULL; node = node->name_next)
  704. if (node->parent->nodeid == parent &&
  705. strcmp(node->name, name) == 0)
  706. return node;
  707. return NULL;
  708. }
/* Bump the lookup count; the first lookup also takes a reference. */
static void inc_nlookup(struct node *node)
{
	if (!node->nlookup)
		node->refctr++;
	node->nlookup++;
}
/*
 * Look up (parent, name), creating a new node when none exists.  A
 * NULL name returns the parent itself.  Takes one nlookup reference
 * on the returned node.  Returns NULL on allocation failure.
 */
static struct node *find_node(struct fuse *f, fuse_ino_t parent,
			      const char *name)
{
	struct node *node;

	pthread_mutex_lock(&f->lock);
	if (!name)
		node = get_node(f, parent);
	else
		node = lookup_node(f, parent, name);
	if (node == NULL) {
		node = alloc_node(f);
		if (node == NULL)
			goto out_err;

		node->nodeid = next_id(f);
		node->generation = f->generation;
		/* remember mode keeps an extra lookup so nodes survive
		   until pruned */
		if (f->conf.remember)
			inc_nlookup(node);

		if (hash_name(f, node, parent, name) == -1) {
			free_node(f, node);
			node = NULL;
			goto out_err;
		}
		hash_id(f, node);
		if (lru_enabled(f)) {
			struct node_lru *lnode = node_lru(node);
			init_list_head(&lnode->lru);
		}
	} else if (lru_enabled(f) && node->nlookup == 1) {
		/* back in use: take it off the forget LRU */
		remove_node_lru(node);
	}
	inc_nlookup(node);
out_err:
	pthread_mutex_unlock(&f->lock);
	return node;
}
  750. static char *add_name(char **buf, unsigned *bufsize, char *s, const char *name)
  751. {
  752. size_t len = strlen(name);
  753. if (s - len <= *buf) {
  754. unsigned pathlen = *bufsize - (s - *buf);
  755. unsigned newbufsize = *bufsize;
  756. char *newbuf;
  757. while (newbufsize < pathlen + len + 1) {
  758. if (newbufsize >= 0x80000000)
  759. newbufsize = 0xffffffff;
  760. else
  761. newbufsize *= 2;
  762. }
  763. newbuf = realloc(*buf, newbufsize);
  764. if (newbuf == NULL)
  765. return NULL;
  766. *buf = newbuf;
  767. s = newbuf + newbufsize - pathlen;
  768. memmove(s, newbuf + *bufsize - pathlen, pathlen);
  769. *bufsize = newbufsize;
  770. }
  771. s -= len;
  772. strncpy(s, name, len);
  773. s--;
  774. *s = '/';
  775. return s;
  776. }
/*
 * Release tree locks taken by try_get_path() on the chain from nodeid
 * up to (but excluding) "end" or the root.  "wnode", if given, held
 * the write lock.  A reader count that was offset by
 * TREELOCK_WAIT_OFFSET (a would-be writer bounced off it) is reset to
 * zero once the last reader leaves.
 */
static void unlock_path(struct fuse *f, fuse_ino_t nodeid, struct node *wnode,
			struct node *end)
{
	struct node *node;

	if (wnode) {
		assert(wnode->treelock == TREELOCK_WRITE);
		wnode->treelock = 0;
	}

	for (node = get_node(f, nodeid);
	     node != end && node->nodeid != FUSE_ROOT_ID; node = node->parent) {
		assert(node->treelock != 0);
		assert(node->treelock != TREELOCK_WAIT_OFFSET);
		assert(node->treelock != TREELOCK_WRITE);
		node->treelock--;
		if (node->treelock == TREELOCK_WAIT_OFFSET)
			node->treelock = 0;
	}
}
/*
 * Build the path of (nodeid, name) by walking parents towards the
 * root, optionally read-locking each ancestor and write-locking the
 * target (when wnodep is given).
 *
 * Returns 0 with *path malloc()ed on success; -EAGAIN when a tree
 * lock is contended (the caller queues and retries); -ENOMEM or
 * -ENOENT on failure.  On any failure no locks remain held.
 */
static int try_get_path(struct fuse *f, fuse_ino_t nodeid, const char *name,
			char **path, struct node **wnodep, bool need_lock)
{
	unsigned bufsize = 256;
	char *buf;
	char *s;
	struct node *node;
	struct node *wnode = NULL;
	int err;

	*path = NULL;

	err = -ENOMEM;
	buf = malloc(bufsize);
	if (buf == NULL)
		goto out_err;

	s = buf + bufsize - 1;
	*s = '\0';

	if (name != NULL) {
		s = add_name(&buf, &bufsize, s, name);
		err = -ENOMEM;
		if (s == NULL)
			goto out_free;
	}

	if (wnodep) {
		assert(need_lock);
		wnode = lookup_node(f, nodeid, name);
		if (wnode) {
			if (wnode->treelock != 0) {
				/* readers present: offset the count so
				   unlock_path() knows a writer waits */
				if (wnode->treelock > 0)
					wnode->treelock += TREELOCK_WAIT_OFFSET;
				err = -EAGAIN;
				goto out_free;
			}
			wnode->treelock = TREELOCK_WRITE;
		}
	}

	for (node = get_node(f, nodeid); node->nodeid != FUSE_ROOT_ID;
	     node = node->parent) {
		err = -ENOENT;
		if (node->name == NULL || node->parent == NULL)
			goto out_unlock;

		err = -ENOMEM;
		s = add_name(&buf, &bufsize, s, node->name);
		if (s == NULL)
			goto out_unlock;

		if (need_lock) {
			err = -EAGAIN;
			/* negative: write locked, or writer waiting */
			if (node->treelock < 0)
				goto out_unlock;

			node->treelock++;
		}
	}

	if (s[0])
		memmove(buf, s, bufsize - (s - buf));
	else
		strcpy(buf, "/");

	*path = buf;
	if (wnodep)
		*wnodep = wnode;

	return 0;

 out_unlock:
	if (need_lock)
		unlock_path(f, nodeid, wnode, node);
 out_free:
	free(buf);

 out_err:
	return err;
}
/* Release whichever partial locks a queued request managed to take. */
static void queue_element_unlock(struct fuse *f, struct lock_queue_element *qe)
{
	struct node *wnode;

	if (qe->first_locked) {
		wnode = qe->wnode1 ? *qe->wnode1 : NULL;
		unlock_path(f, qe->nodeid1, wnode, NULL);
		qe->first_locked = false;
	}
	if (qe->second_locked) {
		wnode = qe->wnode2 ? *qe->wnode2 : NULL;
		unlock_path(f, qe->nodeid2, wnode, NULL);
		qe->second_locked = false;
	}
}
/*
 * Retry one queued path request.  Signals the waiter when both paths
 * (or the single one) are locked, or on a hard error.  Only the head
 * of the queue may keep partial locks between retries (see the
 * comment below).
 */
static void queue_element_wakeup(struct fuse *f, struct lock_queue_element *qe)
{
	int err;
	bool first = (qe == f->lockq);

	if (!qe->path1) {
		/* Just waiting for it to be unlocked */
		if (get_node(f, qe->nodeid1)->treelock == 0)
			pthread_cond_signal(&qe->cond);

		return;
	}

	if (!qe->first_locked) {
		err = try_get_path(f, qe->nodeid1, qe->name1, qe->path1,
				   qe->wnode1, true);
		if (!err)
			qe->first_locked = true;
		else if (err != -EAGAIN)
			goto err_unlock;
	}
	if (!qe->second_locked && qe->path2) {
		err = try_get_path(f, qe->nodeid2, qe->name2, qe->path2,
				   qe->wnode2, true);
		if (!err)
			qe->second_locked = true;
		else if (err != -EAGAIN)
			goto err_unlock;
	}

	if (qe->first_locked && (qe->second_locked || !qe->path2)) {
		err = 0;
		goto done;
	}

	/*
	 * Only let the first element be partially locked otherwise there could
	 * be a deadlock.
	 *
	 * But do allow the first element to be partially locked to prevent
	 * starvation.
	 */
	if (!first)
		queue_element_unlock(f, qe);

	/* keep trying */
	return;

err_unlock:
	queue_element_unlock(f, qe);
done:
	qe->err = err;
	qe->done = true;
	pthread_cond_signal(&qe->cond);
}
  924. static void wake_up_queued(struct fuse *f)
  925. {
  926. struct lock_queue_element *qe;
  927. for (qe = f->lockq; qe != NULL; qe = qe->next)
  928. queue_element_wakeup(f, qe);
  929. }
  930. static void debug_path(struct fuse *f, const char *msg, fuse_ino_t nodeid,
  931. const char *name, bool wr)
  932. {
  933. if (f->conf.debug) {
  934. struct node *wnode = NULL;
  935. if (wr)
  936. wnode = lookup_node(f, nodeid, name);
  937. if (wnode)
  938. fprintf(stderr, "%s %li (w)\n", msg, wnode->nodeid);
  939. else
  940. fprintf(stderr, "%s %li\n", msg, nodeid);
  941. }
  942. }
  943. static void queue_path(struct fuse *f, struct lock_queue_element *qe)
  944. {
  945. struct lock_queue_element **qp;
  946. qe->done = false;
  947. qe->first_locked = false;
  948. qe->second_locked = false;
  949. pthread_cond_init(&qe->cond, NULL);
  950. qe->next = NULL;
  951. for (qp = &f->lockq; *qp != NULL; qp = &(*qp)->next);
  952. *qp = qe;
  953. }
  954. static void dequeue_path(struct fuse *f, struct lock_queue_element *qe)
  955. {
  956. struct lock_queue_element **qp;
  957. pthread_cond_destroy(&qe->cond);
  958. for (qp = &f->lockq; *qp != qe; qp = &(*qp)->next);
  959. *qp = qe->next;
  960. }
  961. static int wait_path(struct fuse *f, struct lock_queue_element *qe)
  962. {
  963. queue_path(f, qe);
  964. do {
  965. pthread_cond_wait(&qe->cond, &f->lock);
  966. } while (!qe->done);
  967. dequeue_path(f, qe);
  968. return qe->err;
  969. }
/*
 * Resolve (nodeid, name) to a path, taking tree locks, and sleep on
 * the lock queue while they are contended.  Returns 0 or a negative
 * errno; on success *path must be released with free_path*().
 */
static int get_path_common(struct fuse *f, fuse_ino_t nodeid, const char *name,
			   char **path, struct node **wnode)
{
	int err;

	pthread_mutex_lock(&f->lock);
	err = try_get_path(f, nodeid, name, path, wnode, true);
	if (err == -EAGAIN) {
		struct lock_queue_element qe = {
			.nodeid1 = nodeid,
			.name1 = name,
			.path1 = path,
			.wnode1 = wnode,
		};
		debug_path(f, "QUEUE PATH", nodeid, name, !!wnode);
		err = wait_path(f, &qe);
		debug_path(f, "DEQUEUE PATH", nodeid, name, !!wnode);
	}
	pthread_mutex_unlock(&f->lock);

	return err;
}
/* Path of nodeid itself (read-locked, no trailing name). */
static int get_path(struct fuse *f, fuse_ino_t nodeid, char **path)
{
	return get_path_common(f, nodeid, NULL, path, NULL);
}
  994. static int get_path_nullok(struct fuse *f, fuse_ino_t nodeid, char **path)
  995. {
  996. int err = 0;
  997. if (f->conf.nopath) {
  998. *path = NULL;
  999. } else {
  1000. err = get_path_common(f, nodeid, NULL, path, NULL);
  1001. if (err == -ENOENT && f->nullpath_ok)
  1002. err = 0;
  1003. }
  1004. return err;
  1005. }
/* Path of the child "name" under nodeid (read-locked). */
static int get_path_name(struct fuse *f, fuse_ino_t nodeid, const char *name,
			 char **path)
{
	return get_path_common(f, nodeid, name, path, NULL);
}
/* Like get_path_name(), but also write-locks the target node. */
static int get_path_wrlock(struct fuse *f, fuse_ino_t nodeid, const char *name,
			   char **path, struct node **wnode)
{
	return get_path_common(f, nodeid, name, path, wnode);
}
/*
 * Resolve and lock two paths in one shot (caller holds f->lock).
 * If the second path fails, the first is fully unwound so no partial
 * state leaks to the caller.
 */
static int try_get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
			 fuse_ino_t nodeid2, const char *name2,
			 char **path1, char **path2,
			 struct node **wnode1, struct node **wnode2)
{
	int err;

	/* FIXME: locking two paths needs deadlock checking */
	err = try_get_path(f, nodeid1, name1, path1, wnode1, true);
	if (!err) {
		err = try_get_path(f, nodeid2, name2, path2, wnode2, true);
		if (err) {
			struct node *wn1 = wnode1 ? *wnode1 : NULL;

			unlock_path(f, nodeid1, wn1, NULL);
			free(*path1);
		}
	}
	return err;
}
/*
 * Two-path variant of get_path_common(): resolve and lock both paths,
 * sleeping on the lock queue while either is contended (rename/link
 * style operations).
 */
static int get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
		     fuse_ino_t nodeid2, const char *name2,
		     char **path1, char **path2,
		     struct node **wnode1, struct node **wnode2)
{
	int err;

	pthread_mutex_lock(&f->lock);
	err = try_get_path2(f, nodeid1, name1, nodeid2, name2,
			    path1, path2, wnode1, wnode2);
	if (err == -EAGAIN) {
		struct lock_queue_element qe = {
			.nodeid1 = nodeid1,
			.name1 = name1,
			.path1 = path1,
			.wnode1 = wnode1,
			.nodeid2 = nodeid2,
			.name2 = name2,
			.path2 = path2,
			.wnode2 = wnode2,
		};

		debug_path(f, "QUEUE PATH1", nodeid1, name1, !!wnode1);
		debug_path(f, "      PATH2", nodeid2, name2, !!wnode2);
		err = wait_path(f, &qe);
		debug_path(f, "DEQUEUE PATH1", nodeid1, name1, !!wnode1);
		debug_path(f, "        PATH2", nodeid2, name2, !!wnode2);
	}
	pthread_mutex_unlock(&f->lock);

	return err;
}
/*
 * Release the tree lock taken by a get_path_*() call and free the path
 * string.  Waiters are woken only if the lock queue is non-empty; the
 * unlock must happen before wake_up_queued() so a queued request can
 * make progress immediately.
 */
static void free_path_wrlock(struct fuse *f, fuse_ino_t nodeid,
			     struct node *wnode, char *path)
{
	pthread_mutex_lock(&f->lock);
	unlock_path(f, nodeid, wnode, NULL);
	if (f->lockq)
		wake_up_queued(f);
	pthread_mutex_unlock(&f->lock);
	free(path);
}
  1073. static void free_path(struct fuse *f, fuse_ino_t nodeid, char *path)
  1074. {
  1075. if (path)
  1076. free_path_wrlock(f, nodeid, NULL, path);
  1077. }
/*
 * Release both tree locks taken by get_path2() and free both path
 * strings.  Unlike free_path_wrlock(), waiters are woken unconditionally.
 */
static void free_path2(struct fuse *f, fuse_ino_t nodeid1, fuse_ino_t nodeid2,
		       struct node *wnode1, struct node *wnode2,
		       char *path1, char *path2)
{
	pthread_mutex_lock(&f->lock);
	unlock_path(f, nodeid1, wnode1, NULL);
	unlock_path(f, nodeid2, wnode2, NULL);
	wake_up_queued(f);
	pthread_mutex_unlock(&f->lock);
	free(path1);
	free(path2);
}
/*
 * Drop nlookup references from a node in response to a kernel FORGET.
 * The root node is never forgotten.  If this forget would take the
 * count to zero while the node's subtree is still locked, wait on the
 * lock queue until the tree lock is released before releasing the node.
 */
static void forget_node(struct fuse *f, fuse_ino_t nodeid, uint64_t nlookup)
{
	struct node *node;

	if (nodeid == FUSE_ROOT_ID)
		return;
	pthread_mutex_lock(&f->lock);
	node = get_node(f, nodeid);

	/*
	 * Node may still be locked due to interrupt idiocy in open,
	 * create and opendir
	 */
	while (node->nlookup == nlookup && node->treelock) {
		/* queue element lives on this stack frame across the wait */
		struct lock_queue_element qe = {
			.nodeid1 = nodeid,
		};

		debug_path(f, "QUEUE PATH (forget)", nodeid, NULL, false);
		queue_path(f, &qe);

		do {
			pthread_cond_wait(&qe.cond, &f->lock);
		} while (node->nlookup == nlookup && node->treelock);

		dequeue_path(f, &qe);
		debug_path(f, "DEQUEUE_PATH (forget)", nodeid, NULL, false);
	}

	assert(node->nlookup >= nlookup);
	node->nlookup -= nlookup;
	if (!node->nlookup) {
		/* last reference gone: release the node */
		unref_node(f, node);
	} else if (lru_enabled(f) && node->nlookup == 1) {
		/* only the "remember" reference left: start the LRU clock */
		set_forget_time(f, node);
	}
	pthread_mutex_unlock(&f->lock);
}
/*
 * Detach a node from its name/parent.  In "remember" mode each hashed
 * name holds an extra nlookup reference, which is dropped here; the
 * assert guards against dropping the kernel's own reference.
 * Caller must hold f->lock.
 */
static void unlink_node(struct fuse *f, struct node *node)
{
	if (f->conf.remember) {
		assert(node->nlookup > 1);
		node->nlookup--;
	}
	unhash_name(f, node);
}
  1130. static void remove_node(struct fuse *f, fuse_ino_t dir, const char *name)
  1131. {
  1132. struct node *node;
  1133. pthread_mutex_lock(&f->lock);
  1134. node = lookup_node(f, dir, name);
  1135. if (node != NULL)
  1136. unlink_node(f, node);
  1137. pthread_mutex_unlock(&f->lock);
  1138. }
/*
 * Move a node from (olddir, oldname) to (newdir, newname) in the
 * in-memory tree, mirroring a rename on the backing filesystem.
 *
 * hide != 0 means this rename is creating a ".fuse_hiddenXXXX" file for
 * an unlinked-but-open node; in that case an existing node at the target
 * is a fatal race (-EBUSY) rather than something to overwrite.
 *
 * Returns 0, -EBUSY (see above) or -ENOMEM if re-hashing the name fails;
 * on -ENOMEM the node is left unhashed.
 */
static int rename_node(struct fuse *f, fuse_ino_t olddir, const char *oldname,
		       fuse_ino_t newdir, const char *newname, int hide)
{
	struct node *node;
	struct node *newnode;
	int err = 0;

	pthread_mutex_lock(&f->lock);
	node = lookup_node(f, olddir, oldname);
	newnode = lookup_node(f, newdir, newname);
	if (node == NULL)
		goto out;

	if (newnode != NULL) {
		if (hide) {
			fprintf(stderr, "fuse: hidden file got created during hiding\n");
			err = -EBUSY;
			goto out;
		}
		/* rename overwrites the target: drop it from the tree */
		unlink_node(f, newnode);
	}

	unhash_name(f, node);
	if (hash_name(f, node, newdir, newname) == -1) {
		err = -ENOMEM;
		goto out;
	}

	if (hide)
		node->is_hidden = 1;

out:
	pthread_mutex_unlock(&f->lock);
	return err;
}
  1169. static void set_stat(struct fuse *f, fuse_ino_t nodeid, struct stat *stbuf)
  1170. {
  1171. if (!f->conf.use_ino)
  1172. stbuf->st_ino = nodeid;
  1173. if (f->conf.set_mode)
  1174. stbuf->st_mode = (stbuf->st_mode & S_IFMT) |
  1175. (0777 & ~f->conf.umask);
  1176. if (f->conf.set_uid)
  1177. stbuf->st_uid = f->conf.uid;
  1178. if (f->conf.set_gid)
  1179. stbuf->st_gid = f->conf.gid;
  1180. }
  1181. static struct fuse *req_fuse(fuse_req_t req)
  1182. {
  1183. return (struct fuse *) fuse_req_userdata(req);
  1184. }
  1185. static void fuse_intr_sighandler(int sig)
  1186. {
  1187. (void) sig;
  1188. /* Nothing to do */
  1189. }
/* Per-request state for signal-based interruption of a worker thread. */
struct fuse_intr_data {
	pthread_t id;		/* thread processing the request */
	pthread_cond_t cond;	/* signalled when the request completes */
	int finished;		/* set under f->lock once processing is done */
};
/*
 * INTERRUPT callback: repeatedly signal the thread that is processing
 * the interrupted request until it marks itself finished.  The one-second
 * timed wait re-sends the signal in case it arrived before the worker
 * entered its interruptible syscall.  Self-interruption is a no-op.
 */
static void fuse_interrupt(fuse_req_t req, void *d_)
{
	struct fuse_intr_data *d = d_;
	struct fuse *f = req_fuse(req);

	if (d->id == pthread_self())
		return;

	pthread_mutex_lock(&f->lock);
	while (!d->finished) {
		struct timeval now;
		struct timespec timeout;

		pthread_kill(d->id, f->conf.intr_signal);
		/* cond uses the default (CLOCK_REALTIME) clock, so
		   gettimeofday() is the matching time source here */
		gettimeofday(&now, NULL);
		timeout.tv_sec = now.tv_sec + 1;
		timeout.tv_nsec = now.tv_usec * 1000;
		pthread_cond_timedwait(&d->cond, &f->lock, &timeout);
	}
	pthread_mutex_unlock(&f->lock);
}
/*
 * Mark the request finished and wake any fuse_interrupt() caller.  The
 * flag and broadcast happen under f->lock so the interrupter cannot miss
 * the transition; the callback must be deregistered before destroying
 * the condvar so no new interrupter can touch it.
 */
static void fuse_do_finish_interrupt(struct fuse *f, fuse_req_t req,
				     struct fuse_intr_data *d)
{
	pthread_mutex_lock(&f->lock);
	d->finished = 1;
	pthread_cond_broadcast(&d->cond);
	pthread_mutex_unlock(&f->lock);
	fuse_req_interrupt_func(req, NULL, NULL);
	pthread_cond_destroy(&d->cond);
}
/*
 * Arm interruption for the current request: record the worker thread and
 * initialize the completion condvar before registering the callback, so
 * fuse_interrupt() never sees partially initialized state.
 */
static void fuse_do_prepare_interrupt(fuse_req_t req, struct fuse_intr_data *d)
{
	d->id = pthread_self();
	pthread_cond_init(&d->cond, NULL);
	d->finished = 0;
	fuse_req_interrupt_func(req, fuse_interrupt, d);
}
  1230. static inline void fuse_finish_interrupt(struct fuse *f, fuse_req_t req,
  1231. struct fuse_intr_data *d)
  1232. {
  1233. if (f->conf.intr)
  1234. fuse_do_finish_interrupt(f, req, d);
  1235. }
  1236. static inline void fuse_prepare_interrupt(struct fuse *f, fuse_req_t req,
  1237. struct fuse_intr_data *d)
  1238. {
  1239. if (f->conf.intr)
  1240. fuse_do_prepare_interrupt(req, d);
  1241. }
  1242. #if !defined(__FreeBSD__) && !defined(__NetBSD__)
/*
 * Dispatch open() through the API-version compat layer.  For pre-2.5
 * filesystems the operations table is reinterpreted via a cast to the
 * matching compat struct, so the layout of fs->op must stay in sync
 * with the historical definitions in fuse_compat.h.
 */
static int fuse_compat_open(struct fuse_fs *fs, const char *path,
			    struct fuse_file_info *fi)
{
	int err;
	if (!fs->compat || fs->compat >= 25)
		/* modern signature */
		err = fs->op.open(path, fi);
	else if (fs->compat == 22) {
		/* 2.2 used a smaller fuse_file_info; copy through a temp */
		struct fuse_file_info_compat tmp;
		memcpy(&tmp, fi, sizeof(tmp));
		err = ((struct fuse_operations_compat22 *) &fs->op)->open(path,
									  &tmp);
		memcpy(fi, &tmp, sizeof(tmp));
		fi->fh = tmp.fh;
	} else
		/* pre-2.2: open() took only the flags */
		err = ((struct fuse_operations_compat2 *) &fs->op)
			->open(path, fi->flags);
	return err;
}
/*
 * Dispatch release() through the API-version compat layer; pre-2.2
 * filesystems received only the open flags.
 */
static int fuse_compat_release(struct fuse_fs *fs, const char *path,
			       struct fuse_file_info *fi)
{
	if (!fs->compat || fs->compat >= 22)
		return fs->op.release(path, fi);
	else
		return ((struct fuse_operations_compat2 *) &fs->op)
			->release(path, fi->flags);
}
/*
 * Dispatch opendir() through the API-version compat layer; pre-2.5
 * filesystems used the smaller fuse_file_info_compat, bridged via a
 * temporary copy.
 */
static int fuse_compat_opendir(struct fuse_fs *fs, const char *path,
			       struct fuse_file_info *fi)
{
	if (!fs->compat || fs->compat >= 25)
		return fs->op.opendir(path, fi);
	else {
		int err;
		struct fuse_file_info_compat tmp;
		memcpy(&tmp, fi, sizeof(tmp));
		err = ((struct fuse_operations_compat22 *) &fs->op)
			->opendir(path, &tmp);
		memcpy(fi, &tmp, sizeof(tmp));
		fi->fh = tmp.fh;
		return err;
	}
}
  1286. static void convert_statfs_compat(struct fuse_statfs_compat1 *compatbuf,
  1287. struct statvfs *stbuf)
  1288. {
  1289. stbuf->f_bsize = compatbuf->block_size;
  1290. stbuf->f_blocks = compatbuf->blocks;
  1291. stbuf->f_bfree = compatbuf->blocks_free;
  1292. stbuf->f_bavail = compatbuf->blocks_free;
  1293. stbuf->f_files = compatbuf->files;
  1294. stbuf->f_ffree = compatbuf->files_free;
  1295. stbuf->f_namemax = compatbuf->namelen;
  1296. }
  1297. static void convert_statfs_old(struct statfs *oldbuf, struct statvfs *stbuf)
  1298. {
  1299. stbuf->f_bsize = oldbuf->f_bsize;
  1300. stbuf->f_blocks = oldbuf->f_blocks;
  1301. stbuf->f_bfree = oldbuf->f_bfree;
  1302. stbuf->f_bavail = oldbuf->f_bavail;
  1303. stbuf->f_files = oldbuf->f_files;
  1304. stbuf->f_ffree = oldbuf->f_ffree;
  1305. stbuf->f_namemax = oldbuf->f_namelen;
  1306. }
/*
 * Dispatch statfs() through the API-version compat layer:
 *  - >= 2.5: statvfs-based op (2.5 exactly always passed "/");
 *  - 2.2/2.4: struct statfs op, converted afterwards;
 *  - 1.x: fuse_statfs_compat1 op, converted afterwards.
 */
static int fuse_compat_statfs(struct fuse_fs *fs, const char *path,
			      struct statvfs *buf)
{
	int err;

	if (!fs->compat || fs->compat >= 25) {
		err = fs->op.statfs(fs->compat == 25 ? "/" : path, buf);
	} else if (fs->compat > 11) {
		struct statfs oldbuf;

		err = ((struct fuse_operations_compat22 *) &fs->op)
			->statfs("/", &oldbuf);
		if (!err)
			convert_statfs_old(&oldbuf, buf);
	} else {
		struct fuse_statfs_compat1 compatbuf;

		memset(&compatbuf, 0, sizeof(struct fuse_statfs_compat1));
		err = ((struct fuse_operations_compat1 *) &fs->op)
			->statfs(&compatbuf);
		if (!err)
			convert_statfs_compat(&compatbuf, buf);
	}
	return err;
}
  1329. #else /* __FreeBSD__ || __NetBSD__ */
  1330. static inline int fuse_compat_open(struct fuse_fs *fs, char *path,
  1331. struct fuse_file_info *fi)
  1332. {
  1333. return fs->op.open(path, fi);
  1334. }
  1335. static inline int fuse_compat_release(struct fuse_fs *fs, const char *path,
  1336. struct fuse_file_info *fi)
  1337. {
  1338. return fs->op.release(path, fi);
  1339. }
  1340. static inline int fuse_compat_opendir(struct fuse_fs *fs, const char *path,
  1341. struct fuse_file_info *fi)
  1342. {
  1343. return fs->op.opendir(path, fi);
  1344. }
  1345. static inline int fuse_compat_statfs(struct fuse_fs *fs, const char *path,
  1346. struct statvfs *buf)
  1347. {
  1348. return fs->op.statfs(fs->compat == 25 ? "/" : path, buf);
  1349. }
  1350. #endif /* __FreeBSD__ || __NetBSD__ */
  1351. int fuse_fs_getattr(struct fuse_fs *fs, const char *path, struct stat *buf)
  1352. {
  1353. fuse_get_context()->private_data = fs->user_data;
  1354. if (fs->op.getattr) {
  1355. if (fs->debug)
  1356. fprintf(stderr, "getattr %s\n", path);
  1357. return fs->op.getattr(path, buf);
  1358. } else {
  1359. return -ENOSYS;
  1360. }
  1361. }
  1362. int fuse_fs_fgetattr(struct fuse_fs *fs, const char *path, struct stat *buf,
  1363. struct fuse_file_info *fi)
  1364. {
  1365. fuse_get_context()->private_data = fs->user_data;
  1366. if (fs->op.fgetattr) {
  1367. if (fs->debug)
  1368. fprintf(stderr, "fgetattr[%llu] %s\n",
  1369. (unsigned long long) fi->fh, path);
  1370. return fs->op.fgetattr(path, buf, fi);
  1371. } else if (path && fs->op.getattr) {
  1372. if (fs->debug)
  1373. fprintf(stderr, "getattr %s\n", path);
  1374. return fs->op.getattr(path, buf);
  1375. } else {
  1376. return -ENOSYS;
  1377. }
  1378. }
  1379. int fuse_fs_rename(struct fuse_fs *fs, const char *oldpath,
  1380. const char *newpath)
  1381. {
  1382. fuse_get_context()->private_data = fs->user_data;
  1383. if (fs->op.rename) {
  1384. if (fs->debug)
  1385. fprintf(stderr, "rename %s %s\n", oldpath, newpath);
  1386. return fs->op.rename(oldpath, newpath);
  1387. } else {
  1388. return -ENOSYS;
  1389. }
  1390. }
  1391. int fuse_fs_unlink(struct fuse_fs *fs, const char *path)
  1392. {
  1393. fuse_get_context()->private_data = fs->user_data;
  1394. if (fs->op.unlink) {
  1395. if (fs->debug)
  1396. fprintf(stderr, "unlink %s\n", path);
  1397. return fs->op.unlink(path);
  1398. } else {
  1399. return -ENOSYS;
  1400. }
  1401. }
  1402. int fuse_fs_rmdir(struct fuse_fs *fs, const char *path)
  1403. {
  1404. fuse_get_context()->private_data = fs->user_data;
  1405. if (fs->op.rmdir) {
  1406. if (fs->debug)
  1407. fprintf(stderr, "rmdir %s\n", path);
  1408. return fs->op.rmdir(path);
  1409. } else {
  1410. return -ENOSYS;
  1411. }
  1412. }
  1413. int fuse_fs_symlink(struct fuse_fs *fs, const char *linkname, const char *path)
  1414. {
  1415. fuse_get_context()->private_data = fs->user_data;
  1416. if (fs->op.symlink) {
  1417. if (fs->debug)
  1418. fprintf(stderr, "symlink %s %s\n", linkname, path);
  1419. return fs->op.symlink(linkname, path);
  1420. } else {
  1421. return -ENOSYS;
  1422. }
  1423. }
  1424. int fuse_fs_link(struct fuse_fs *fs, const char *oldpath, const char *newpath)
  1425. {
  1426. fuse_get_context()->private_data = fs->user_data;
  1427. if (fs->op.link) {
  1428. if (fs->debug)
  1429. fprintf(stderr, "link %s %s\n", oldpath, newpath);
  1430. return fs->op.link(oldpath, newpath);
  1431. } else {
  1432. return -ENOSYS;
  1433. }
  1434. }
  1435. int fuse_fs_release(struct fuse_fs *fs, const char *path,
  1436. struct fuse_file_info *fi)
  1437. {
  1438. fuse_get_context()->private_data = fs->user_data;
  1439. if (fs->op.release) {
  1440. if (fs->debug)
  1441. fprintf(stderr, "release%s[%llu] flags: 0x%x\n",
  1442. fi->flush ? "+flush" : "",
  1443. (unsigned long long) fi->fh, fi->flags);
  1444. return fuse_compat_release(fs, path, fi);
  1445. } else {
  1446. return 0;
  1447. }
  1448. }
  1449. int fuse_fs_opendir(struct fuse_fs *fs, const char *path,
  1450. struct fuse_file_info *fi)
  1451. {
  1452. fuse_get_context()->private_data = fs->user_data;
  1453. if (fs->op.opendir) {
  1454. int err;
  1455. if (fs->debug)
  1456. fprintf(stderr, "opendir flags: 0x%x %s\n", fi->flags,
  1457. path);
  1458. err = fuse_compat_opendir(fs, path, fi);
  1459. if (fs->debug && !err)
  1460. fprintf(stderr, " opendir[%lli] flags: 0x%x %s\n",
  1461. (unsigned long long) fi->fh, fi->flags, path);
  1462. return err;
  1463. } else {
  1464. return 0;
  1465. }
  1466. }
  1467. int fuse_fs_open(struct fuse_fs *fs, const char *path,
  1468. struct fuse_file_info *fi)
  1469. {
  1470. fuse_get_context()->private_data = fs->user_data;
  1471. if (fs->op.open) {
  1472. int err;
  1473. if (fs->debug)
  1474. fprintf(stderr, "open flags: 0x%x %s\n", fi->flags,
  1475. path);
  1476. err = fuse_compat_open(fs, path, fi);
  1477. if (fs->debug && !err)
  1478. fprintf(stderr, " open[%lli] flags: 0x%x %s\n",
  1479. (unsigned long long) fi->fh, fi->flags, path);
  1480. return err;
  1481. } else {
  1482. return 0;
  1483. }
  1484. }
  1485. static void fuse_free_buf(struct fuse_bufvec *buf)
  1486. {
  1487. if (buf != NULL) {
  1488. size_t i;
  1489. for (i = 0; i < buf->count; i++)
  1490. free(buf->buf[i].mem);
  1491. free(buf);
  1492. }
  1493. }
/*
 * Read into a newly allocated bufvec.  Prefers the zero-copy read_buf()
 * op; otherwise emulates it by allocating a flat buffer, calling read(),
 * and wrapping the result.  On success (return 0) *bufp and its memory
 * are owned by the caller, who must release them with fuse_free_buf().
 * Returns -ENOSYS when neither op is implemented.
 */
int fuse_fs_read_buf(struct fuse_fs *fs, const char *path,
		     struct fuse_bufvec **bufp, size_t size, off_t off,
		     struct fuse_file_info *fi)
{
	fuse_get_context()->private_data = fs->user_data;
	if (fs->op.read || fs->op.read_buf) {
		int res;

		if (fs->debug)
			fprintf(stderr,
				"read[%llu] %zu bytes from %llu flags: 0x%x\n",
				(unsigned long long) fi->fh,
				size, (unsigned long long) off, fi->flags);

		if (fs->op.read_buf) {
			res = fs->op.read_buf(path, bufp, size, off, fi);
		} else {
			/* emulate read_buf on top of plain read() */
			struct fuse_bufvec *buf;
			void *mem;

			buf = malloc(sizeof(struct fuse_bufvec));
			if (buf == NULL)
				return -ENOMEM;

			mem = malloc(size);
			if (mem == NULL) {
				free(buf);
				return -ENOMEM;
			}
			*buf = FUSE_BUFVEC_INIT(size);
			buf->buf[0].mem = mem;
			/* ownership passes to the caller even if read() fails */
			*bufp = buf;

			res = fs->op.read(path, mem, size, off, fi);
			if (res >= 0)
				/* shrink to the bytes actually read */
				buf->buf[0].size = res;
		}

		if (fs->debug && res >= 0)
			fprintf(stderr, " read[%llu] %zu bytes from %llu\n",
				(unsigned long long) fi->fh,
				fuse_buf_size(*bufp),
				(unsigned long long) off);
		if (res >= 0 && fuse_buf_size(*bufp) > (int) size)
			fprintf(stderr, "fuse: read too many bytes\n");

		if (res < 0)
			return res;

		return 0;
	} else {
		return -ENOSYS;
	}
}
  1540. int fuse_fs_read(struct fuse_fs *fs, const char *path, char *mem, size_t size,
  1541. off_t off, struct fuse_file_info *fi)
  1542. {
  1543. int res;
  1544. struct fuse_bufvec *buf = NULL;
  1545. res = fuse_fs_read_buf(fs, path, &buf, size, off, fi);
  1546. if (res == 0) {
  1547. struct fuse_bufvec dst = FUSE_BUFVEC_INIT(size);
  1548. dst.buf[0].mem = mem;
  1549. res = fuse_buf_copy(&dst, buf, 0);
  1550. }
  1551. fuse_free_buf(buf);
  1552. return res;
  1553. }
/*
 * Write from a bufvec.  Prefers the zero-copy write_buf() op; otherwise
 * flattens the vector (copying only when it is not already a single
 * in-memory buffer) and calls plain write().  Returns the byte count or
 * a negative errno; -ENOSYS when neither op is implemented.
 *
 * Note the goto targets: out_free/out live lexically inside the if/else
 * arms, so the cleanup flow is shared across branches — keep the label
 * placement intact when touching this function.
 */
int fuse_fs_write_buf(struct fuse_fs *fs, const char *path,
		      struct fuse_bufvec *buf, off_t off,
		      struct fuse_file_info *fi)
{
	fuse_get_context()->private_data = fs->user_data;
	if (fs->op.write_buf || fs->op.write) {
		int res;
		size_t size = fuse_buf_size(buf);

		/* callers must pass an unconsumed vector */
		assert(buf->idx == 0 && buf->off == 0);

		if (fs->debug)
			fprintf(stderr,
				"write%s[%llu] %zu bytes to %llu flags: 0x%x\n",
				fi->writepage ? "page" : "",
				(unsigned long long) fi->fh,
				size,
				(unsigned long long) off,
				fi->flags);

		if (fs->op.write_buf) {
			res = fs->op.write_buf(path, buf, off, fi);
		} else {
			void *mem = NULL;
			struct fuse_buf *flatbuf;
			struct fuse_bufvec tmp = FUSE_BUFVEC_INIT(size);

			if (buf->count == 1 &&
			    !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
				/* already flat and in memory: no copy needed */
				flatbuf = &buf->buf[0];
			} else {
				res = -ENOMEM;
				mem = malloc(size);
				if (mem == NULL)
					goto out;

				tmp.buf[0].mem = mem;
				res = fuse_buf_copy(&tmp, buf, 0);
				if (res <= 0)
					goto out_free;

				tmp.buf[0].size = res;
				flatbuf = &tmp.buf[0];
			}

			res = fs->op.write(path, flatbuf->mem, flatbuf->size,
					   off, fi);
out_free:
			free(mem);
		}
out:
		/* NOTE(review): "%u" with an int 'res' is a slight format
		   mismatch; res >= 0 is guaranteed here so it prints
		   correctly in practice */
		if (fs->debug && res >= 0)
			fprintf(stderr, " write%s[%llu] %u bytes to %llu\n",
				fi->writepage ? "page" : "",
				(unsigned long long) fi->fh, res,
				(unsigned long long) off);
		if (res > (int) size)
			fprintf(stderr, "fuse: wrote too many bytes\n");

		return res;
	} else {
		return -ENOSYS;
	}
}
  1610. int fuse_fs_write(struct fuse_fs *fs, const char *path, const char *mem,
  1611. size_t size, off_t off, struct fuse_file_info *fi)
  1612. {
  1613. struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(size);
  1614. bufv.buf[0].mem = (void *) mem;
  1615. return fuse_fs_write_buf(fs, path, &bufv, off, fi);
  1616. }
  1617. int fuse_fs_fsync(struct fuse_fs *fs, const char *path, int datasync,
  1618. struct fuse_file_info *fi)
  1619. {
  1620. fuse_get_context()->private_data = fs->user_data;
  1621. if (fs->op.fsync) {
  1622. if (fs->debug)
  1623. fprintf(stderr, "fsync[%llu] datasync: %i\n",
  1624. (unsigned long long) fi->fh, datasync);
  1625. return fs->op.fsync(path, datasync, fi);
  1626. } else {
  1627. return -ENOSYS;
  1628. }
  1629. }
  1630. int fuse_fs_fsyncdir(struct fuse_fs *fs, const char *path, int datasync,
  1631. struct fuse_file_info *fi)
  1632. {
  1633. fuse_get_context()->private_data = fs->user_data;
  1634. if (fs->op.fsyncdir) {
  1635. if (fs->debug)
  1636. fprintf(stderr, "fsyncdir[%llu] datasync: %i\n",
  1637. (unsigned long long) fi->fh, datasync);
  1638. return fs->op.fsyncdir(path, datasync, fi);
  1639. } else {
  1640. return -ENOSYS;
  1641. }
  1642. }
  1643. int fuse_fs_flush(struct fuse_fs *fs, const char *path,
  1644. struct fuse_file_info *fi)
  1645. {
  1646. fuse_get_context()->private_data = fs->user_data;
  1647. if (fs->op.flush) {
  1648. if (fs->debug)
  1649. fprintf(stderr, "flush[%llu]\n",
  1650. (unsigned long long) fi->fh);
  1651. return fs->op.flush(path, fi);
  1652. } else {
  1653. return -ENOSYS;
  1654. }
  1655. }
  1656. int fuse_fs_statfs(struct fuse_fs *fs, const char *path, struct statvfs *buf)
  1657. {
  1658. fuse_get_context()->private_data = fs->user_data;
  1659. if (fs->op.statfs) {
  1660. if (fs->debug)
  1661. fprintf(stderr, "statfs %s\n", path);
  1662. return fuse_compat_statfs(fs, path, buf);
  1663. } else {
  1664. buf->f_namemax = 255;
  1665. buf->f_bsize = 512;
  1666. return 0;
  1667. }
  1668. }
  1669. int fuse_fs_releasedir(struct fuse_fs *fs, const char *path,
  1670. struct fuse_file_info *fi)
  1671. {
  1672. fuse_get_context()->private_data = fs->user_data;
  1673. if (fs->op.releasedir) {
  1674. if (fs->debug)
  1675. fprintf(stderr, "releasedir[%llu] flags: 0x%x\n",
  1676. (unsigned long long) fi->fh, fi->flags);
  1677. return fs->op.releasedir(path, fi);
  1678. } else {
  1679. return 0;
  1680. }
  1681. }
  1682. static int fill_dir_old(struct fuse_dirhandle *dh, const char *name, int type,
  1683. ino_t ino)
  1684. {
  1685. int res;
  1686. struct stat stbuf;
  1687. memset(&stbuf, 0, sizeof(stbuf));
  1688. stbuf.st_mode = type << 12;
  1689. stbuf.st_ino = ino;
  1690. res = dh->filler(dh->buf, name, &stbuf, 0);
  1691. return res ? -ENOMEM : 0;
  1692. }
  1693. int fuse_fs_readdir(struct fuse_fs *fs, const char *path, void *buf,
  1694. fuse_fill_dir_t filler, off_t off,
  1695. struct fuse_file_info *fi)
  1696. {
  1697. fuse_get_context()->private_data = fs->user_data;
  1698. if (fs->op.readdir) {
  1699. if (fs->debug)
  1700. fprintf(stderr, "readdir[%llu] from %llu\n",
  1701. (unsigned long long) fi->fh,
  1702. (unsigned long long) off);
  1703. return fs->op.readdir(path, buf, filler, off, fi);
  1704. } else if (fs->op.getdir) {
  1705. struct fuse_dirhandle dh;
  1706. if (fs->debug)
  1707. fprintf(stderr, "getdir[%llu]\n",
  1708. (unsigned long long) fi->fh);
  1709. dh.filler = filler;
  1710. dh.buf = buf;
  1711. return fs->op.getdir(path, &dh, fill_dir_old);
  1712. } else {
  1713. return -ENOSYS;
  1714. }
  1715. }
  1716. int fuse_fs_create(struct fuse_fs *fs, const char *path, mode_t mode,
  1717. struct fuse_file_info *fi)
  1718. {
  1719. fuse_get_context()->private_data = fs->user_data;
  1720. if (fs->op.create) {
  1721. int err;
  1722. if (fs->debug)
  1723. fprintf(stderr,
  1724. "create flags: 0x%x %s 0%o umask=0%03o\n",
  1725. fi->flags, path, mode,
  1726. fuse_get_context()->umask);
  1727. err = fs->op.create(path, mode, fi);
  1728. if (fs->debug && !err)
  1729. fprintf(stderr, " create[%llu] flags: 0x%x %s\n",
  1730. (unsigned long long) fi->fh, fi->flags, path);
  1731. return err;
  1732. } else {
  1733. return -ENOSYS;
  1734. }
  1735. }
  1736. int fuse_fs_lock(struct fuse_fs *fs, const char *path,
  1737. struct fuse_file_info *fi, int cmd, struct flock *lock)
  1738. {
  1739. fuse_get_context()->private_data = fs->user_data;
  1740. if (fs->op.lock) {
  1741. if (fs->debug)
  1742. fprintf(stderr, "lock[%llu] %s %s start: %llu len: %llu pid: %llu\n",
  1743. (unsigned long long) fi->fh,
  1744. (cmd == F_GETLK ? "F_GETLK" :
  1745. (cmd == F_SETLK ? "F_SETLK" :
  1746. (cmd == F_SETLKW ? "F_SETLKW" : "???"))),
  1747. (lock->l_type == F_RDLCK ? "F_RDLCK" :
  1748. (lock->l_type == F_WRLCK ? "F_WRLCK" :
  1749. (lock->l_type == F_UNLCK ? "F_UNLCK" :
  1750. "???"))),
  1751. (unsigned long long) lock->l_start,
  1752. (unsigned long long) lock->l_len,
  1753. (unsigned long long) lock->l_pid);
  1754. return fs->op.lock(path, fi, cmd, lock);
  1755. } else {
  1756. return -ENOSYS;
  1757. }
  1758. }
  1759. int fuse_fs_flock(struct fuse_fs *fs, const char *path,
  1760. struct fuse_file_info *fi, int op)
  1761. {
  1762. fuse_get_context()->private_data = fs->user_data;
  1763. if (fs->op.flock) {
  1764. if (fs->debug) {
  1765. int xop = op & ~LOCK_NB;
  1766. fprintf(stderr, "lock[%llu] %s%s\n",
  1767. (unsigned long long) fi->fh,
  1768. xop == LOCK_SH ? "LOCK_SH" :
  1769. (xop == LOCK_EX ? "LOCK_EX" :
  1770. (xop == LOCK_UN ? "LOCK_UN" : "???")),
  1771. (op & LOCK_NB) ? "|LOCK_NB" : "");
  1772. }
  1773. return fs->op.flock(path, fi, op);
  1774. } else {
  1775. return -ENOSYS;
  1776. }
  1777. }
  1778. int fuse_fs_chown(struct fuse_fs *fs, const char *path, uid_t uid, gid_t gid)
  1779. {
  1780. fuse_get_context()->private_data = fs->user_data;
  1781. if (fs->op.chown) {
  1782. if (fs->debug)
  1783. fprintf(stderr, "chown %s %lu %lu\n", path,
  1784. (unsigned long) uid, (unsigned long) gid);
  1785. return fs->op.chown(path, uid, gid);
  1786. } else {
  1787. return -ENOSYS;
  1788. }
  1789. }
  1790. int fuse_fs_truncate(struct fuse_fs *fs, const char *path, off_t size)
  1791. {
  1792. fuse_get_context()->private_data = fs->user_data;
  1793. if (fs->op.truncate) {
  1794. if (fs->debug)
  1795. fprintf(stderr, "truncate %s %llu\n", path,
  1796. (unsigned long long) size);
  1797. return fs->op.truncate(path, size);
  1798. } else {
  1799. return -ENOSYS;
  1800. }
  1801. }
  1802. int fuse_fs_ftruncate(struct fuse_fs *fs, const char *path, off_t size,
  1803. struct fuse_file_info *fi)
  1804. {
  1805. fuse_get_context()->private_data = fs->user_data;
  1806. if (fs->op.ftruncate) {
  1807. if (fs->debug)
  1808. fprintf(stderr, "ftruncate[%llu] %llu\n",
  1809. (unsigned long long) fi->fh,
  1810. (unsigned long long) size);
  1811. return fs->op.ftruncate(path, size, fi);
  1812. } else if (path && fs->op.truncate) {
  1813. if (fs->debug)
  1814. fprintf(stderr, "truncate %s %llu\n", path,
  1815. (unsigned long long) size);
  1816. return fs->op.truncate(path, size);
  1817. } else {
  1818. return -ENOSYS;
  1819. }
  1820. }
  1821. int fuse_fs_utimens(struct fuse_fs *fs, const char *path,
  1822. const struct timespec tv[2])
  1823. {
  1824. fuse_get_context()->private_data = fs->user_data;
  1825. if (fs->op.utimens) {
  1826. if (fs->debug)
  1827. fprintf(stderr, "utimens %s %li.%09lu %li.%09lu\n",
  1828. path, tv[0].tv_sec, tv[0].tv_nsec,
  1829. tv[1].tv_sec, tv[1].tv_nsec);
  1830. return fs->op.utimens(path, tv);
  1831. } else if(fs->op.utime) {
  1832. struct utimbuf buf;
  1833. if (fs->debug)
  1834. fprintf(stderr, "utime %s %li %li\n", path,
  1835. tv[0].tv_sec, tv[1].tv_sec);
  1836. buf.actime = tv[0].tv_sec;
  1837. buf.modtime = tv[1].tv_sec;
  1838. return fs->op.utime(path, &buf);
  1839. } else {
  1840. return -ENOSYS;
  1841. }
  1842. }
  1843. int fuse_fs_access(struct fuse_fs *fs, const char *path, int mask)
  1844. {
  1845. fuse_get_context()->private_data = fs->user_data;
  1846. if (fs->op.access) {
  1847. if (fs->debug)
  1848. fprintf(stderr, "access %s 0%o\n", path, mask);
  1849. return fs->op.access(path, mask);
  1850. } else {
  1851. return -ENOSYS;
  1852. }
  1853. }
  1854. int fuse_fs_readlink(struct fuse_fs *fs, const char *path, char *buf,
  1855. size_t len)
  1856. {
  1857. fuse_get_context()->private_data = fs->user_data;
  1858. if (fs->op.readlink) {
  1859. if (fs->debug)
  1860. fprintf(stderr, "readlink %s %lu\n", path,
  1861. (unsigned long) len);
  1862. return fs->op.readlink(path, buf, len);
  1863. } else {
  1864. return -ENOSYS;
  1865. }
  1866. }
  1867. int fuse_fs_mknod(struct fuse_fs *fs, const char *path, mode_t mode,
  1868. dev_t rdev)
  1869. {
  1870. fuse_get_context()->private_data = fs->user_data;
  1871. if (fs->op.mknod) {
  1872. if (fs->debug)
  1873. fprintf(stderr, "mknod %s 0%o 0x%llx umask=0%03o\n",
  1874. path, mode, (unsigned long long) rdev,
  1875. fuse_get_context()->umask);
  1876. return fs->op.mknod(path, mode, rdev);
  1877. } else {
  1878. return -ENOSYS;
  1879. }
  1880. }
  1881. int fuse_fs_mkdir(struct fuse_fs *fs, const char *path, mode_t mode)
  1882. {
  1883. fuse_get_context()->private_data = fs->user_data;
  1884. if (fs->op.mkdir) {
  1885. if (fs->debug)
  1886. fprintf(stderr, "mkdir %s 0%o umask=0%03o\n",
  1887. path, mode, fuse_get_context()->umask);
  1888. return fs->op.mkdir(path, mode);
  1889. } else {
  1890. return -ENOSYS;
  1891. }
  1892. }
  1893. int fuse_fs_setxattr(struct fuse_fs *fs, const char *path, const char *name,
  1894. const char *value, size_t size, int flags)
  1895. {
  1896. fuse_get_context()->private_data = fs->user_data;
  1897. if (fs->op.setxattr) {
  1898. if (fs->debug)
  1899. fprintf(stderr, "setxattr %s %s %lu 0x%x\n",
  1900. path, name, (unsigned long) size, flags);
  1901. return fs->op.setxattr(path, name, value, size, flags);
  1902. } else {
  1903. return -ENOSYS;
  1904. }
  1905. }
  1906. int fuse_fs_getxattr(struct fuse_fs *fs, const char *path, const char *name,
  1907. char *value, size_t size)
  1908. {
  1909. fuse_get_context()->private_data = fs->user_data;
  1910. if (fs->op.getxattr) {
  1911. if (fs->debug)
  1912. fprintf(stderr, "getxattr %s %s %lu\n",
  1913. path, name, (unsigned long) size);
  1914. return fs->op.getxattr(path, name, value, size);
  1915. } else {
  1916. return -ENOSYS;
  1917. }
  1918. }
  1919. int fuse_fs_listxattr(struct fuse_fs *fs, const char *path, char *list,
  1920. size_t size)
  1921. {
  1922. fuse_get_context()->private_data = fs->user_data;
  1923. if (fs->op.listxattr) {
  1924. if (fs->debug)
  1925. fprintf(stderr, "listxattr %s %lu\n",
  1926. path, (unsigned long) size);
  1927. return fs->op.listxattr(path, list, size);
  1928. } else {
  1929. return -ENOSYS;
  1930. }
  1931. }
  1932. int fuse_fs_bmap(struct fuse_fs *fs, const char *path, size_t blocksize,
  1933. uint64_t *idx)
  1934. {
  1935. fuse_get_context()->private_data = fs->user_data;
  1936. if (fs->op.bmap) {
  1937. if (fs->debug)
  1938. fprintf(stderr, "bmap %s blocksize: %lu index: %llu\n",
  1939. path, (unsigned long) blocksize,
  1940. (unsigned long long) *idx);
  1941. return fs->op.bmap(path, blocksize, idx);
  1942. } else {
  1943. return -ENOSYS;
  1944. }
  1945. }
  1946. int fuse_fs_removexattr(struct fuse_fs *fs, const char *path, const char *name)
  1947. {
  1948. fuse_get_context()->private_data = fs->user_data;
  1949. if (fs->op.removexattr) {
  1950. if (fs->debug)
  1951. fprintf(stderr, "removexattr %s %s\n", path, name);
  1952. return fs->op.removexattr(path, name);
  1953. } else {
  1954. return -ENOSYS;
  1955. }
  1956. }
  1957. int fuse_fs_ioctl(struct fuse_fs *fs, const char *path, int cmd, void *arg,
  1958. struct fuse_file_info *fi, unsigned int flags, void *data)
  1959. {
  1960. fuse_get_context()->private_data = fs->user_data;
  1961. if (fs->op.ioctl) {
  1962. if (fs->debug)
  1963. fprintf(stderr, "ioctl[%llu] 0x%x flags: 0x%x\n",
  1964. (unsigned long long) fi->fh, cmd, flags);
  1965. return fs->op.ioctl(path, cmd, arg, fi, flags, data);
  1966. } else
  1967. return -ENOSYS;
  1968. }
  1969. int fuse_fs_poll(struct fuse_fs *fs, const char *path,
  1970. struct fuse_file_info *fi, struct fuse_pollhandle *ph,
  1971. unsigned *reventsp)
  1972. {
  1973. fuse_get_context()->private_data = fs->user_data;
  1974. if (fs->op.poll) {
  1975. int res;
  1976. if (fs->debug)
  1977. fprintf(stderr, "poll[%llu] ph: %p\n",
  1978. (unsigned long long) fi->fh, ph);
  1979. res = fs->op.poll(path, fi, ph, reventsp);
  1980. if (fs->debug && !res)
  1981. fprintf(stderr, " poll[%llu] revents: 0x%x\n",
  1982. (unsigned long long) fi->fh, *reventsp);
  1983. return res;
  1984. } else
  1985. return -ENOSYS;
  1986. }
  1987. int fuse_fs_fallocate(struct fuse_fs *fs, const char *path, int mode,
  1988. off_t offset, off_t length, struct fuse_file_info *fi)
  1989. {
  1990. fuse_get_context()->private_data = fs->user_data;
  1991. if (fs->op.fallocate) {
  1992. if (fs->debug)
  1993. fprintf(stderr, "fallocate %s mode %x, offset: %llu, length: %llu\n",
  1994. path,
  1995. mode,
  1996. (unsigned long long) offset,
  1997. (unsigned long long) length);
  1998. return fs->op.fallocate(path, mode, offset, length, fi);
  1999. } else
  2000. return -ENOSYS;
  2001. }
  2002. static int is_open(struct fuse *f, fuse_ino_t dir, const char *name)
  2003. {
  2004. struct node *node;
  2005. int isopen = 0;
  2006. pthread_mutex_lock(&f->lock);
  2007. node = lookup_node(f, dir, name);
  2008. if (node && node->open_count > 0)
  2009. isopen = 1;
  2010. pthread_mutex_unlock(&f->lock);
  2011. return isopen;
  2012. }
/*
 * Pick a fresh ".fuse_hidden*" name in directory DIR that clashes with
 * no known node and no existing file, writing the name into NEWNAME and
 * returning the full path for it (caller frees).  Used to rename away a
 * file that is unlinked while still open.  Returns NULL when OLDNAME no
 * longer has a node, or when no free name was found within the bounded
 * number of attempts.
 */
static char *hidden_name(struct fuse *f, fuse_ino_t dir, const char *oldname,
			 char *newname, size_t bufsize)
{
	struct stat buf;
	struct node *node;
	struct node *newnode;
	char *newpath;
	int res;
	int failctr = 10;	/* bound the retry loop */

	do {
		pthread_mutex_lock(&f->lock);
		node = lookup_node(f, dir, oldname);
		if (node == NULL) {
			pthread_mutex_unlock(&f->lock);
			return NULL;
		}
		/* Find a candidate name that no node in the tree uses */
		do {
			f->hidectr ++;
			snprintf(newname, bufsize, ".fuse_hidden%08x%08x",
				 (unsigned int) node->nodeid, f->hidectr);
			newnode = lookup_node(f, dir, newname);
		} while(newnode);

		res = try_get_path(f, dir, newname, &newpath, NULL, false);
		pthread_mutex_unlock(&f->lock);
		if (res)
			break;

		/* Verify nothing exists on disk under that name; the
		   lock is released here, hence the outer retry loop */
		memset(&buf, 0, sizeof(buf));
		res = fuse_fs_getattr(f->fs, newpath, &buf);
		if (res == -ENOENT)
			break;	/* success: name is free on disk too */
		free(newpath);
		newpath = NULL;
	} while(res == 0 && --failctr);

	return newpath;
}
  2048. static int hide_node(struct fuse *f, const char *oldpath,
  2049. fuse_ino_t dir, const char *oldname)
  2050. {
  2051. char newname[64];
  2052. char *newpath;
  2053. int err = -EBUSY;
  2054. newpath = hidden_name(f, dir, oldname, newname, sizeof(newname));
  2055. if (newpath) {
  2056. err = fuse_fs_rename(f->fs, oldpath, newpath);
  2057. if (!err)
  2058. err = rename_node(f, dir, oldname, dir, newname, 1);
  2059. free(newpath);
  2060. }
  2061. return err;
  2062. }
  2063. static int mtime_eq(const struct stat *stbuf, const struct timespec *ts)
  2064. {
  2065. return stbuf->st_mtime == ts->tv_sec &&
  2066. ST_MTIM_NSEC(stbuf) == ts->tv_nsec;
  2067. }
  2068. #ifndef CLOCK_MONOTONIC
  2069. #define CLOCK_MONOTONIC CLOCK_REALTIME
  2070. #endif
  2071. static void curr_time(struct timespec *now)
  2072. {
  2073. static clockid_t clockid = CLOCK_MONOTONIC;
  2074. int res = clock_gettime(clockid, now);
  2075. if (res == -1 && errno == EINVAL) {
  2076. clockid = CLOCK_REALTIME;
  2077. res = clock_gettime(clockid, now);
  2078. }
  2079. if (res == -1) {
  2080. perror("fuse: clock_gettime");
  2081. abort();
  2082. }
  2083. }
  2084. static void update_stat(struct node *node, const struct stat *stbuf)
  2085. {
  2086. if (node->cache_valid && (!mtime_eq(stbuf, &node->mtime) ||
  2087. stbuf->st_size != node->size))
  2088. node->cache_valid = 0;
  2089. node->mtime.tv_sec = stbuf->st_mtime;
  2090. node->mtime.tv_nsec = ST_MTIM_NSEC(stbuf);
  2091. node->size = stbuf->st_size;
  2092. curr_time(&node->stat_updated);
  2093. }
/*
 * Stat PATH and bind the result to a (possibly newly created) node for
 * NAME under NODEID, filling the lookup reply E with ino, generation,
 * timeouts and attributes.  When FI is given, stats through the open
 * file instead of the path.  Returns 0 or a negative errno.
 */
static int lookup_path(struct fuse *f, fuse_ino_t nodeid,
		       const char *name, const char *path,
		       struct fuse_entry_param *e, struct fuse_file_info *fi)
{
	int res;

	memset(e, 0, sizeof(struct fuse_entry_param));
	if (fi)
		res = fuse_fs_fgetattr(f->fs, path, &e->attr, fi);
	else
		res = fuse_fs_getattr(f->fs, path, &e->attr);
	if (res == 0) {
		struct node *node;

		node = find_node(f, nodeid, name);
		if (node == NULL)
			res = -ENOMEM;
		else {
			e->ino = node->nodeid;
			e->generation = node->generation;
			e->entry_timeout = f->conf.entry_timeout;
			e->attr_timeout = f->conf.attr_timeout;
			if (f->conf.auto_cache) {
				/* Keep the cached size/mtime in sync */
				pthread_mutex_lock(&f->lock);
				update_stat(node, &e->attr);
				pthread_mutex_unlock(&f->lock);
			}
			set_stat(f, e->ino, &e->attr);
			if (f->conf.debug)
				fprintf(stderr, " NODEID: %lu\n",
					(unsigned long) e->ino);
		}
	}
	return res;
}
  2127. static struct fuse_context_i *fuse_get_context_internal(void)
  2128. {
  2129. struct fuse_context_i *c;
  2130. c = (struct fuse_context_i *) pthread_getspecific(fuse_context_key);
  2131. if (c == NULL) {
  2132. c = (struct fuse_context_i *)
  2133. calloc(1, sizeof(struct fuse_context_i));
  2134. if (c == NULL) {
  2135. /* This is hard to deal with properly, so just
  2136. abort. If memory is so low that the
  2137. context cannot be allocated, there's not
  2138. much hope for the filesystem anyway */
  2139. fprintf(stderr, "fuse: failed to allocate thread specific data\n");
  2140. abort();
  2141. }
  2142. pthread_setspecific(fuse_context_key, c);
  2143. }
  2144. return c;
  2145. }
/* Destructor for the per-thread context, registered with the
 * pthread key in fuse_create_context_key(). */
static void fuse_freecontext(void *data)
{
	free(data);
}
  2150. static int fuse_create_context_key(void)
  2151. {
  2152. int err = 0;
  2153. pthread_mutex_lock(&fuse_context_lock);
  2154. if (!fuse_context_ref) {
  2155. err = pthread_key_create(&fuse_context_key, fuse_freecontext);
  2156. if (err) {
  2157. fprintf(stderr, "fuse: failed to create thread specific key: %s\n",
  2158. strerror(err));
  2159. pthread_mutex_unlock(&fuse_context_lock);
  2160. return -1;
  2161. }
  2162. }
  2163. fuse_context_ref++;
  2164. pthread_mutex_unlock(&fuse_context_lock);
  2165. return 0;
  2166. }
  2167. static void fuse_delete_context_key(void)
  2168. {
  2169. pthread_mutex_lock(&fuse_context_lock);
  2170. fuse_context_ref--;
  2171. if (!fuse_context_ref) {
  2172. free(pthread_getspecific(fuse_context_key));
  2173. pthread_key_delete(fuse_context_key);
  2174. }
  2175. pthread_mutex_unlock(&fuse_context_lock);
  2176. }
  2177. static struct fuse *req_fuse_prepare(fuse_req_t req)
  2178. {
  2179. struct fuse_context_i *c = fuse_get_context_internal();
  2180. const struct fuse_ctx *ctx = fuse_req_ctx(req);
  2181. c->req = req;
  2182. c->ctx.fuse = req_fuse(req);
  2183. c->ctx.uid = ctx->uid;
  2184. c->ctx.gid = ctx->gid;
  2185. c->ctx.pid = ctx->pid;
  2186. c->ctx.umask = ctx->umask;
  2187. return c->ctx.fuse;
  2188. }
/* Reply with an error.  ERR is 0 or a negative errno as used
 * throughout this file, while fuse_reply_err() takes the plain
 * positive errno value, hence the negation. */
static inline void reply_err(fuse_req_t req, int err)
{
	/* fuse_reply_err() uses non-negated errno values */
	fuse_reply_err(req, -err);
}
  2194. static void reply_entry(fuse_req_t req, const struct fuse_entry_param *e,
  2195. int err)
  2196. {
  2197. if (!err) {
  2198. struct fuse *f = req_fuse(req);
  2199. if (fuse_reply_entry(req, e) == -ENOENT) {
  2200. /* Skip forget for negative result */
  2201. if (e->ino != 0)
  2202. forget_node(f, e->ino, 1);
  2203. }
  2204. } else
  2205. reply_err(req, err);
  2206. }
  2207. void fuse_fs_init(struct fuse_fs *fs, struct fuse_conn_info *conn)
  2208. {
  2209. fuse_get_context()->private_data = fs->user_data;
  2210. if (!fs->op.write_buf)
  2211. conn->want &= ~FUSE_CAP_SPLICE_READ;
  2212. if (!fs->op.lock)
  2213. conn->want &= ~FUSE_CAP_POSIX_LOCKS;
  2214. if (!fs->op.flock)
  2215. conn->want &= ~FUSE_CAP_FLOCK_LOCKS;
  2216. if (fs->op.init)
  2217. fs->user_data = fs->op.init(conn);
  2218. }
  2219. static void fuse_lib_init(void *data, struct fuse_conn_info *conn)
  2220. {
  2221. struct fuse *f = (struct fuse *) data;
  2222. struct fuse_context_i *c = fuse_get_context_internal();
  2223. memset(c, 0, sizeof(*c));
  2224. c->ctx.fuse = f;
  2225. conn->want |= FUSE_CAP_EXPORT_SUPPORT;
  2226. fuse_fs_init(f->fs, conn);
  2227. }
  2228. void fuse_fs_destroy(struct fuse_fs *fs)
  2229. {
  2230. fuse_get_context()->private_data = fs->user_data;
  2231. if (fs->op.destroy)
  2232. fs->op.destroy(fs->user_data);
  2233. if (fs->m)
  2234. fuse_put_module(fs->m);
  2235. free(fs);
  2236. }
  2237. static void fuse_lib_destroy(void *data)
  2238. {
  2239. struct fuse *f = (struct fuse *) data;
  2240. struct fuse_context_i *c = fuse_get_context_internal();
  2241. memset(c, 0, sizeof(*c));
  2242. c->ctx.fuse = f;
  2243. fuse_fs_destroy(f->fs);
  2244. f->fs = NULL;
  2245. }
/*
 * LOOKUP request.  "." and ".." are special-cased (the kernel sends
 * them when export support is enabled): "." resolves to the parent
 * node itself, pinned with a temporary reference so it cannot vanish,
 * and ".." resolves to the parent's parent; in both cases the name is
 * cleared so the path lookup targets the node directly.
 */
static void fuse_lib_lookup(fuse_req_t req, fuse_ino_t parent,
			    const char *name)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_entry_param e;
	char *path;
	int err;
	struct node *dot = NULL;

	if (name[0] == '.') {
		int len = strlen(name);

		if (len == 1 || (name[1] == '.' && len == 2)) {
			pthread_mutex_lock(&f->lock);
			if (len == 1) {
				if (f->conf.debug)
					fprintf(stderr, "LOOKUP-DOT\n");
				dot = get_node_nocheck(f, parent);
				if (dot == NULL) {
					pthread_mutex_unlock(&f->lock);
					reply_entry(req, &e, -ESTALE);
					return;
				}
				dot->refctr++;	/* keep node alive until reply */
			} else {
				if (f->conf.debug)
					fprintf(stderr, "LOOKUP-DOTDOT\n");
				parent = get_node(f, parent)->parent->nodeid;
			}
			pthread_mutex_unlock(&f->lock);
			name = NULL;	/* resolve the node itself */
		}
	}

	err = get_path_name(f, parent, name, &path);
	if (!err) {
		struct fuse_intr_data d;
		if (f->conf.debug)
			fprintf(stderr, "LOOKUP %s\n", path);
		fuse_prepare_interrupt(f, req, &d);
		err = lookup_path(f, parent, name, path, &e, NULL);
		if (err == -ENOENT && f->conf.negative_timeout != 0.0) {
			/* Report a cacheable negative entry instead of
			   an error */
			e.ino = 0;
			e.entry_timeout = f->conf.negative_timeout;
			err = 0;
		}
		fuse_finish_interrupt(f, req, &d);
		free_path(f, parent, path);
	}
	if (dot) {
		/* Drop the temporary reference taken for "." */
		pthread_mutex_lock(&f->lock);
		unref_node(f, dot);
		pthread_mutex_unlock(&f->lock);
	}
	reply_entry(req, &e, err);
}
/* Drop NLOOKUP lookup references from the node, logging the request
 * when debugging is enabled. */
static void do_forget(struct fuse *f, fuse_ino_t ino, uint64_t nlookup)
{
	if (f->conf.debug)
		fprintf(stderr, "FORGET %llu/%llu\n", (unsigned long long)ino,
			(unsigned long long) nlookup);
	forget_node(f, ino, nlookup);
}
  2306. static void fuse_lib_forget(fuse_req_t req, fuse_ino_t ino,
  2307. unsigned long nlookup)
  2308. {
  2309. do_forget(req_fuse(req), ino, nlookup);
  2310. fuse_reply_none(req);
  2311. }
  2312. static void fuse_lib_forget_multi(fuse_req_t req, size_t count,
  2313. struct fuse_forget_data *forgets)
  2314. {
  2315. struct fuse *f = req_fuse(req);
  2316. size_t i;
  2317. for (i = 0; i < count; i++)
  2318. do_forget(f, forgets[i].ino, forgets[i].nlookup);
  2319. fuse_reply_none(req);
  2320. }
/*
 * GETATTR request.  Uses fgetattr() through the open file when the
 * kernel supplied one and the filesystem implements it (then a NULL
 * path is acceptable); otherwise stats by path.  A hidden
 * (unlinked-but-open) node has the extra hidden link subtracted from
 * the reported link count.
 */
static void fuse_lib_getattr(fuse_req_t req, fuse_ino_t ino,
			     struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct stat buf;
	char *path;
	int err;

	memset(&buf, 0, sizeof(buf));

	if (fi != NULL && f->fs->op.fgetattr)
		err = get_path_nullok(f, ino, &path);
	else
		err = get_path(f, ino, &path);
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		if (fi)
			err = fuse_fs_fgetattr(f->fs, path, &buf, fi);
		else
			err = fuse_fs_getattr(f->fs, path, &buf);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	if (!err) {
		struct node *node;

		pthread_mutex_lock(&f->lock);
		node = get_node(f, ino);
		if (node->is_hidden && buf.st_nlink > 0)
			buf.st_nlink--;	/* hide the .fuse_hidden link */
		if (f->conf.auto_cache)
			update_stat(node, &buf);
		pthread_mutex_unlock(&f->lock);
		set_stat(f, ino, &buf);
		fuse_reply_attr(req, &buf, f->conf.attr_timeout);
	} else
		reply_err(req, err);
}
  2357. int fuse_fs_chmod(struct fuse_fs *fs, const char *path, mode_t mode)
  2358. {
  2359. fuse_get_context()->private_data = fs->user_data;
  2360. if (fs->op.chmod)
  2361. return fs->op.chmod(path, mode);
  2362. else
  2363. return -ENOSYS;
  2364. }
/*
 * SETATTR request.  Applies the requested changes in order — mode,
 * owner, size, times — stopping at the first error, then re-stats the
 * file so the kernel receives the resulting attributes.  A NULL path
 * is tolerated only for a pure ftruncate when both ftruncate and
 * fgetattr are implemented.
 */
static void fuse_lib_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
			     int valid, struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct stat buf;
	char *path;
	int err;

	memset(&buf, 0, sizeof(buf));
	if (valid == FUSE_SET_ATTR_SIZE && fi != NULL &&
	    f->fs->op.ftruncate && f->fs->op.fgetattr)
		err = get_path_nullok(f, ino, &path);
	else
		err = get_path(f, ino, &path);
	if (!err) {
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		err = 0;
		if (!err && (valid & FUSE_SET_ATTR_MODE))
			err = fuse_fs_chmod(f->fs, path, attr->st_mode);
		if (!err && (valid & (FUSE_SET_ATTR_UID | FUSE_SET_ATTR_GID))) {
			/* -1 leaves the respective id unchanged */
			uid_t uid = (valid & FUSE_SET_ATTR_UID) ?
				attr->st_uid : (uid_t) -1;
			gid_t gid = (valid & FUSE_SET_ATTR_GID) ?
				attr->st_gid : (gid_t) -1;
			err = fuse_fs_chown(f->fs, path, uid, gid);
		}
		if (!err && (valid & FUSE_SET_ATTR_SIZE)) {
			if (fi)
				err = fuse_fs_ftruncate(f->fs, path,
							attr->st_size, fi);
			else
				err = fuse_fs_truncate(f->fs, path,
						       attr->st_size);
		}
#ifdef HAVE_UTIMENSAT
		/* utimensat path: atime and mtime can be set
		   independently via UTIME_OMIT / UTIME_NOW */
		if (!err && f->utime_omit_ok &&
		    (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME))) {
			struct timespec tv[2];

			tv[0].tv_sec = 0;
			tv[1].tv_sec = 0;
			tv[0].tv_nsec = UTIME_OMIT;
			tv[1].tv_nsec = UTIME_OMIT;

			if (valid & FUSE_SET_ATTR_ATIME_NOW)
				tv[0].tv_nsec = UTIME_NOW;
			else if (valid & FUSE_SET_ATTR_ATIME)
				tv[0] = attr->st_atim;

			if (valid & FUSE_SET_ATTR_MTIME_NOW)
				tv[1].tv_nsec = UTIME_NOW;
			else if (valid & FUSE_SET_ATTR_MTIME)
				tv[1] = attr->st_mtim;

			err = fuse_fs_utimens(f->fs, path, tv);
		} else
#endif
		/* Legacy path: only handled when both times are set */
		if (!err &&
		    (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) ==
		    (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) {
			struct timespec tv[2];
			tv[0].tv_sec = attr->st_atime;
			tv[0].tv_nsec = ST_ATIM_NSEC(attr);
			tv[1].tv_sec = attr->st_mtime;
			tv[1].tv_nsec = ST_MTIM_NSEC(attr);
			err = fuse_fs_utimens(f->fs, path, tv);
		}
		if (!err) {
			/* Re-stat to report the attributes as they
			   actually ended up */
			if (fi)
				err = fuse_fs_fgetattr(f->fs, path, &buf, fi);
			else
				err = fuse_fs_getattr(f->fs, path, &buf);
		}
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	if (!err) {
		if (f->conf.auto_cache) {
			pthread_mutex_lock(&f->lock);
			update_stat(get_node(f, ino), &buf);
			pthread_mutex_unlock(&f->lock);
		}
		set_stat(f, ino, &buf);
		fuse_reply_attr(req, &buf, f->conf.attr_timeout);
	} else
		reply_err(req, err);
}
  2448. static void fuse_lib_access(fuse_req_t req, fuse_ino_t ino, int mask)
  2449. {
  2450. struct fuse *f = req_fuse_prepare(req);
  2451. char *path;
  2452. int err;
  2453. err = get_path(f, ino, &path);
  2454. if (!err) {
  2455. struct fuse_intr_data d;
  2456. fuse_prepare_interrupt(f, req, &d);
  2457. err = fuse_fs_access(f->fs, path, mask);
  2458. fuse_finish_interrupt(f, req, &d);
  2459. free_path(f, ino, path);
  2460. }
  2461. reply_err(req, err);
  2462. }
  2463. static void fuse_lib_readlink(fuse_req_t req, fuse_ino_t ino)
  2464. {
  2465. struct fuse *f = req_fuse_prepare(req);
  2466. char linkname[PATH_MAX + 1];
  2467. char *path;
  2468. int err;
  2469. err = get_path(f, ino, &path);
  2470. if (!err) {
  2471. struct fuse_intr_data d;
  2472. fuse_prepare_interrupt(f, req, &d);
  2473. err = fuse_fs_readlink(f->fs, path, linkname, sizeof(linkname));
  2474. fuse_finish_interrupt(f, req, &d);
  2475. free_path(f, ino, path);
  2476. }
  2477. if (!err) {
  2478. linkname[PATH_MAX] = '\0';
  2479. fuse_reply_readlink(req, linkname);
  2480. } else
  2481. reply_err(req, err);
  2482. }
/*
 * MKNOD request.  For regular files, first try the filesystem's
 * create() (open with O_CREAT|O_EXCL|O_WRONLY, then release
 * immediately); fall back to a plain mknod() when create is not
 * implemented (-ENOSYS).
 */
static void fuse_lib_mknod(fuse_req_t req, fuse_ino_t parent, const char *name,
			   mode_t mode, dev_t rdev)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_entry_param e;
	char *path;
	int err;

	err = get_path_name(f, parent, name, &path);
	if (!err) {
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		err = -ENOSYS;
		if (S_ISREG(mode)) {
			/* Preferred route: create() + release() */
			struct fuse_file_info fi;

			memset(&fi, 0, sizeof(fi));
			fi.flags = O_CREAT | O_EXCL | O_WRONLY;
			err = fuse_fs_create(f->fs, path, mode, &fi);
			if (!err) {
				err = lookup_path(f, parent, name, path, &e,
						  &fi);
				fuse_fs_release(f->fs, path, &fi);
			}
		}
		if (err == -ENOSYS) {
			/* Fallback: plain mknod() */
			err = fuse_fs_mknod(f->fs, path, mode, rdev);
			if (!err)
				err = lookup_path(f, parent, name, path, &e,
						  NULL);
		}
		fuse_finish_interrupt(f, req, &d);
		free_path(f, parent, path);
	}
	reply_entry(req, &e, err);
}
  2517. static void fuse_lib_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name,
  2518. mode_t mode)
  2519. {
  2520. struct fuse *f = req_fuse_prepare(req);
  2521. struct fuse_entry_param e;
  2522. char *path;
  2523. int err;
  2524. err = get_path_name(f, parent, name, &path);
  2525. if (!err) {
  2526. struct fuse_intr_data d;
  2527. fuse_prepare_interrupt(f, req, &d);
  2528. err = fuse_fs_mkdir(f->fs, path, mode);
  2529. if (!err)
  2530. err = lookup_path(f, parent, name, path, &e, NULL);
  2531. fuse_finish_interrupt(f, req, &d);
  2532. free_path(f, parent, path);
  2533. }
  2534. reply_entry(req, &e, err);
  2535. }
/*
 * UNLINK request.  If the victim is still open somewhere and
 * hard_remove is not enabled, it is renamed to a ".fuse_hidden*" name
 * instead; the real unlink happens on the last release (see
 * fuse_do_release()).  Otherwise the file is unlinked and its node
 * removed from the tree.
 */
static void fuse_lib_unlink(fuse_req_t req, fuse_ino_t parent,
			    const char *name)
{
	struct fuse *f = req_fuse_prepare(req);
	struct node *wnode;
	char *path;
	int err;

	err = get_path_wrlock(f, parent, name, &path, &wnode);
	if (!err) {
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		if (!f->conf.hard_remove && is_open(f, parent, name)) {
			/* Still open: hide instead of removing */
			err = hide_node(f, path, parent, name);
		} else {
			err = fuse_fs_unlink(f->fs, path);
			if (!err)
				remove_node(f, parent, name);
		}
		fuse_finish_interrupt(f, req, &d);
		free_path_wrlock(f, parent, wnode, path);
	}
	reply_err(req, err);
}
  2559. static void fuse_lib_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name)
  2560. {
  2561. struct fuse *f = req_fuse_prepare(req);
  2562. struct node *wnode;
  2563. char *path;
  2564. int err;
  2565. err = get_path_wrlock(f, parent, name, &path, &wnode);
  2566. if (!err) {
  2567. struct fuse_intr_data d;
  2568. fuse_prepare_interrupt(f, req, &d);
  2569. err = fuse_fs_rmdir(f->fs, path);
  2570. fuse_finish_interrupt(f, req, &d);
  2571. if (!err)
  2572. remove_node(f, parent, name);
  2573. free_path_wrlock(f, parent, wnode, path);
  2574. }
  2575. reply_err(req, err);
  2576. }
  2577. static void fuse_lib_symlink(fuse_req_t req, const char *linkname,
  2578. fuse_ino_t parent, const char *name)
  2579. {
  2580. struct fuse *f = req_fuse_prepare(req);
  2581. struct fuse_entry_param e;
  2582. char *path;
  2583. int err;
  2584. err = get_path_name(f, parent, name, &path);
  2585. if (!err) {
  2586. struct fuse_intr_data d;
  2587. fuse_prepare_interrupt(f, req, &d);
  2588. err = fuse_fs_symlink(f->fs, linkname, path);
  2589. if (!err)
  2590. err = lookup_path(f, parent, name, path, &e, NULL);
  2591. fuse_finish_interrupt(f, req, &d);
  2592. free_path(f, parent, path);
  2593. }
  2594. reply_entry(req, &e, err);
  2595. }
/*
 * RENAME request.  If the destination exists and is still open (and
 * hard_remove is off), it is first hidden so its data stays reachable
 * until the last release; then the backend rename is performed and the
 * node tree updated to match.
 */
static void fuse_lib_rename(fuse_req_t req, fuse_ino_t olddir,
			    const char *oldname, fuse_ino_t newdir,
			    const char *newname)
{
	struct fuse *f = req_fuse_prepare(req);
	char *oldpath;
	char *newpath;
	struct node *wnode1;
	struct node *wnode2;
	int err;

	err = get_path2(f, olddir, oldname, newdir, newname,
			&oldpath, &newpath, &wnode1, &wnode2);
	if (!err) {
		struct fuse_intr_data d;

		err = 0;
		fuse_prepare_interrupt(f, req, &d);
		if (!f->conf.hard_remove && is_open(f, newdir, newname))
			err = hide_node(f, newpath, newdir, newname);
		if (!err) {
			err = fuse_fs_rename(f->fs, oldpath, newpath);
			if (!err)
				err = rename_node(f, olddir, oldname, newdir,
						  newname, 0);
		}
		fuse_finish_interrupt(f, req, &d);
		free_path2(f, olddir, newdir, wnode1, wnode2, oldpath, newpath);
	}
	reply_err(req, err);
}
  2625. static void fuse_lib_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent,
  2626. const char *newname)
  2627. {
  2628. struct fuse *f = req_fuse_prepare(req);
  2629. struct fuse_entry_param e;
  2630. char *oldpath;
  2631. char *newpath;
  2632. int err;
  2633. err = get_path2(f, ino, NULL, newparent, newname,
  2634. &oldpath, &newpath, NULL, NULL);
  2635. if (!err) {
  2636. struct fuse_intr_data d;
  2637. fuse_prepare_interrupt(f, req, &d);
  2638. err = fuse_fs_link(f->fs, oldpath, newpath);
  2639. if (!err)
  2640. err = lookup_path(f, newparent, newname, newpath,
  2641. &e, NULL);
  2642. fuse_finish_interrupt(f, req, &d);
  2643. free_path2(f, ino, newparent, NULL, NULL, oldpath, newpath);
  2644. }
  2645. reply_entry(req, &e, err);
  2646. }
  2647. static void fuse_do_release(struct fuse *f, fuse_ino_t ino, const char *path,
  2648. struct fuse_file_info *fi)
  2649. {
  2650. struct node *node;
  2651. int unlink_hidden = 0;
  2652. const char *compatpath;
  2653. if (path != NULL || f->nullpath_ok || f->conf.nopath)
  2654. compatpath = path;
  2655. else
  2656. compatpath = "-";
  2657. fuse_fs_release(f->fs, compatpath, fi);
  2658. pthread_mutex_lock(&f->lock);
  2659. node = get_node(f, ino);
  2660. assert(node->open_count > 0);
  2661. --node->open_count;
  2662. if (node->is_hidden && !node->open_count) {
  2663. unlink_hidden = 1;
  2664. node->is_hidden = 0;
  2665. }
  2666. pthread_mutex_unlock(&f->lock);
  2667. if(unlink_hidden) {
  2668. if (path) {
  2669. fuse_fs_unlink(f->fs, path);
  2670. } else if (f->conf.nopath) {
  2671. char *unlinkpath;
  2672. if (get_path(f, ino, &unlinkpath) == 0)
  2673. fuse_fs_unlink(f->fs, unlinkpath);
  2674. free_path(f, ino, unlinkpath);
  2675. }
  2676. }
  2677. }
/*
 * CREATE request.  On success the node's open count is bumped before
 * replying; if the kernel gave up on the request in the meantime
 * (fuse_reply_create() returns -ENOENT) the open is rolled back with a
 * release plus forget.  A create that yields a non-regular file is
 * treated as an I/O error.
 */
static void fuse_lib_create(fuse_req_t req, fuse_ino_t parent,
			    const char *name, mode_t mode,
			    struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_entry_param e;
	char *path;
	int err;

	err = get_path_name(f, parent, name, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_create(f->fs, path, mode, fi);
		if (!err) {
			err = lookup_path(f, parent, name, path, &e, fi);
			if (err)
				fuse_fs_release(f->fs, path, fi);
			else if (!S_ISREG(e.attr.st_mode)) {
				/* create() must produce a regular file */
				err = -EIO;
				fuse_fs_release(f->fs, path, fi);
				forget_node(f, e.ino, 1);
			} else {
				if (f->conf.direct_io)
					fi->direct_io = 1;
				if (f->conf.kernel_cache)
					fi->keep_cache = 1;
			}
		}
		fuse_finish_interrupt(f, req, &d);
	}
	if (!err) {
		pthread_mutex_lock(&f->lock);
		get_node(f, e.ino)->open_count++;
		pthread_mutex_unlock(&f->lock);
		if (fuse_reply_create(req, &e, fi) == -ENOENT) {
			/* The open syscall was interrupted, so it
			   must be cancelled */
			fuse_do_release(f, e.ino, path, fi);
			forget_node(f, e.ino, 1);
		}
	} else {
		reply_err(req, err);
	}
	free_path(f, parent, path);
}
  2723. static double diff_timespec(const struct timespec *t1,
  2724. const struct timespec *t2)
  2725. {
  2726. return (t1->tv_sec - t2->tv_sec) +
  2727. ((double) t1->tv_nsec - (double) t2->tv_nsec) / 1000000000.0;
  2728. }
/*
 * auto_cache support for open: revalidate the node's cached size/mtime
 * and tell the kernel to keep its page cache (keep_cache) only when
 * the file appears unchanged.  Temporarily drops f->lock around the
 * fgetattr call.
 */
static void open_auto_cache(struct fuse *f, fuse_ino_t ino, const char *path,
			    struct fuse_file_info *fi)
{
	struct node *node;

	pthread_mutex_lock(&f->lock);
	node = get_node(f, ino);
	if (node->cache_valid) {
		struct timespec now;

		curr_time(&now);
		if (diff_timespec(&now, &node->stat_updated) >
		    f->conf.ac_attr_timeout) {
			struct stat stbuf;
			int err;
			/* Cached attributes too old: re-stat without
			   holding the lock */
			pthread_mutex_unlock(&f->lock);
			err = fuse_fs_fgetattr(f->fs, path, &stbuf, fi);
			pthread_mutex_lock(&f->lock);
			if (!err)
				update_stat(node, &stbuf);
			else
				node->cache_valid = 0;
		}
	}
	/* update_stat() above clears cache_valid when size or mtime
	   changed, so this decides whether the kernel cache survives */
	if (node->cache_valid)
		fi->keep_cache = 1;

	node->cache_valid = 1;
	pthread_mutex_unlock(&f->lock);
}
/*
 * OPEN request.  Applies the global direct_io / kernel_cache /
 * auto_cache options to the file handle, bumps the node's open count
 * before replying, and rolls the open back with a release when the
 * kernel gave up on the request (-ENOENT from the reply).
 */
static void fuse_lib_open(fuse_req_t req, fuse_ino_t ino,
			  struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	char *path;
	int err;

	err = get_path(f, ino, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_open(f->fs, path, fi);
		if (!err) {
			if (f->conf.direct_io)
				fi->direct_io = 1;
			if (f->conf.kernel_cache)
				fi->keep_cache = 1;

			if (f->conf.auto_cache)
				open_auto_cache(f, ino, path, fi);
		}
		fuse_finish_interrupt(f, req, &d);
	}
	if (!err) {
		pthread_mutex_lock(&f->lock);
		get_node(f, ino)->open_count++;
		pthread_mutex_unlock(&f->lock);
		if (fuse_reply_open(req, fi) == -ENOENT) {
			/* The open syscall was interrupted, so it
			   must be cancelled */
			fuse_do_release(f, ino, path, fi);
		}
	} else
		reply_err(req, err);
	free_path(f, ino, path);
}
  2790. static void fuse_lib_read(fuse_req_t req, fuse_ino_t ino, size_t size,
  2791. off_t off, struct fuse_file_info *fi)
  2792. {
  2793. struct fuse *f = req_fuse_prepare(req);
  2794. struct fuse_bufvec *buf = NULL;
  2795. char *path;
  2796. int res;
  2797. res = get_path_nullok(f, ino, &path);
  2798. if (res == 0) {
  2799. struct fuse_intr_data d;
  2800. fuse_prepare_interrupt(f, req, &d);
  2801. res = fuse_fs_read_buf(f->fs, path, &buf, size, off, fi);
  2802. fuse_finish_interrupt(f, req, &d);
  2803. free_path(f, ino, path);
  2804. }
  2805. if (res == 0)
  2806. fuse_reply_data(req, buf, FUSE_BUF_SPLICE_MOVE);
  2807. else
  2808. reply_err(req, res);
  2809. fuse_free_buf(buf);
  2810. }
  2811. static void fuse_lib_write_buf(fuse_req_t req, fuse_ino_t ino,
  2812. struct fuse_bufvec *buf, off_t off,
  2813. struct fuse_file_info *fi)
  2814. {
  2815. struct fuse *f = req_fuse_prepare(req);
  2816. char *path;
  2817. int res;
  2818. res = get_path_nullok(f, ino, &path);
  2819. if (res == 0) {
  2820. struct fuse_intr_data d;
  2821. fuse_prepare_interrupt(f, req, &d);
  2822. res = fuse_fs_write_buf(f->fs, path, buf, off, fi);
  2823. fuse_finish_interrupt(f, req, &d);
  2824. free_path(f, ino, path);
  2825. }
  2826. if (res >= 0)
  2827. fuse_reply_write(req, res);
  2828. else
  2829. reply_err(req, res);
  2830. }
  2831. static void fuse_lib_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
  2832. struct fuse_file_info *fi)
  2833. {
  2834. struct fuse *f = req_fuse_prepare(req);
  2835. char *path;
  2836. int err;
  2837. err = get_path_nullok(f, ino, &path);
  2838. if (!err) {
  2839. struct fuse_intr_data d;
  2840. fuse_prepare_interrupt(f, req, &d);
  2841. err = fuse_fs_fsync(f->fs, path, datasync, fi);
  2842. fuse_finish_interrupt(f, req, &d);
  2843. free_path(f, ino, path);
  2844. }
  2845. reply_err(req, err);
  2846. }
  2847. static struct fuse_dh *get_dirhandle(const struct fuse_file_info *llfi,
  2848. struct fuse_file_info *fi)
  2849. {
  2850. struct fuse_dh *dh = (struct fuse_dh *) (uintptr_t) llfi->fh;
  2851. memset(fi, 0, sizeof(struct fuse_file_info));
  2852. fi->fh = dh->fh;
  2853. fi->fh_old = dh->fh;
  2854. return dh;
  2855. }
/*
 * OPENDIR request.  Allocates a fuse_dh directory handle (which caches
 * directory contents between readdir calls) and stores its pointer in
 * the lowlevel fh.  The handle is destroyed again if the filesystem
 * open fails or the kernel abandoned the request.
 */
static void fuse_lib_opendir(fuse_req_t req, fuse_ino_t ino,
			     struct fuse_file_info *llfi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_dh *dh;
	struct fuse_file_info fi;
	char *path;
	int err;

	dh = (struct fuse_dh *) malloc(sizeof(struct fuse_dh));
	if (dh == NULL) {
		reply_err(req, -ENOMEM);
		return;
	}
	memset(dh, 0, sizeof(struct fuse_dh));
	dh->fuse = f;
	dh->contents = NULL;
	dh->len = 0;
	dh->filled = 0;
	dh->nodeid = ino;
	fuse_mutex_init(&dh->lock);

	/* The handle travels to later readdir/releasedir calls in fh */
	llfi->fh = (uintptr_t) dh;

	memset(&fi, 0, sizeof(fi));
	fi.flags = llfi->flags;

	err = get_path(f, ino, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_opendir(f->fs, path, &fi);
		fuse_finish_interrupt(f, req, &d);
		dh->fh = fi.fh;	/* remember the filesystem's own handle */
	}
	if (!err) {
		if (fuse_reply_open(req, llfi) == -ENOENT) {
			/* The opendir syscall was interrupted, so it
			   must be cancelled */
			fuse_fs_releasedir(f->fs, path, &fi);
			pthread_mutex_destroy(&dh->lock);
			free(dh);
		}
	} else {
		reply_err(req, err);
		pthread_mutex_destroy(&dh->lock);
		free(dh);
	}
	free_path(f, ino, path);
}
  2902. static int extend_contents(struct fuse_dh *dh, unsigned minsize)
  2903. {
  2904. if (minsize > dh->size) {
  2905. char *newptr;
  2906. unsigned newsize = dh->size;
  2907. if (!newsize)
  2908. newsize = 1024;
  2909. while (newsize < minsize) {
  2910. if (newsize >= 0x80000000)
  2911. newsize = 0xffffffff;
  2912. else
  2913. newsize *= 2;
  2914. }
  2915. newptr = (char *) realloc(dh->contents, newsize);
  2916. if (!newptr) {
  2917. dh->error = -ENOMEM;
  2918. return -1;
  2919. }
  2920. dh->contents = newptr;
  2921. dh->size = newsize;
  2922. }
  2923. return 0;
  2924. }
/*
 * Directory filler callback handed to the filesystem's readdir().
 * Two modes, distinguished by the offset argument:
 *  - off != 0: the filesystem supplies real directory offsets; entries
 *    go straight into the reply buffer (dh->needlen bytes) and the
 *    result is not cacheable (dh->filled cleared).  Returns 1 once the
 *    buffer is full.
 *  - off == 0: the whole directory is accumulated in dh->contents,
 *    growing it as needed; the running length serves as the entry
 *    "offset".
 * Returns 0 to continue, 1 to stop.
 */
static int fill_dir(void *dh_, const char *name, const struct stat *statp,
		    off_t off)
{
	struct fuse_dh *dh = (struct fuse_dh *) dh_;
	struct stat stbuf;
	size_t newlen;

	if (statp)
		stbuf = *statp;
	else {
		memset(&stbuf, 0, sizeof(stbuf));
		stbuf.st_ino = FUSE_UNKNOWN_INO;
	}

	if (!dh->fuse->conf.use_ino) {
		stbuf.st_ino = FUSE_UNKNOWN_INO;
		if (dh->fuse->conf.readdir_ino) {
			/* Report the node id as the inode if known */
			struct node *node;

			pthread_mutex_lock(&dh->fuse->lock);
			node = lookup_node(dh->fuse, dh->nodeid, name);
			if (node)
				stbuf.st_ino = (ino_t) node->nodeid;
			pthread_mutex_unlock(&dh->fuse->lock);
		}
	}

	if (off) {
		if (extend_contents(dh, dh->needlen) == -1)
			return 1;

		dh->filled = 0;	/* offset mode: contents not cacheable */
		newlen = dh->len +
			fuse_add_direntry(dh->req, dh->contents + dh->len,
					  dh->needlen - dh->len, name,
					  &stbuf, off);
		if (newlen > dh->needlen)
			return 1;	/* reply buffer is full */
	} else {
		/* First call computes the entry size, second adds it */
		newlen = dh->len +
			fuse_add_direntry(dh->req, NULL, 0, name, NULL, 0);
		if (extend_contents(dh, newlen) == -1)
			return 1;

		fuse_add_direntry(dh->req, dh->contents + dh->len,
				  dh->size - dh->len, name, &stbuf, newlen);
	}
	dh->len = newlen;
	return 0;
}
/*
 * Invoke the filesystem's readdir to (re)fill the directory handle.
 * dh->filled is set optimistically before the call and ends up clear
 * when fill_dir ran in offset mode or an error occurred.  Returns 0 or
 * a negative errno (including any error fill_dir recorded).
 */
static int readdir_fill(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
			size_t size, off_t off, struct fuse_dh *dh,
			struct fuse_file_info *fi)
{
	char *path;
	int err;

	if (f->fs->op.readdir)
		err = get_path_nullok(f, ino, &path);
	else
		err = get_path(f, ino, &path);
	if (!err) {
		struct fuse_intr_data d;

		dh->len = 0;
		dh->error = 0;
		dh->needlen = size;
		dh->filled = 1;
		dh->req = req;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_readdir(f->fs, path, dh, fill_dir, off, fi);
		fuse_finish_interrupt(f, req, &d);
		dh->req = NULL;	/* req is only valid for this request */
		if (!err)
			err = dh->error;
		if (err)
			dh->filled = 0;
		free_path(f, ino, path);
	}
	return err;
}
/* READDIR request handler.  Directory contents are cached in the
 * fuse_dh attached to the open handle; dh->lock serializes concurrent
 * readers of the same open directory. */
static void fuse_lib_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
			     off_t off, struct fuse_file_info *llfi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_file_info fi;
	struct fuse_dh *dh = get_dirhandle(llfi, &fi);

	pthread_mutex_lock(&dh->lock);
	/* According to SUS, directory contents need to be refreshed on
	   rewinddir() */
	if (!off)
		dh->filled = 0;

	if (!dh->filled) {
		int err = readdir_fill(f, req, ino, size, off, dh, &fi);
		if (err) {
			reply_err(req, err);
			goto out;
		}
	}
	if (dh->filled) {
		/* Whole directory is buffered: serve the requested window
		   out of dh->contents, clamping at the buffer end */
		if (off < dh->len) {
			if (off + size > dh->len)
				size = dh->len - off;
		} else
			size = 0;
	} else {
		/* Filesystem used real offsets: fill_dir produced exactly
		   this request's entries at the start of the buffer */
		size = dh->len;
		off = 0;
	}
	fuse_reply_buf(req, dh->contents + off, size);
out:
	pthread_mutex_unlock(&dh->lock);
}
/* RELEASEDIR request handler: notify the filesystem, then destroy the
 * directory handle. */
static void fuse_lib_releasedir(fuse_req_t req, fuse_ino_t ino,
				struct fuse_file_info *llfi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_file_info fi;
	struct fuse_dh *dh = get_dirhandle(llfi, &fi);
	char *path;
	const char *compatpath;

	get_path_nullok(f, ino, &path);
	/* Filesystems that were not built for NULL paths get a dummy
	   path instead */
	if (path != NULL || f->nullpath_ok || f->conf.nopath)
		compatpath = path;
	else
		compatpath = "-";

	fuse_prepare_interrupt(f, req, &d);
	fuse_fs_releasedir(f->fs, compatpath, &fi);
	fuse_finish_interrupt(f, req, &d);
	free_path(f, ino, path);

	/* The lock/unlock pair waits out any fuse_lib_readdir() still in
	   its critical section before the handle is torn down */
	pthread_mutex_lock(&dh->lock);
	pthread_mutex_unlock(&dh->lock);
	pthread_mutex_destroy(&dh->lock);
	free(dh->contents);
	free(dh);
	reply_err(req, 0);
}
  3055. static void fuse_lib_fsyncdir(fuse_req_t req, fuse_ino_t ino, int datasync,
  3056. struct fuse_file_info *llfi)
  3057. {
  3058. struct fuse *f = req_fuse_prepare(req);
  3059. struct fuse_file_info fi;
  3060. char *path;
  3061. int err;
  3062. get_dirhandle(llfi, &fi);
  3063. err = get_path_nullok(f, ino, &path);
  3064. if (!err) {
  3065. struct fuse_intr_data d;
  3066. fuse_prepare_interrupt(f, req, &d);
  3067. err = fuse_fs_fsyncdir(f->fs, path, datasync, &fi);
  3068. fuse_finish_interrupt(f, req, &d);
  3069. free_path(f, ino, path);
  3070. }
  3071. reply_err(req, err);
  3072. }
  3073. static void fuse_lib_statfs(fuse_req_t req, fuse_ino_t ino)
  3074. {
  3075. struct fuse *f = req_fuse_prepare(req);
  3076. struct statvfs buf;
  3077. char *path = NULL;
  3078. int err = 0;
  3079. memset(&buf, 0, sizeof(buf));
  3080. if (ino)
  3081. err = get_path(f, ino, &path);
  3082. if (!err) {
  3083. struct fuse_intr_data d;
  3084. fuse_prepare_interrupt(f, req, &d);
  3085. err = fuse_fs_statfs(f->fs, path ? path : "/", &buf);
  3086. fuse_finish_interrupt(f, req, &d);
  3087. free_path(f, ino, path);
  3088. }
  3089. if (!err)
  3090. fuse_reply_statfs(req, &buf);
  3091. else
  3092. reply_err(req, err);
  3093. }
  3094. static void fuse_lib_setxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
  3095. const char *value, size_t size, int flags)
  3096. {
  3097. struct fuse *f = req_fuse_prepare(req);
  3098. char *path;
  3099. int err;
  3100. err = get_path(f, ino, &path);
  3101. if (!err) {
  3102. struct fuse_intr_data d;
  3103. fuse_prepare_interrupt(f, req, &d);
  3104. err = fuse_fs_setxattr(f->fs, path, name, value, size, flags);
  3105. fuse_finish_interrupt(f, req, &d);
  3106. free_path(f, ino, path);
  3107. }
  3108. reply_err(req, err);
  3109. }
  3110. static int common_getxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  3111. const char *name, char *value, size_t size)
  3112. {
  3113. int err;
  3114. char *path;
  3115. err = get_path(f, ino, &path);
  3116. if (!err) {
  3117. struct fuse_intr_data d;
  3118. fuse_prepare_interrupt(f, req, &d);
  3119. err = fuse_fs_getxattr(f->fs, path, name, value, size);
  3120. fuse_finish_interrupt(f, req, &d);
  3121. free_path(f, ino, path);
  3122. }
  3123. return err;
  3124. }
  3125. static void fuse_lib_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
  3126. size_t size)
  3127. {
  3128. struct fuse *f = req_fuse_prepare(req);
  3129. int res;
  3130. if (size) {
  3131. char *value = (char *) malloc(size);
  3132. if (value == NULL) {
  3133. reply_err(req, -ENOMEM);
  3134. return;
  3135. }
  3136. res = common_getxattr(f, req, ino, name, value, size);
  3137. if (res > 0)
  3138. fuse_reply_buf(req, value, res);
  3139. else
  3140. reply_err(req, res);
  3141. free(value);
  3142. } else {
  3143. res = common_getxattr(f, req, ino, name, NULL, 0);
  3144. if (res >= 0)
  3145. fuse_reply_xattr(req, res);
  3146. else
  3147. reply_err(req, res);
  3148. }
  3149. }
  3150. static int common_listxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  3151. char *list, size_t size)
  3152. {
  3153. char *path;
  3154. int err;
  3155. err = get_path(f, ino, &path);
  3156. if (!err) {
  3157. struct fuse_intr_data d;
  3158. fuse_prepare_interrupt(f, req, &d);
  3159. err = fuse_fs_listxattr(f->fs, path, list, size);
  3160. fuse_finish_interrupt(f, req, &d);
  3161. free_path(f, ino, path);
  3162. }
  3163. return err;
  3164. }
  3165. static void fuse_lib_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
  3166. {
  3167. struct fuse *f = req_fuse_prepare(req);
  3168. int res;
  3169. if (size) {
  3170. char *list = (char *) malloc(size);
  3171. if (list == NULL) {
  3172. reply_err(req, -ENOMEM);
  3173. return;
  3174. }
  3175. res = common_listxattr(f, req, ino, list, size);
  3176. if (res > 0)
  3177. fuse_reply_buf(req, list, res);
  3178. else
  3179. reply_err(req, res);
  3180. free(list);
  3181. } else {
  3182. res = common_listxattr(f, req, ino, NULL, 0);
  3183. if (res >= 0)
  3184. fuse_reply_xattr(req, res);
  3185. else
  3186. reply_err(req, res);
  3187. }
  3188. }
  3189. static void fuse_lib_removexattr(fuse_req_t req, fuse_ino_t ino,
  3190. const char *name)
  3191. {
  3192. struct fuse *f = req_fuse_prepare(req);
  3193. char *path;
  3194. int err;
  3195. err = get_path(f, ino, &path);
  3196. if (!err) {
  3197. struct fuse_intr_data d;
  3198. fuse_prepare_interrupt(f, req, &d);
  3199. err = fuse_fs_removexattr(f->fs, path, name);
  3200. fuse_finish_interrupt(f, req, &d);
  3201. free_path(f, ino, path);
  3202. }
  3203. reply_err(req, err);
  3204. }
  3205. static struct lock *locks_conflict(struct node *node, const struct lock *lock)
  3206. {
  3207. struct lock *l;
  3208. for (l = node->locks; l; l = l->next)
  3209. if (l->owner != lock->owner &&
  3210. lock->start <= l->end && l->start <= lock->end &&
  3211. (l->type == F_WRLCK || lock->type == F_WRLCK))
  3212. break;
  3213. return l;
  3214. }
  3215. static void delete_lock(struct lock **lockp)
  3216. {
  3217. struct lock *l = *lockp;
  3218. *lockp = l->next;
  3219. free(l);
  3220. }
  3221. static void insert_lock(struct lock **pos, struct lock *lock)
  3222. {
  3223. lock->next = *pos;
  3224. *pos = lock;
  3225. }
/* Insert (or, for F_UNLCK, remove) a POSIX lock in a node's lock list,
 * merging adjacent/overlapping ranges of the same type and splitting
 * differently-typed ranges of the same owner.  Returns 0 on success or
 * -ENOLCK if the worst-case allocations cannot be made up front.
 * Caller must hold f->lock. */
static int locks_insert(struct node *node, struct lock *lock)
{
	struct lock **lp;
	struct lock *newl1 = NULL;
	struct lock *newl2 = NULL;

	/* A whole-file unlock never needs new entries; anything else may
	   need up to two (one inserted lock + one split remainder) */
	if (lock->type != F_UNLCK || lock->start != 0 ||
	    lock->end != OFFSET_MAX) {
		newl1 = malloc(sizeof(struct lock));
		newl2 = malloc(sizeof(struct lock));

		if (!newl1 || !newl2) {
			free(newl1);
			free(newl2);
			return -ENOLCK;
		}
	}

	for (lp = &node->locks; *lp;) {
		struct lock *l = *lp;
		if (l->owner != lock->owner)
			goto skip;

		if (lock->type == l->type) {
			/* Same type: coalesce touching/overlapping ranges */
			if (l->end < lock->start - 1)
				goto skip;
			if (lock->end < l->start - 1)
				break;
			if (l->start <= lock->start && lock->end <= l->end)
				goto out;	/* already fully covered */
			if (l->start < lock->start)
				lock->start = l->start;
			if (lock->end < l->end)
				lock->end = l->end;
			goto delete;
		} else {
			/* Different type: carve the new range out of l */
			if (l->end < lock->start)
				goto skip;
			if (lock->end < l->start)
				break;
			if (lock->start <= l->start && l->end <= lock->end)
				goto delete;	/* l fully replaced */
			if (l->end <= lock->end) {
				/* trim the tail of l */
				l->end = lock->start - 1;
				goto skip;
			}
			if (lock->start <= l->start) {
				/* trim the head of l */
				l->start = lock->end + 1;
				break;
			}
			/* New range lies strictly inside l: split it */
			*newl2 = *l;
			newl2->start = lock->end + 1;
			l->end = lock->start - 1;
			insert_lock(&l->next, newl2);
			newl2 = NULL;
		}
	skip:
		lp = &l->next;
		continue;
	delete:
		delete_lock(lp);
	}
	if (lock->type != F_UNLCK) {
		*newl1 = *lock;
		insert_lock(lp, newl1);
		newl1 = NULL;
	}
out:
	/* Free whichever pre-allocations went unused */
	free(newl1);
	free(newl2);
	return 0;
}
  3294. static void flock_to_lock(struct flock *flock, struct lock *lock)
  3295. {
  3296. memset(lock, 0, sizeof(struct lock));
  3297. lock->type = flock->l_type;
  3298. lock->start = flock->l_start;
  3299. lock->end =
  3300. flock->l_len ? flock->l_start + flock->l_len - 1 : OFFSET_MAX;
  3301. lock->pid = flock->l_pid;
  3302. }
  3303. static void lock_to_flock(struct lock *lock, struct flock *flock)
  3304. {
  3305. flock->l_type = lock->type;
  3306. flock->l_start = lock->start;
  3307. flock->l_len =
  3308. (lock->end == OFFSET_MAX) ? 0 : lock->end - lock->start + 1;
  3309. flock->l_pid = lock->pid;
  3310. }
/* Shared FLUSH implementation: run the filesystem's flush() and drop
 * every POSIX lock held by this lock owner, mirroring the unlock in
 * the library's own lock bookkeeping. */
static int fuse_flush_common(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
			     const char *path, struct fuse_file_info *fi)
{
	struct fuse_intr_data d;
	struct flock lock;
	struct lock l;
	int err;
	int errlock;

	fuse_prepare_interrupt(f, req, &d);
	/* Closing a file drops all of the owner's locks: full-range unlock */
	memset(&lock, 0, sizeof(lock));
	lock.l_type = F_UNLCK;
	lock.l_whence = SEEK_SET;
	err = fuse_fs_flush(f->fs, path, fi);
	errlock = fuse_fs_lock(f->fs, path, fi, F_SETLK, &lock);
	fuse_finish_interrupt(f, req, &d);

	if (errlock != -ENOSYS) {
		/* Keep the library's lock table in sync with the unlock */
		flock_to_lock(&lock, &l);
		l.owner = fi->lock_owner;
		pthread_mutex_lock(&f->lock);
		locks_insert(get_node(f, ino), &l);
		pthread_mutex_unlock(&f->lock);

		/* if op.lock() is defined FLUSH is needed regardless
		   of op.flush() */
		if (err == -ENOSYS)
			err = 0;
	}
	return err;
}
  3339. static void fuse_lib_release(fuse_req_t req, fuse_ino_t ino,
  3340. struct fuse_file_info *fi)
  3341. {
  3342. struct fuse *f = req_fuse_prepare(req);
  3343. struct fuse_intr_data d;
  3344. char *path;
  3345. int err = 0;
  3346. get_path_nullok(f, ino, &path);
  3347. if (fi->flush) {
  3348. err = fuse_flush_common(f, req, ino, path, fi);
  3349. if (err == -ENOSYS)
  3350. err = 0;
  3351. }
  3352. fuse_prepare_interrupt(f, req, &d);
  3353. fuse_do_release(f, ino, path, fi);
  3354. fuse_finish_interrupt(f, req, &d);
  3355. free_path(f, ino, path);
  3356. reply_err(req, err);
  3357. }
  3358. static void fuse_lib_flush(fuse_req_t req, fuse_ino_t ino,
  3359. struct fuse_file_info *fi)
  3360. {
  3361. struct fuse *f = req_fuse_prepare(req);
  3362. char *path;
  3363. int err;
  3364. get_path_nullok(f, ino, &path);
  3365. err = fuse_flush_common(f, req, ino, path, fi);
  3366. free_path(f, ino, path);
  3367. reply_err(req, err);
  3368. }
  3369. static int fuse_lock_common(fuse_req_t req, fuse_ino_t ino,
  3370. struct fuse_file_info *fi, struct flock *lock,
  3371. int cmd)
  3372. {
  3373. struct fuse *f = req_fuse_prepare(req);
  3374. char *path;
  3375. int err;
  3376. err = get_path_nullok(f, ino, &path);
  3377. if (!err) {
  3378. struct fuse_intr_data d;
  3379. fuse_prepare_interrupt(f, req, &d);
  3380. err = fuse_fs_lock(f->fs, path, fi, cmd, lock);
  3381. fuse_finish_interrupt(f, req, &d);
  3382. free_path(f, ino, path);
  3383. }
  3384. return err;
  3385. }
/* GETLK request handler: first consult the library's own lock table for
 * a conflicting lock; only when none is found ask the filesystem. */
static void fuse_lib_getlk(fuse_req_t req, fuse_ino_t ino,
			   struct fuse_file_info *fi, struct flock *lock)
{
	int err;
	struct lock l;
	struct lock *conflict;
	struct fuse *f = req_fuse(req);

	flock_to_lock(lock, &l);
	l.owner = fi->lock_owner;
	pthread_mutex_lock(&f->lock);
	conflict = locks_conflict(get_node(f, ino), &l);
	if (conflict)
		/* Report the blocking lock back to the caller */
		lock_to_flock(conflict, lock);
	pthread_mutex_unlock(&f->lock);
	if (!conflict)
		err = fuse_lock_common(req, ino, fi, lock, F_GETLK);
	else
		err = 0;
	if (!err)
		fuse_reply_lock(req, lock);
	else
		reply_err(req, err);
}
  3409. static void fuse_lib_setlk(fuse_req_t req, fuse_ino_t ino,
  3410. struct fuse_file_info *fi, struct flock *lock,
  3411. int sleep)
  3412. {
  3413. int err = fuse_lock_common(req, ino, fi, lock,
  3414. sleep ? F_SETLKW : F_SETLK);
  3415. if (!err) {
  3416. struct fuse *f = req_fuse(req);
  3417. struct lock l;
  3418. flock_to_lock(lock, &l);
  3419. l.owner = fi->lock_owner;
  3420. pthread_mutex_lock(&f->lock);
  3421. locks_insert(get_node(f, ino), &l);
  3422. pthread_mutex_unlock(&f->lock);
  3423. }
  3424. reply_err(req, err);
  3425. }
  3426. static void fuse_lib_flock(fuse_req_t req, fuse_ino_t ino,
  3427. struct fuse_file_info *fi, int op)
  3428. {
  3429. struct fuse *f = req_fuse_prepare(req);
  3430. char *path;
  3431. int err;
  3432. err = get_path_nullok(f, ino, &path);
  3433. if (err == 0) {
  3434. struct fuse_intr_data d;
  3435. fuse_prepare_interrupt(f, req, &d);
  3436. err = fuse_fs_flock(f->fs, path, fi, op);
  3437. fuse_finish_interrupt(f, req, &d);
  3438. free_path(f, ino, path);
  3439. }
  3440. reply_err(req, err);
  3441. }
  3442. static void fuse_lib_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
  3443. uint64_t idx)
  3444. {
  3445. struct fuse *f = req_fuse_prepare(req);
  3446. struct fuse_intr_data d;
  3447. char *path;
  3448. int err;
  3449. err = get_path(f, ino, &path);
  3450. if (!err) {
  3451. fuse_prepare_interrupt(f, req, &d);
  3452. err = fuse_fs_bmap(f->fs, path, blocksize, &idx);
  3453. fuse_finish_interrupt(f, req, &d);
  3454. free_path(f, ino, path);
  3455. }
  3456. if (!err)
  3457. fuse_reply_bmap(req, idx);
  3458. else
  3459. reply_err(req, err);
  3460. }
/* IOCTL request handler.  Only "restricted" ioctls are supported: the
 * kernel has already copied the input in and will copy the output back,
 * so the buffer sizes are fixed by the _IOC encoding of cmd. */
static void fuse_lib_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd, void *arg,
			   struct fuse_file_info *llfi, unsigned int flags,
			   const void *in_buf, size_t in_bufsz,
			   size_t out_bufsz)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_file_info fi;
	char *path, *out_buf = NULL;
	int err;

	err = -EPERM;
	if (flags & FUSE_IOCTL_UNRESTRICTED)
		goto err;

	if (flags & FUSE_IOCTL_DIR)
		get_dirhandle(llfi, &fi);
	else
		fi = *llfi;

	if (out_bufsz) {
		err = -ENOMEM;
		out_buf = malloc(out_bufsz);
		if (!out_buf)
			goto err;
	}

	assert(!in_bufsz || !out_bufsz || in_bufsz == out_bufsz);
	if (out_buf)
		/* Bidirectional ioctl: seed the output buffer with the
		   input so the filesystem can modify it in place */
		memcpy(out_buf, in_buf, in_bufsz);

	err = get_path_nullok(f, ino, &path);
	if (err)
		goto err;

	fuse_prepare_interrupt(f, req, &d);
	err = fuse_fs_ioctl(f->fs, path, cmd, arg, &fi, flags,
			    out_buf ?: (void *)in_buf);
	fuse_finish_interrupt(f, req, &d);
	free_path(f, ino, path);

	fuse_reply_ioctl(req, err, out_buf, out_bufsz);
	goto out;
err:
	reply_err(req, err);
out:
	free(out_buf);
}
  3502. static void fuse_lib_poll(fuse_req_t req, fuse_ino_t ino,
  3503. struct fuse_file_info *fi, struct fuse_pollhandle *ph)
  3504. {
  3505. struct fuse *f = req_fuse_prepare(req);
  3506. struct fuse_intr_data d;
  3507. char *path;
  3508. int err;
  3509. unsigned revents = 0;
  3510. err = get_path_nullok(f, ino, &path);
  3511. if (!err) {
  3512. fuse_prepare_interrupt(f, req, &d);
  3513. err = fuse_fs_poll(f->fs, path, fi, ph, &revents);
  3514. fuse_finish_interrupt(f, req, &d);
  3515. free_path(f, ino, path);
  3516. }
  3517. if (!err)
  3518. fuse_reply_poll(req, revents);
  3519. else
  3520. reply_err(req, err);
  3521. }
  3522. static void fuse_lib_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
  3523. off_t offset, off_t length, struct fuse_file_info *fi)
  3524. {
  3525. struct fuse *f = req_fuse_prepare(req);
  3526. struct fuse_intr_data d;
  3527. char *path;
  3528. int err;
  3529. err = get_path_nullok(f, ino, &path);
  3530. if (!err) {
  3531. fuse_prepare_interrupt(f, req, &d);
  3532. err = fuse_fs_fallocate(f->fs, path, mode, offset, length, fi);
  3533. fuse_finish_interrupt(f, req, &d);
  3534. free_path(f, ino, path);
  3535. }
  3536. reply_err(req, err);
  3537. }
  3538. static int clean_delay(struct fuse *f)
  3539. {
  3540. /*
  3541. * This is calculating the delay between clean runs. To
  3542. * reduce the number of cleans we are doing them 10 times
  3543. * within the remember window.
  3544. */
  3545. int min_sleep = 60;
  3546. int max_sleep = 3600;
  3547. int sleep_time = f->conf.remember / 10;
  3548. if (sleep_time > max_sleep)
  3549. return max_sleep;
  3550. if (sleep_time < min_sleep)
  3551. return min_sleep;
  3552. return sleep_time;
  3553. }
/* Walk the LRU list of remembered (forgotten by the kernel) nodes and
 * drop those whose remember timeout has expired.  The list is ordered
 * by forget time, so the walk stops at the first young-enough node.
 * Returns the number of seconds until the next clean pass. */
int fuse_clean_cache(struct fuse *f)
{
	struct node_lru *lnode;
	struct list_head *curr, *next;
	struct node *node;
	struct timespec now;

	pthread_mutex_lock(&f->lock);

	curr_time(&now);

	for (curr = f->lru_table.next; curr != &f->lru_table; curr = next) {
		double age;

		/* Save the next pointer: unref_node() may free curr */
		next = curr->next;
		lnode = list_entry(curr, struct node_lru, lru);
		node = &lnode->node;

		age = diff_timespec(&now, &lnode->forget_time);
		if (age <= f->conf.remember)
			break;

		/* Nodes on the LRU list hold exactly the remember ref */
		assert(node->nlookup == 1);

		/* Don't forget active directories */
		if (node->refctr > 1)
			continue;

		node->nlookup = 0;
		unhash_name(f, node);
		unref_node(f, node);
	}
	pthread_mutex_unlock(&f->lock);

	return clean_delay(f);
}
/* Lowlevel operation table: translates inode-based kernel requests into
 * the path-based highlevel fuse_operations API. */
static struct fuse_lowlevel_ops fuse_path_ops = {
	.init = fuse_lib_init,
	.destroy = fuse_lib_destroy,
	.lookup = fuse_lib_lookup,
	.forget = fuse_lib_forget,
	.forget_multi = fuse_lib_forget_multi,
	.getattr = fuse_lib_getattr,
	.setattr = fuse_lib_setattr,
	.access = fuse_lib_access,
	.readlink = fuse_lib_readlink,
	.mknod = fuse_lib_mknod,
	.mkdir = fuse_lib_mkdir,
	.unlink = fuse_lib_unlink,
	.rmdir = fuse_lib_rmdir,
	.symlink = fuse_lib_symlink,
	.rename = fuse_lib_rename,
	.link = fuse_lib_link,
	.create = fuse_lib_create,
	.open = fuse_lib_open,
	.read = fuse_lib_read,
	.write_buf = fuse_lib_write_buf,
	.flush = fuse_lib_flush,
	.release = fuse_lib_release,
	.fsync = fuse_lib_fsync,
	.opendir = fuse_lib_opendir,
	.readdir = fuse_lib_readdir,
	.releasedir = fuse_lib_releasedir,
	.fsyncdir = fuse_lib_fsyncdir,
	.statfs = fuse_lib_statfs,
	.setxattr = fuse_lib_setxattr,
	.getxattr = fuse_lib_getxattr,
	.listxattr = fuse_lib_listxattr,
	.removexattr = fuse_lib_removexattr,
	.getlk = fuse_lib_getlk,
	.setlk = fuse_lib_setlk,
	.flock = fuse_lib_flock,
	.bmap = fuse_lib_bmap,
	.ioctl = fuse_lib_ioctl,
	.poll = fuse_lib_poll,
	.fallocate = fuse_lib_fallocate,
};
  3622. int fuse_notify_poll(struct fuse_pollhandle *ph)
  3623. {
  3624. return fuse_lowlevel_notify_poll(ph);
  3625. }
  3626. static void free_cmd(struct fuse_cmd *cmd)
  3627. {
  3628. free(cmd->buf);
  3629. free(cmd);
  3630. }
  3631. void fuse_process_cmd(struct fuse *f, struct fuse_cmd *cmd)
  3632. {
  3633. fuse_session_process(f->se, cmd->buf, cmd->buflen, cmd->ch);
  3634. free_cmd(cmd);
  3635. }
  3636. int fuse_exited(struct fuse *f)
  3637. {
  3638. return fuse_session_exited(f->se);
  3639. }
  3640. struct fuse_session *fuse_get_session(struct fuse *f)
  3641. {
  3642. return f->se;
  3643. }
  3644. static struct fuse_cmd *fuse_alloc_cmd(size_t bufsize)
  3645. {
  3646. struct fuse_cmd *cmd = (struct fuse_cmd *) malloc(sizeof(*cmd));
  3647. if (cmd == NULL) {
  3648. fprintf(stderr, "fuse: failed to allocate cmd\n");
  3649. return NULL;
  3650. }
  3651. cmd->buf = (char *) malloc(bufsize);
  3652. if (cmd->buf == NULL) {
  3653. fprintf(stderr, "fuse: failed to allocate read buffer\n");
  3654. free(cmd);
  3655. return NULL;
  3656. }
  3657. return cmd;
  3658. }
  3659. struct fuse_cmd *fuse_read_cmd(struct fuse *f)
  3660. {
  3661. struct fuse_chan *ch = fuse_session_next_chan(f->se, NULL);
  3662. size_t bufsize = fuse_chan_bufsize(ch);
  3663. struct fuse_cmd *cmd = fuse_alloc_cmd(bufsize);
  3664. if (cmd != NULL) {
  3665. int res = fuse_chan_recv(&ch, cmd->buf, bufsize);
  3666. if (res <= 0) {
  3667. free_cmd(cmd);
  3668. if (res < 0 && res != -EINTR && res != -EAGAIN)
  3669. fuse_exit(f);
  3670. return NULL;
  3671. }
  3672. cmd->buflen = res;
  3673. cmd->ch = ch;
  3674. }
  3675. return cmd;
  3676. }
  3677. static int fuse_session_loop_remember(struct fuse *f)
  3678. {
  3679. struct fuse_session *se = f->se;
  3680. int res = 0;
  3681. struct timespec now;
  3682. time_t next_clean;
  3683. struct fuse_chan *ch = fuse_session_next_chan(se, NULL);
  3684. size_t bufsize = fuse_chan_bufsize(ch);
  3685. char *buf = (char *) malloc(bufsize);
  3686. struct pollfd fds = {
  3687. .fd = fuse_chan_fd(ch),
  3688. .events = POLLIN
  3689. };
  3690. if (!buf) {
  3691. fprintf(stderr, "fuse: failed to allocate read buffer\n");
  3692. return -1;
  3693. }
  3694. curr_time(&now);
  3695. next_clean = now.tv_sec;
  3696. while (!fuse_session_exited(se)) {
  3697. struct fuse_chan *tmpch = ch;
  3698. struct fuse_buf fbuf = {
  3699. .mem = buf,
  3700. .size = bufsize,
  3701. };
  3702. unsigned timeout;
  3703. curr_time(&now);
  3704. if (now.tv_sec < next_clean)
  3705. timeout = next_clean - now.tv_sec;
  3706. else
  3707. timeout = 0;
  3708. res = poll(&fds, 1, timeout * 1000);
  3709. if (res == -1) {
  3710. if (errno == -EINTR)
  3711. continue;
  3712. else
  3713. break;
  3714. } else if (res > 0) {
  3715. res = fuse_session_receive_buf(se, &fbuf, &tmpch);
  3716. if (res == -EINTR)
  3717. continue;
  3718. if (res <= 0)
  3719. break;
  3720. fuse_session_process_buf(se, &fbuf, tmpch);
  3721. } else {
  3722. timeout = fuse_clean_cache(f);
  3723. curr_time(&now);
  3724. next_clean = now.tv_sec + timeout;
  3725. }
  3726. }
  3727. free(buf);
  3728. fuse_session_reset(se);
  3729. return res < 0 ? -1 : 0;
  3730. }
  3731. int fuse_loop(struct fuse *f)
  3732. {
  3733. if (!f)
  3734. return -1;
  3735. if (lru_enabled(f))
  3736. return fuse_session_loop_remember(f);
  3737. return fuse_session_loop(f->se);
  3738. }
  3739. int fuse_invalidate(struct fuse *f, const char *path)
  3740. {
  3741. (void) f;
  3742. (void) path;
  3743. return -EINVAL;
  3744. }
  3745. void fuse_exit(struct fuse *f)
  3746. {
  3747. fuse_session_exit(f->se);
  3748. }
  3749. struct fuse_context *fuse_get_context(void)
  3750. {
  3751. return &fuse_get_context_internal()->ctx;
  3752. }
  3753. /*
  3754. * The size of fuse_context got extended, so need to be careful about
  3755. * incompatibility (i.e. a new binary cannot work with an old
  3756. * library).
  3757. */
struct fuse_context *fuse_get_context_compat22(void);
struct fuse_context *fuse_get_context_compat22(void)
{
	/* The common prefix of fuse_context is layout-compatible, so the
	   FUSE_2.2 entry point can hand out the same object */
	return &fuse_get_context_internal()->ctx;
}
FUSE_SYMVER(".symver fuse_get_context_compat22,fuse_get_context@FUSE_2.2");
  3764. int fuse_getgroups(int size, gid_t list[])
  3765. {
  3766. fuse_req_t req = fuse_get_context_internal()->req;
  3767. return fuse_req_getgroups(req, size, list);
  3768. }
  3769. int fuse_interrupted(void)
  3770. {
  3771. return fuse_req_interrupted(fuse_get_context_internal()->req);
  3772. }
  3773. void fuse_set_getcontext_func(struct fuse_context *(*func)(void))
  3774. {
  3775. (void) func;
  3776. /* no-op */
  3777. }
/* Option keys handled by fuse_lib_opt_proc() */
enum {
	KEY_HELP,
};
/* Option table for the highlevel library; the offsets index into
   struct fuse_config */
#define FUSE_LIB_OPT(t, p, v) { t, offsetof(struct fuse_config, p), v }
static const struct fuse_opt fuse_lib_opts[] = {
	FUSE_OPT_KEY("-h", KEY_HELP),
	FUSE_OPT_KEY("--help", KEY_HELP),
	FUSE_OPT_KEY("debug", FUSE_OPT_KEY_KEEP),
	FUSE_OPT_KEY("-d", FUSE_OPT_KEY_KEEP),
	FUSE_LIB_OPT("debug", debug, 1),
	FUSE_LIB_OPT("-d", debug, 1),
	FUSE_LIB_OPT("hard_remove", hard_remove, 1),
	FUSE_LIB_OPT("use_ino", use_ino, 1),
	FUSE_LIB_OPT("readdir_ino", readdir_ino, 1),
	FUSE_LIB_OPT("direct_io", direct_io, 1),
	FUSE_LIB_OPT("kernel_cache", kernel_cache, 1),
	FUSE_LIB_OPT("auto_cache", auto_cache, 1),
	FUSE_LIB_OPT("noauto_cache", auto_cache, 0),
	FUSE_LIB_OPT("umask=", set_mode, 1),
	FUSE_LIB_OPT("umask=%o", umask, 0),
	FUSE_LIB_OPT("uid=", set_uid, 1),
	FUSE_LIB_OPT("uid=%d", uid, 0),
	FUSE_LIB_OPT("gid=", set_gid, 1),
	FUSE_LIB_OPT("gid=%d", gid, 0),
	FUSE_LIB_OPT("entry_timeout=%lf", entry_timeout, 0),
	FUSE_LIB_OPT("attr_timeout=%lf", attr_timeout, 0),
	FUSE_LIB_OPT("ac_attr_timeout=%lf", ac_attr_timeout, 0),
	FUSE_LIB_OPT("ac_attr_timeout=", ac_attr_timeout_set, 1),
	FUSE_LIB_OPT("negative_timeout=%lf", negative_timeout, 0),
	FUSE_LIB_OPT("noforget", remember, -1),
	FUSE_LIB_OPT("remember=%u", remember, 0),
	FUSE_LIB_OPT("nopath", nopath, 1),
	FUSE_LIB_OPT("intr", intr, 1),
	FUSE_LIB_OPT("intr_signal=%d", intr_signal, 0),
	FUSE_LIB_OPT("modules=%s", modules, 0),
	FUSE_OPT_END
};
/* Print the highlevel library's "-o" option help text to stderr */
static void fuse_lib_help(void)
{
	fprintf(stderr,
" -o hard_remove immediate removal (don't hide files)\n"
" -o use_ino let filesystem set inode numbers\n"
" -o readdir_ino try to fill in d_ino in readdir\n"
" -o direct_io use direct I/O\n"
" -o kernel_cache cache files in kernel\n"
" -o [no]auto_cache enable caching based on modification times (off)\n"
" -o umask=M set file permissions (octal)\n"
" -o uid=N set file owner\n"
" -o gid=N set file group\n"
" -o entry_timeout=T cache timeout for names (1.0s)\n"
" -o negative_timeout=T cache timeout for deleted names (0.0s)\n"
" -o attr_timeout=T cache timeout for attributes (1.0s)\n"
" -o ac_attr_timeout=T auto cache timeout for attributes (attr_timeout)\n"
" -o noforget never forget cached inodes\n"
" -o remember=T remember cached inodes for T seconds (0s)\n"
" -o nopath don't supply path if not necessary\n"
" -o intr allow requests to be interrupted\n"
" -o intr_signal=NUM signal to send on interrupt (%i)\n"
" -o modules=M1[:M2...] names of modules to push onto filesystem stack\n"
"\n", FUSE_DEFAULT_INTR_SIGNAL);
}
/* Print the "-h" help of every registered module by calling each module
 * factory with a synthetic ["", "-h"] argument vector. */
static void fuse_lib_help_modules(void)
{
	struct fuse_module *m;
	fprintf(stderr, "\nModule options:\n");
	pthread_mutex_lock(&fuse_context_lock);
	for (m = fuse_modules; m; m = m->next) {
		struct fuse_fs *fs = NULL;
		struct fuse_fs *newfs;
		struct fuse_args args = FUSE_ARGS_INIT(0, NULL);
		if (fuse_opt_add_arg(&args, "") != -1 &&
		    fuse_opt_add_arg(&args, "-h") != -1) {
			fprintf(stderr, "\n[%s]\n", m->name);
			/* A factory invoked with -h prints its help and
			   is expected to fail (return NULL) */
			newfs = m->factory(&args, &fs);
			assert(newfs == NULL);
		}
		fuse_opt_free_args(&args);
	}
	pthread_mutex_unlock(&fuse_context_lock);
}
  3858. static int fuse_lib_opt_proc(void *data, const char *arg, int key,
  3859. struct fuse_args *outargs)
  3860. {
  3861. (void) arg; (void) outargs;
  3862. if (key == KEY_HELP) {
  3863. struct fuse_config *conf = (struct fuse_config *) data;
  3864. fuse_lib_help();
  3865. conf->help = 1;
  3866. }
  3867. return 1;
  3868. }
  3869. int fuse_is_lib_option(const char *opt)
  3870. {
  3871. return fuse_lowlevel_is_lib_option(opt) ||
  3872. fuse_opt_match(fuse_lib_opts, opt);
  3873. }
  3874. static int fuse_init_intr_signal(int signum, int *installed)
  3875. {
  3876. struct sigaction old_sa;
  3877. if (sigaction(signum, NULL, &old_sa) == -1) {
  3878. perror("fuse: cannot get old signal handler");
  3879. return -1;
  3880. }
  3881. if (old_sa.sa_handler == SIG_DFL) {
  3882. struct sigaction sa;
  3883. memset(&sa, 0, sizeof(struct sigaction));
  3884. sa.sa_handler = fuse_intr_sighandler;
  3885. sigemptyset(&sa.sa_mask);
  3886. if (sigaction(signum, &sa, NULL) == -1) {
  3887. perror("fuse: cannot set interrupt signal handler");
  3888. return -1;
  3889. }
  3890. *installed = 1;
  3891. }
  3892. return 0;
  3893. }
  3894. static void fuse_restore_intr_signal(int signum)
  3895. {
  3896. struct sigaction sa;
  3897. memset(&sa, 0, sizeof(struct sigaction));
  3898. sa.sa_handler = SIG_DFL;
  3899. sigaction(signum, &sa, NULL);
  3900. }
  3901. static int fuse_push_module(struct fuse *f, const char *module,
  3902. struct fuse_args *args)
  3903. {
  3904. struct fuse_fs *fs[2] = { f->fs, NULL };
  3905. struct fuse_fs *newfs;
  3906. struct fuse_module *m = fuse_get_module(module);
  3907. if (!m)
  3908. return -1;
  3909. newfs = m->factory(args, fs);
  3910. if (!newfs) {
  3911. fuse_put_module(m);
  3912. return -1;
  3913. }
  3914. newfs->m = m;
  3915. f->fs = newfs;
  3916. f->nullpath_ok = newfs->op.flag_nullpath_ok && f->nullpath_ok;
  3917. f->conf.nopath = newfs->op.flag_nopath && f->conf.nopath;
  3918. f->utime_omit_ok = newfs->op.flag_utime_omit_ok && f->utime_omit_ok;
  3919. return 0;
  3920. }
  3921. struct fuse_fs *fuse_fs_new(const struct fuse_operations *op, size_t op_size,
  3922. void *user_data)
  3923. {
  3924. struct fuse_fs *fs;
  3925. if (sizeof(struct fuse_operations) < op_size) {
  3926. fprintf(stderr, "fuse: warning: library too old, some operations may not not work\n");
  3927. op_size = sizeof(struct fuse_operations);
  3928. }
  3929. fs = (struct fuse_fs *) calloc(1, sizeof(struct fuse_fs));
  3930. if (!fs) {
  3931. fprintf(stderr, "fuse: failed to allocate fuse_fs object\n");
  3932. return NULL;
  3933. }
  3934. fs->user_data = user_data;
  3935. if (op)
  3936. memcpy(&fs->op, op, op_size);
  3937. return fs;
  3938. }
  3939. static int node_table_init(struct node_table *t)
  3940. {
  3941. t->size = NODE_TABLE_MIN_SIZE;
  3942. t->array = (struct node **) calloc(1, sizeof(struct node *) * t->size);
  3943. if (t->array == NULL) {
  3944. fprintf(stderr, "fuse: memory allocation failed\n");
  3945. return -1;
  3946. }
  3947. t->use = 0;
  3948. t->split = 0;
  3949. return 0;
  3950. }
  3951. static void *fuse_prune_nodes(void *fuse)
  3952. {
  3953. struct fuse *f = fuse;
  3954. int sleep_time;
  3955. while(1) {
  3956. sleep_time = fuse_clean_cache(f);
  3957. sleep(sleep_time);
  3958. }
  3959. return NULL;
  3960. }
  3961. int fuse_start_cleanup_thread(struct fuse *f)
  3962. {
  3963. if (lru_enabled(f))
  3964. return fuse_start_thread(&f->prune_thread, fuse_prune_nodes, f);
  3965. return 0;
  3966. }
/* Cancel and reap the node-pruning thread started by
 * fuse_start_cleanup_thread().  No-op unless LRU tracking is enabled. */
void fuse_stop_cleanup_thread(struct fuse *f)
{
	if (lru_enabled(f)) {
		/* NOTE(review): cancelling while holding f->lock —
		   presumably this prevents the prune thread from being
		   cancelled in the middle of a pruning pass that holds
		   the same lock, which would leave f->lock locked
		   forever; confirm against fuse_clean_cache(). */
		pthread_mutex_lock(&f->lock);
		pthread_cancel(f->prune_thread);
		pthread_mutex_unlock(&f->lock);
		/* Wait for the thread to actually terminate before the
		   caller tears down structures it may be touching */
		pthread_join(f->prune_thread, NULL);
	}
}
/* Construct a struct fuse instance: allocate the object, stack any
 * requested modules on top of the caller's operations, create the
 * lowlevel session, and install the root node.
 *
 * ch        - kernel channel to attach the session to
 * args      - mount/library options (parsed and possibly consumed here)
 * op/op_size- high-level operation table (copied)
 * user_data - opaque pointer passed back to the filesystem
 * compat    - API compatibility level (e.g. 25 for FUSE 2.5), 0 = current
 *
 * Returns the new instance or NULL on any failure.  Error handling is
 * goto-based unwinding: each label undoes exactly the steps that
 * succeeded before the failure. */
struct fuse *fuse_new_common(struct fuse_chan *ch, struct fuse_args *args,
			     const struct fuse_operations *op,
			     size_t op_size, void *user_data, int compat)
{
	struct fuse *f;
	struct node *root;
	struct fuse_fs *fs;
	/* Start from the path-based lowlevel ops and specialize below */
	struct fuse_lowlevel_ops llop = fuse_path_ops;

	if (fuse_create_context_key() == -1)
		goto out;

	f = (struct fuse *) calloc(1, sizeof(struct fuse));
	if (f == NULL) {
		fprintf(stderr, "fuse: failed to allocate fuse object\n");
		goto out_delete_context_key;
	}

	fs = fuse_fs_new(op, op_size, user_data);
	if (!fs)
		goto out_free;

	fs->compat = compat;
	f->fs = fs;
	/* Seed capability flags from the bottom layer; modules pushed
	   below may clear them */
	f->nullpath_ok = fs->op.flag_nullpath_ok;
	f->conf.nopath = fs->op.flag_nopath;
	f->utime_omit_ok = fs->op.flag_utime_omit_ok;

	/* Oh f**k, this is ugly! */
	/* Hide the lock protocol from the kernel entirely when the
	   filesystem does not implement it */
	if (!fs->op.lock) {
		llop.getlk = NULL;
		llop.setlk = NULL;
	}

	/* Defaults; may be overridden by option parsing below */
	f->conf.entry_timeout = 1.0;
	f->conf.attr_timeout = 1.0;
	f->conf.negative_timeout = 0.0;
	f->conf.intr_signal = FUSE_DEFAULT_INTR_SIGNAL;
	f->pagesize = getpagesize();
	init_list_head(&f->partial_slabs);
	init_list_head(&f->full_slabs);
	init_list_head(&f->lru_table);

	if (fuse_opt_parse(args, &f->conf, fuse_lib_opts,
			   fuse_lib_opt_proc) == -1)
		goto out_free_fs;

	/* "-o modules=a:b:c" — split on ':' in place and push each named
	   module on top of the stack, left to right */
	if (f->conf.modules) {
		char *module;
		char *next;

		for (module = f->conf.modules; module; module = next) {
			char *p;
			for (p = module; *p && *p != ':'; p++);
			next = *p ? p + 1 : NULL;
			*p = '\0';
			if (module[0] &&
			    fuse_push_module(f, module, args) == -1)
				goto out_free_fs;
		}
	}

	if (!f->conf.ac_attr_timeout_set)
		f->conf.ac_attr_timeout = f->conf.attr_timeout;

#if defined(__FreeBSD__) || defined(__NetBSD__)
	/*
	 * In FreeBSD, we always use these settings as inode numbers
	 * are needed to make getcwd(3) work.
	 */
	f->conf.readdir_ino = 1;
#endif

	if (compat && compat <= 25) {
		if (fuse_sync_compat_args(args) == -1)
			goto out_free_fs;
	}

	f->se = fuse_lowlevel_new_common(args, &llop, sizeof(llop), f);
	if (f->se == NULL) {
		if (f->conf.help)
			fuse_lib_help_modules();
		goto out_free_fs;
	}

	fuse_session_add_chan(f->se, ch);

	if (f->conf.debug) {
		fprintf(stderr, "nullpath_ok: %i\n", f->nullpath_ok);
		fprintf(stderr, "nopath: %i\n", f->conf.nopath);
		fprintf(stderr, "utime_omit_ok: %i\n", f->utime_omit_ok);
	}

	/* Trace topmost layer by default */
	f->fs->debug = f->conf.debug;
	f->ctr = 0;
	f->generation = 0;

	if (node_table_init(&f->name_table) == -1)
		goto out_free_session;

	if (node_table_init(&f->id_table) == -1)
		goto out_free_name_table;

	fuse_mutex_init(&f->lock);

	root = alloc_node(f);
	if (root == NULL) {
		fprintf(stderr, "fuse: memory allocation failed\n");
		goto out_free_id_table;
	}
	if (lru_enabled(f)) {
		struct node_lru *lnode = node_lru(root);
		init_list_head(&lnode->lru);
	}

	strcpy(root->inline_name, "/");
	root->name = root->inline_name;

	if (f->conf.intr &&
	    fuse_init_intr_signal(f->conf.intr_signal,
				  &f->intr_installed) == -1)
		goto out_free_root;

	root->parent = NULL;
	root->nodeid = FUSE_ROOT_ID;
	inc_nlookup(root);
	hash_id(f, root);

	return f;

out_free_root:
	free(root);
out_free_id_table:
	free(f->id_table.array);
out_free_name_table:
	free(f->name_table.array);
out_free_session:
	fuse_session_destroy(f->se);
out_free_fs:
	/* Horrible compatibility hack to stop the destructor from being
	   called on the filesystem without init being called first */
	fs->op.destroy = NULL;
	fuse_fs_destroy(f->fs);
	free(f->conf.modules);
out_free:
	free(f);
out_delete_context_key:
	fuse_delete_context_key();
out:
	return NULL;
}
  4103. struct fuse *fuse_new(struct fuse_chan *ch, struct fuse_args *args,
  4104. const struct fuse_operations *op, size_t op_size,
  4105. void *user_data)
  4106. {
  4107. return fuse_new_common(ch, args, op, op_size, user_data, 0);
  4108. }
/* Tear down a struct fuse created by fuse_new_common().
 *
 * Order matters: hidden files are unlinked while the filesystem stack
 * is still alive, then all nodes are freed, then the session and
 * finally the per-thread context key are destroyed. */
void fuse_destroy(struct fuse *f)
{
	size_t i;

	if (f->conf.intr && f->intr_installed)
		fuse_restore_intr_signal(f->conf.intr_signal);

	if (f->fs) {
		/* Set up a minimal fuse context so that the unlink
		   callbacks below see ctx.fuse pointing at us */
		struct fuse_context_i *c = fuse_get_context_internal();

		memset(c, 0, sizeof(*c));
		c->ctx.fuse = f;

		/* First pass: remove ".fuse_hidden*" files that were kept
		   alive because they were unlinked while still open */
		for (i = 0; i < f->id_table.size; i++) {
			struct node *node;

			for (node = f->id_table.array[i]; node != NULL;
			     node = node->id_next) {
				if (node->is_hidden) {
					char *path;
					if (try_get_path(f, node->nodeid, NULL, &path, NULL, false) == 0) {
						fuse_fs_unlink(f->fs, path);
						free(path);
					}
				}
			}
		}
	}
	/* Second pass: free every node; use 'next' since free_node()
	   invalidates the current link */
	for (i = 0; i < f->id_table.size; i++) {
		struct node *node;
		struct node *next;

		for (node = f->id_table.array[i]; node != NULL; node = next) {
			next = node->id_next;
			free_node(f, node);
			f->id_table.use--;
		}
	}
	/* All slab-allocated nodes must be gone by now */
	assert(list_empty(&f->partial_slabs));
	assert(list_empty(&f->full_slabs));

	free(f->id_table.array);
	free(f->name_table.array);
	pthread_mutex_destroy(&f->lock);
	fuse_session_destroy(f->se);
	free(f->conf.modules);
	free(f);
	fuse_delete_context_key();
}
  4151. static struct fuse *fuse_new_common_compat25(int fd, struct fuse_args *args,
  4152. const struct fuse_operations *op,
  4153. size_t op_size, int compat)
  4154. {
  4155. struct fuse *f = NULL;
  4156. struct fuse_chan *ch = fuse_kern_chan_new(fd);
  4157. if (ch)
  4158. f = fuse_new_common(ch, args, op, op_size, NULL, compat);
  4159. return f;
  4160. }
  4161. /* called with fuse_context_lock held or during initialization (before
  4162. main() has been called) */
  4163. void fuse_register_module(struct fuse_module *mod)
  4164. {
  4165. mod->ctr = 0;
  4166. mod->so = fuse_current_so;
  4167. if (mod->so)
  4168. mod->so->ctr++;
  4169. mod->next = fuse_modules;
  4170. fuse_modules = mod;
  4171. }
  4172. #if !defined(__FreeBSD__) && !defined(__NetBSD__)
  4173. static struct fuse *fuse_new_common_compat(int fd, const char *opts,
  4174. const struct fuse_operations *op,
  4175. size_t op_size, int compat)
  4176. {
  4177. struct fuse *f;
  4178. struct fuse_args args = FUSE_ARGS_INIT(0, NULL);
  4179. if (fuse_opt_add_arg(&args, "") == -1)
  4180. return NULL;
  4181. if (opts &&
  4182. (fuse_opt_add_arg(&args, "-o") == -1 ||
  4183. fuse_opt_add_arg(&args, opts) == -1)) {
  4184. fuse_opt_free_args(&args);
  4185. return NULL;
  4186. }
  4187. f = fuse_new_common_compat25(fd, &args, op, op_size, compat);
  4188. fuse_opt_free_args(&args);
  4189. return f;
  4190. }
  4191. struct fuse *fuse_new_compat22(int fd, const char *opts,
  4192. const struct fuse_operations_compat22 *op,
  4193. size_t op_size)
  4194. {
  4195. return fuse_new_common_compat(fd, opts, (struct fuse_operations *) op,
  4196. op_size, 22);
  4197. }
  4198. struct fuse *fuse_new_compat2(int fd, const char *opts,
  4199. const struct fuse_operations_compat2 *op)
  4200. {
  4201. return fuse_new_common_compat(fd, opts, (struct fuse_operations *) op,
  4202. sizeof(struct fuse_operations_compat2),
  4203. 21);
  4204. }
  4205. struct fuse *fuse_new_compat1(int fd, int flags,
  4206. const struct fuse_operations_compat1 *op)
  4207. {
  4208. const char *opts = NULL;
  4209. if (flags & FUSE_DEBUG_COMPAT1)
  4210. opts = "debug";
  4211. return fuse_new_common_compat(fd, opts, (struct fuse_operations *) op,
  4212. sizeof(struct fuse_operations_compat1),
  4213. 11);
  4214. }
/* Bind the compatibility entry points above to the versioned ABI
 * symbols that binaries linked against older libfuse releases expect
 * (e.g. a FUSE 2.2 binary resolves fuse_new to fuse_new_compat22). */
FUSE_SYMVER(".symver fuse_exited,__fuse_exited@");
FUSE_SYMVER(".symver fuse_process_cmd,__fuse_process_cmd@");
FUSE_SYMVER(".symver fuse_read_cmd,__fuse_read_cmd@");
FUSE_SYMVER(".symver fuse_set_getcontext_func,__fuse_set_getcontext_func@");
FUSE_SYMVER(".symver fuse_new_compat2,fuse_new@");
FUSE_SYMVER(".symver fuse_new_compat22,fuse_new@FUSE_2.2");
#endif /* __FreeBSD__ || __NetBSD__ */
  4222. struct fuse *fuse_new_compat25(int fd, struct fuse_args *args,
  4223. const struct fuse_operations_compat25 *op,
  4224. size_t op_size)
  4225. {
  4226. return fuse_new_common_compat25(fd, args, (struct fuse_operations *) op,
  4227. op_size, 25);
  4228. }
/* Expose fuse_new_compat25 under the FUSE_2.5 versioned symbol name */
FUSE_SYMVER(".symver fuse_new_compat25,fuse_new@FUSE_2.5");