  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU LGPLv2.
  5. See the file COPYING.LIB
  6. */
  7. /* For pthread_rwlock_t */
  8. #define _GNU_SOURCE
  9. #include "config.h"
  10. #include "fuse_i.h"
  11. #include "fuse_lowlevel.h"
  12. #include "fuse_opt.h"
  13. #include "fuse_misc.h"
  14. #include "fuse_common_compat.h"
  15. #include "fuse_compat.h"
  16. #include "fuse_kernel.h"
  17. #include <stdio.h>
  18. #include <string.h>
  19. #include <stdlib.h>
  20. #include <stddef.h>
  21. #include <stdbool.h>
  22. #include <unistd.h>
  23. #include <time.h>
  24. #include <fcntl.h>
  25. #include <limits.h>
  26. #include <errno.h>
  27. #include <signal.h>
  28. #include <dlfcn.h>
  29. #include <assert.h>
  30. #include <poll.h>
  31. #include <sys/param.h>
  32. #include <sys/uio.h>
  33. #include <sys/time.h>
  34. #include <sys/mman.h>
  35. #include <sys/file.h>
  36. #define FUSE_NODE_SLAB 1
  37. #ifndef MAP_ANONYMOUS
  38. #undef FUSE_NODE_SLAB
  39. #endif
  40. #define FUSE_DEFAULT_INTR_SIGNAL SIGUSR1
  41. #define FUSE_UNKNOWN_INO 0xffffffff
  42. #define OFFSET_MAX 0x7fffffffffffffffLL
  43. #define NODE_TABLE_MIN_SIZE 8192
  44. struct fuse_config {
  45. unsigned int uid;
  46. unsigned int gid;
  47. unsigned int umask;
  48. double entry_timeout;
  49. double negative_timeout;
  50. double attr_timeout;
  51. double ac_attr_timeout;
  52. int ac_attr_timeout_set;
  53. int remember;
  54. int nopath;
  55. int debug;
  56. int hard_remove;
  57. int use_ino;
  58. int readdir_ino;
  59. int set_mode;
  60. int set_uid;
  61. int set_gid;
  62. int direct_io;
  63. int kernel_cache;
  64. int auto_cache;
  65. int intr;
  66. int intr_signal;
  67. int help;
  68. char *modules;
  69. int threads;
  70. };
  71. struct fuse_fs {
  72. struct fuse_operations op;
  73. struct fuse_module *m;
  74. void *user_data;
  75. int compat;
  76. int debug;
  77. };
  78. struct fusemod_so {
  79. void *handle;
  80. int ctr;
  81. };
  82. struct lock_queue_element {
  83. struct lock_queue_element *next;
  84. pthread_cond_t cond;
  85. fuse_ino_t nodeid1;
  86. const char *name1;
  87. char **path1;
  88. struct node **wnode1;
  89. fuse_ino_t nodeid2;
  90. const char *name2;
  91. char **path2;
  92. struct node **wnode2;
  93. int err;
  94. bool first_locked : 1;
  95. bool second_locked : 1;
  96. bool done : 1;
  97. };
  98. struct node_table {
  99. struct node **array;
  100. size_t use;
  101. size_t size;
  102. size_t split;
  103. };
  104. #define container_of(ptr, type, member) ({ \
  105. const typeof( ((type *)0)->member ) *__mptr = (ptr); \
  106. (type *)( (char *)__mptr - offsetof(type,member) );})
  107. #define list_entry(ptr, type, member) \
  108. container_of(ptr, type, member)
  109. struct list_head {
  110. struct list_head *next;
  111. struct list_head *prev;
  112. };
  113. struct node_slab {
  114. struct list_head list; /* must be the first member */
  115. struct list_head freelist;
  116. int used;
  117. };
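/*
 * Per-mount state.  Nodes are indexed by two hash tables: id_table maps
 * inode numbers (nodeids) to nodes, and name_table maps (parent, name)
 * pairs to nodes.  When the "remember" option is in effect, lru_table
 * additionally keeps remembered nodes ordered by the time they became
 * forgettable (see set_forget_time() below).
 */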
  118. struct fuse {
  119. struct fuse_session *se;
  120. struct node_table name_table;
  121. struct node_table id_table;
  122. struct list_head lru_table;
  123. fuse_ino_t ctr;
  124. unsigned int generation;
  125. unsigned int hidectr;
  126. pthread_mutex_t lock;
  127. struct fuse_config conf;
  128. int intr_installed;
  129. struct fuse_fs *fs;
  130. int nullpath_ok;
  131. int utime_omit_ok;
  132. struct lock_queue_element *lockq;
  133. int pagesize;
  134. struct list_head partial_slabs;
  135. struct list_head full_slabs;
  136. pthread_t prune_thread;
  137. };
  138. struct lock {
  139. int type;
  140. off_t start;
  141. off_t end;
  142. pid_t pid;
  143. uint64_t owner;
  144. struct lock *next;
  145. };
  146. struct node {
  147. struct node *name_next;
  148. struct node *id_next;
  149. fuse_ino_t nodeid;
  150. unsigned int generation;
  151. int refctr;
  152. struct node *parent;
  153. char *name;
  154. uint64_t nlookup;
  155. int open_count;
  156. struct timespec stat_updated;
  157. struct timespec mtime;
  158. off_t size;
  159. struct lock *locks;
  160. unsigned int is_hidden : 1;
  161. unsigned int cache_valid : 1;
  162. int treelock;
  163. char inline_name[32];
  164. };
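/*
 * node->treelock is a per-node reader count used by try_get_path() and
 * unlock_path() below: a positive value counts path lookups currently
 * passing through the node, TREELOCK_WRITE (-1) marks an exclusive lock,
 * and adding TREELOCK_WAIT_OFFSET to a positive count marks a node that a
 * writer is waiting on, so new lookups back off with -EAGAIN.  Once the
 * last reader of such a node leaves, the count is reset to zero.
 */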
  165. #define TREELOCK_WRITE -1
  166. #define TREELOCK_WAIT_OFFSET INT_MIN
  167. struct node_lru {
  168. struct node node;
  169. struct list_head lru;
  170. struct timespec forget_time;
  171. };
  172. struct fuse_dh {
  173. pthread_mutex_t lock;
  174. struct fuse *fuse;
  175. fuse_req_t req;
  176. char *contents;
  177. int allocated;
  178. unsigned len;
  179. unsigned size;
  180. unsigned needlen;
  181. int filled;
  182. uint64_t fh;
  183. int error;
  184. fuse_ino_t nodeid;
  185. };
  186. /* old dir handle */
  187. struct fuse_dirhandle {
  188. fuse_fill_dir_t filler;
  189. void *buf;
  190. };
  191. struct fuse_context_i {
  192. struct fuse_context ctx;
  193. fuse_req_t req;
  194. };
  195. static pthread_key_t fuse_context_key;
  196. static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER;
  197. static int fuse_context_ref;
  198. static struct fusemod_so *fuse_current_so;
  199. static struct fuse_module *fuse_modules;
  200. static int fuse_load_so_name(const char *soname)
  201. {
  202. struct fusemod_so *so;
  203. so = calloc(1, sizeof(struct fusemod_so));
  204. if (!so) {
  205. fprintf(stderr, "fuse: memory allocation failed\n");
  206. return -1;
  207. }
  208. fuse_current_so = so;
  209. so->handle = dlopen(soname, RTLD_NOW);
  210. fuse_current_so = NULL;
  211. if (!so->handle) {
  212. fprintf(stderr, "fuse: %s\n", dlerror());
  213. goto err;
  214. }
  215. if (!so->ctr) {
  216. fprintf(stderr, "fuse: %s did not register any modules\n",
  217. soname);
  218. goto err;
  219. }
  220. return 0;
  221. err:
  222. if (so->handle)
  223. dlclose(so->handle);
  224. free(so);
  225. return -1;
  226. }
  227. static int fuse_load_so_module(const char *module)
  228. {
  229. int res;
  230. char *soname = malloc(strlen(module) + 64);
  231. if (!soname) {
  232. fprintf(stderr, "fuse: memory allocation failed\n");
  233. return -1;
  234. }
  235. sprintf(soname, "libfusemod_%s.so", module);
  236. res = fuse_load_so_name(soname);
  237. free(soname);
  238. return res;
  239. }
  240. static struct fuse_module *fuse_find_module(const char *module)
  241. {
  242. struct fuse_module *m;
  243. for (m = fuse_modules; m; m = m->next) {
  244. if (strcmp(module, m->name) == 0) {
  245. m->ctr++;
  246. break;
  247. }
  248. }
  249. return m;
  250. }
  251. static struct fuse_module *fuse_get_module(const char *module)
  252. {
  253. struct fuse_module *m;
  254. pthread_mutex_lock(&fuse_context_lock);
  255. m = fuse_find_module(module);
  256. if (!m) {
  257. int err = fuse_load_so_module(module);
  258. if (!err)
  259. m = fuse_find_module(module);
  260. }
  261. pthread_mutex_unlock(&fuse_context_lock);
  262. return m;
  263. }
  264. static void fuse_put_module(struct fuse_module *m)
  265. {
  266. pthread_mutex_lock(&fuse_context_lock);
  267. assert(m->ctr > 0);
  268. m->ctr--;
  269. if (!m->ctr && m->so) {
  270. struct fusemod_so *so = m->so;
  271. assert(so->ctr > 0);
  272. so->ctr--;
  273. if (!so->ctr) {
  274. struct fuse_module **mp;
  275. for (mp = &fuse_modules; *mp;) {
  276. if ((*mp)->so == so)
  277. *mp = (*mp)->next;
  278. else
  279. mp = &(*mp)->next;
  280. }
  281. dlclose(so->handle);
  282. free(so);
  283. }
  284. }
  285. pthread_mutex_unlock(&fuse_context_lock);
  286. }
  287. static void init_list_head(struct list_head *list)
  288. {
  289. list->next = list;
  290. list->prev = list;
  291. }
  292. static int list_empty(const struct list_head *head)
  293. {
  294. return head->next == head;
  295. }
  296. static void list_add(struct list_head *new, struct list_head *prev,
  297. struct list_head *next)
  298. {
  299. next->prev = new;
  300. new->next = next;
  301. new->prev = prev;
  302. prev->next = new;
  303. }
  304. static inline void list_add_head(struct list_head *new, struct list_head *head)
  305. {
  306. list_add(new, head, head->next);
  307. }
  308. static inline void list_add_tail(struct list_head *new, struct list_head *head)
  309. {
  310. list_add(new, head->prev, head);
  311. }
  312. static inline void list_del(struct list_head *entry)
  313. {
  314. struct list_head *prev = entry->prev;
  315. struct list_head *next = entry->next;
  316. next->prev = prev;
  317. prev->next = next;
  318. }
  319. static inline int lru_enabled(struct fuse *f)
  320. {
  321. return f->conf.remember > 0;
  322. }
  323. static struct node_lru *node_lru(struct node *node)
  324. {
  325. return (struct node_lru *) node;
  326. }
  327. static size_t get_node_size(struct fuse *f)
  328. {
  329. if (lru_enabled(f))
  330. return sizeof(struct node_lru);
  331. else
  332. return sizeof(struct node);
  333. }
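/*
 * With FUSE_NODE_SLAB, nodes are carved out of page-sized anonymous
 * mappings.  Since mmap() returns page-aligned memory and each slab is
 * exactly f->pagesize bytes, node_to_slab() can recover a node's slab by
 * masking off the low address bits.  Slabs with free slots live on
 * f->partial_slabs, fully used slabs on f->full_slabs, and a slab is
 * unmapped as soon as its last node is freed.
 */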
  334. #ifdef FUSE_NODE_SLAB
  335. static struct node_slab *list_to_slab(struct list_head *head)
  336. {
  337. return (struct node_slab *) head;
  338. }
  339. static struct node_slab *node_to_slab(struct fuse *f, struct node *node)
  340. {
  341. return (struct node_slab *) (((uintptr_t) node) & ~((uintptr_t) f->pagesize - 1));
  342. }
  343. static int alloc_slab(struct fuse *f)
  344. {
  345. void *mem;
  346. struct node_slab *slab;
  347. char *start;
  348. size_t num;
  349. size_t i;
  350. size_t node_size = get_node_size(f);
  351. mem = mmap(NULL, f->pagesize, PROT_READ | PROT_WRITE,
  352. MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  353. if (mem == MAP_FAILED)
  354. return -1;
  355. slab = mem;
  356. init_list_head(&slab->freelist);
  357. slab->used = 0;
  358. num = (f->pagesize - sizeof(struct node_slab)) / node_size;
  359. start = (char *) mem + f->pagesize - num * node_size;
  360. for (i = 0; i < num; i++) {
  361. struct list_head *n;
  362. n = (struct list_head *) (start + i * node_size);
  363. list_add_tail(n, &slab->freelist);
  364. }
  365. list_add_tail(&slab->list, &f->partial_slabs);
  366. return 0;
  367. }
  368. static struct node *alloc_node(struct fuse *f)
  369. {
  370. struct node_slab *slab;
  371. struct list_head *node;
  372. if (list_empty(&f->partial_slabs)) {
  373. int res = alloc_slab(f);
  374. if (res != 0)
  375. return NULL;
  376. }
  377. slab = list_to_slab(f->partial_slabs.next);
  378. slab->used++;
  379. node = slab->freelist.next;
  380. list_del(node);
  381. if (list_empty(&slab->freelist)) {
  382. list_del(&slab->list);
  383. list_add_tail(&slab->list, &f->full_slabs);
  384. }
  385. memset(node, 0, sizeof(struct node));
  386. return (struct node *) node;
  387. }
  388. static void free_slab(struct fuse *f, struct node_slab *slab)
  389. {
  390. int res;
  391. list_del(&slab->list);
  392. res = munmap(slab, f->pagesize);
  393. if (res == -1)
  394. fprintf(stderr, "fuse warning: munmap(%p) failed\n", slab);
  395. }
  396. static void free_node_mem(struct fuse *f, struct node *node)
  397. {
  398. struct node_slab *slab = node_to_slab(f, node);
  399. struct list_head *n = (struct list_head *) node;
  400. slab->used--;
  401. if (slab->used) {
  402. if (list_empty(&slab->freelist)) {
  403. list_del(&slab->list);
  404. list_add_tail(&slab->list, &f->partial_slabs);
  405. }
  406. list_add_head(n, &slab->freelist);
  407. } else {
  408. free_slab(f, slab);
  409. }
  410. }
  411. #else
  412. static struct node *alloc_node(struct fuse *f)
  413. {
  414. return (struct node *) calloc(1, get_node_size(f));
  415. }
  416. static void free_node_mem(struct fuse *f, struct node *node)
  417. {
  418. (void) f;
  419. free(node);
  420. }
  421. #endif
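/*
 * Both node tables use linear hashing, i.e. they grow and shrink one
 * bucket at a time instead of rehashing everything at once.  't->split'
 * counts how many of the low buckets have already been redistributed into
 * the upper half of the table; the hash functions return the small-table
 * bucket until that bucket has been split.  rehash_id()/rehash_name()
 * advance the split point as the table fills, and remerge_id()/
 * remerge_name() fold buckets back together as it empties.
 */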
  422. static size_t id_hash(struct fuse *f, fuse_ino_t ino)
  423. {
  424. uint64_t hash = ((uint32_t) ino * 2654435761U) % f->id_table.size;
  425. uint64_t oldhash = hash % (f->id_table.size / 2);
  426. if (oldhash >= f->id_table.split)
  427. return oldhash;
  428. else
  429. return hash;
  430. }
  431. static struct node *get_node_nocheck(struct fuse *f, fuse_ino_t nodeid)
  432. {
  433. size_t hash = id_hash(f, nodeid);
  434. struct node *node;
  435. for (node = f->id_table.array[hash]; node != NULL; node = node->id_next)
  436. if (node->nodeid == nodeid)
  437. return node;
  438. return NULL;
  439. }
  440. static struct node *get_node(struct fuse *f, fuse_ino_t nodeid)
  441. {
  442. struct node *node = get_node_nocheck(f, nodeid);
  443. if (!node) {
  444. fprintf(stderr, "fuse internal error: node %llu not found\n",
  445. (unsigned long long) nodeid);
  446. abort();
  447. }
  448. return node;
  449. }
  450. static void curr_time(struct timespec *now);
  451. static double diff_timespec(const struct timespec *t1,
  452. const struct timespec *t2);
  453. static void remove_node_lru(struct node *node)
  454. {
  455. struct node_lru *lnode = node_lru(node);
  456. list_del(&lnode->lru);
  457. init_list_head(&lnode->lru);
  458. }
  459. static void set_forget_time(struct fuse *f, struct node *node)
  460. {
  461. struct node_lru *lnode = node_lru(node);
  462. list_del(&lnode->lru);
  463. list_add_tail(&lnode->lru, &f->lru_table);
  464. curr_time(&lnode->forget_time);
  465. }
  466. static void free_node(struct fuse *f, struct node *node)
  467. {
  468. if (node->name != node->inline_name)
  469. free(node->name);
  470. free_node_mem(f, node);
  471. }
  472. static void node_table_reduce(struct node_table *t)
  473. {
  474. size_t newsize = t->size / 2;
  475. void *newarray;
  476. if (newsize < NODE_TABLE_MIN_SIZE)
  477. return;
  478. newarray = realloc(t->array, sizeof(struct node *) * newsize);
  479. if (newarray != NULL)
  480. t->array = newarray;
  481. t->size = newsize;
  482. t->split = t->size / 2;
  483. }
  484. static void remerge_id(struct fuse *f)
  485. {
  486. struct node_table *t = &f->id_table;
  487. int iter;
  488. if (t->split == 0)
  489. node_table_reduce(t);
  490. for (iter = 8; t->split > 0 && iter; iter--) {
  491. struct node **upper;
  492. t->split--;
  493. upper = &t->array[t->split + t->size / 2];
  494. if (*upper) {
  495. struct node **nodep;
  496. for (nodep = &t->array[t->split]; *nodep;
  497. nodep = &(*nodep)->id_next);
  498. *nodep = *upper;
  499. *upper = NULL;
  500. break;
  501. }
  502. }
  503. }
  504. static void unhash_id(struct fuse *f, struct node *node)
  505. {
  506. struct node **nodep = &f->id_table.array[id_hash(f, node->nodeid)];
  507. for (; *nodep != NULL; nodep = &(*nodep)->id_next)
  508. if (*nodep == node) {
  509. *nodep = node->id_next;
  510. f->id_table.use--;
  511. if(f->id_table.use < f->id_table.size / 4)
  512. remerge_id(f);
  513. return;
  514. }
  515. }
  516. static int node_table_resize(struct node_table *t)
  517. {
  518. size_t newsize = t->size * 2;
  519. void *newarray;
  520. newarray = realloc(t->array, sizeof(struct node *) * newsize);
  521. if (newarray == NULL)
  522. return -1;
  523. t->array = newarray;
  524. memset(t->array + t->size, 0, t->size * sizeof(struct node *));
  525. t->size = newsize;
  526. t->split = 0;
  527. return 0;
  528. }
  529. static void rehash_id(struct fuse *f)
  530. {
  531. struct node_table *t = &f->id_table;
  532. struct node **nodep;
  533. struct node **next;
  534. size_t hash;
  535. if (t->split == t->size / 2)
  536. return;
  537. hash = t->split;
  538. t->split++;
  539. for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
  540. struct node *node = *nodep;
  541. size_t newhash = id_hash(f, node->nodeid);
  542. if (newhash != hash) {
  543. next = nodep;
  544. *nodep = node->id_next;
  545. node->id_next = t->array[newhash];
  546. t->array[newhash] = node;
  547. } else {
  548. next = &node->id_next;
  549. }
  550. }
  551. if (t->split == t->size / 2)
  552. node_table_resize(t);
  553. }
  554. static void hash_id(struct fuse *f, struct node *node)
  555. {
  556. size_t hash = id_hash(f, node->nodeid);
  557. node->id_next = f->id_table.array[hash];
  558. f->id_table.array[hash] = node;
  559. f->id_table.use++;
  560. if (f->id_table.use >= f->id_table.size / 2)
  561. rehash_id(f);
  562. }
  563. static size_t name_hash(struct fuse *f, fuse_ino_t parent,
  564. const char *name)
  565. {
  566. uint64_t hash = parent;
  567. uint64_t oldhash;
  568. for (; *name; name++)
  569. hash = hash * 31 + (unsigned char) *name;
  570. hash %= f->name_table.size;
  571. oldhash = hash % (f->name_table.size / 2);
  572. if (oldhash >= f->name_table.split)
  573. return oldhash;
  574. else
  575. return hash;
  576. }
  577. static void unref_node(struct fuse *f, struct node *node);
  578. static void remerge_name(struct fuse *f)
  579. {
  580. struct node_table *t = &f->name_table;
  581. int iter;
  582. if (t->split == 0)
  583. node_table_reduce(t);
  584. for (iter = 8; t->split > 0 && iter; iter--) {
  585. struct node **upper;
  586. t->split--;
  587. upper = &t->array[t->split + t->size / 2];
  588. if (*upper) {
  589. struct node **nodep;
  590. for (nodep = &t->array[t->split]; *nodep;
  591. nodep = &(*nodep)->name_next);
  592. *nodep = *upper;
  593. *upper = NULL;
  594. break;
  595. }
  596. }
  597. }
  598. static void unhash_name(struct fuse *f, struct node *node)
  599. {
  600. if (node->name) {
  601. size_t hash = name_hash(f, node->parent->nodeid, node->name);
  602. struct node **nodep = &f->name_table.array[hash];
  603. for (; *nodep != NULL; nodep = &(*nodep)->name_next)
  604. if (*nodep == node) {
  605. *nodep = node->name_next;
  606. node->name_next = NULL;
  607. unref_node(f, node->parent);
  608. if (node->name != node->inline_name)
  609. free(node->name);
  610. node->name = NULL;
  611. node->parent = NULL;
  612. f->name_table.use--;
  613. if (f->name_table.use < f->name_table.size / 4)
  614. remerge_name(f);
  615. return;
  616. }
  617. fprintf(stderr,
  618. "fuse internal error: unable to unhash node: %llu\n",
  619. (unsigned long long) node->nodeid);
  620. abort();
  621. }
  622. }
  623. static void rehash_name(struct fuse *f)
  624. {
  625. struct node_table *t = &f->name_table;
  626. struct node **nodep;
  627. struct node **next;
  628. size_t hash;
  629. if (t->split == t->size / 2)
  630. return;
  631. hash = t->split;
  632. t->split++;
  633. for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
  634. struct node *node = *nodep;
  635. size_t newhash = name_hash(f, node->parent->nodeid, node->name);
  636. if (newhash != hash) {
  637. next = nodep;
  638. *nodep = node->name_next;
  639. node->name_next = t->array[newhash];
  640. t->array[newhash] = node;
  641. } else {
  642. next = &node->name_next;
  643. }
  644. }
  645. if (t->split == t->size / 2)
  646. node_table_resize(t);
  647. }
  648. static int hash_name(struct fuse *f, struct node *node, fuse_ino_t parentid,
  649. const char *name)
  650. {
  651. size_t hash = name_hash(f, parentid, name);
  652. struct node *parent = get_node(f, parentid);
  653. if (strlen(name) < sizeof(node->inline_name)) {
  654. strcpy(node->inline_name, name);
  655. node->name = node->inline_name;
  656. } else {
  657. node->name = strdup(name);
  658. if (node->name == NULL)
  659. return -1;
  660. }
  661. parent->refctr ++;
  662. node->parent = parent;
  663. node->name_next = f->name_table.array[hash];
  664. f->name_table.array[hash] = node;
  665. f->name_table.use++;
  666. if (f->name_table.use >= f->name_table.size / 2)
  667. rehash_name(f);
  668. return 0;
  669. }
  670. static void delete_node(struct fuse *f, struct node *node)
  671. {
  672. if (f->conf.debug)
  673. fprintf(stderr, "DELETE: %llu\n",
  674. (unsigned long long) node->nodeid);
  675. assert(node->treelock == 0);
  676. unhash_name(f, node);
  677. if (lru_enabled(f))
  678. remove_node_lru(node);
  679. unhash_id(f, node);
  680. free_node(f, node);
  681. }
  682. static void unref_node(struct fuse *f, struct node *node)
  683. {
  684. assert(node->refctr > 0);
  685. node->refctr --;
  686. if (!node->refctr)
  687. delete_node(f, node);
  688. }
  689. static fuse_ino_t next_id(struct fuse *f)
  690. {
  691. do {
  692. f->ctr = (f->ctr + 1) & 0xffffffff;
  693. if (!f->ctr)
  694. f->generation ++;
  695. } while (f->ctr == 0 || f->ctr == FUSE_UNKNOWN_INO ||
  696. get_node_nocheck(f, f->ctr) != NULL);
  697. return f->ctr;
  698. }
  699. static struct node *lookup_node(struct fuse *f, fuse_ino_t parent,
  700. const char *name)
  701. {
  702. size_t hash = name_hash(f, parent, name);
  703. struct node *node;
  704. for (node = f->name_table.array[hash]; node != NULL; node = node->name_next)
  705. if (node->parent->nodeid == parent &&
  706. strcmp(node->name, name) == 0)
  707. return node;
  708. return NULL;
  709. }
  710. static void inc_nlookup(struct node *node)
  711. {
  712. if (!node->nlookup)
  713. node->refctr++;
  714. node->nlookup++;
  715. }
  716. static struct node *find_node(struct fuse *f, fuse_ino_t parent,
  717. const char *name)
  718. {
  719. struct node *node;
  720. pthread_mutex_lock(&f->lock);
  721. if (!name)
  722. node = get_node(f, parent);
  723. else
  724. node = lookup_node(f, parent, name);
  725. if (node == NULL) {
  726. node = alloc_node(f);
  727. if (node == NULL)
  728. goto out_err;
  729. node->nodeid = next_id(f);
  730. node->generation = f->generation;
  731. if (f->conf.remember)
  732. inc_nlookup(node);
  733. if (hash_name(f, node, parent, name) == -1) {
  734. free_node(f, node);
  735. node = NULL;
  736. goto out_err;
  737. }
  738. hash_id(f, node);
  739. if (lru_enabled(f)) {
  740. struct node_lru *lnode = node_lru(node);
  741. init_list_head(&lnode->lru);
  742. }
  743. } else if (lru_enabled(f) && node->nlookup == 1) {
  744. remove_node_lru(node);
  745. }
  746. inc_nlookup(node);
  747. out_err:
  748. pthread_mutex_unlock(&f->lock);
  749. return node;
  750. }
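/*
 * Paths are assembled back to front: add_name() prepends one component
 * plus a '/' in front of 's', which starts at the end of the buffer, and
 * doubles the buffer (shifting the partial path to its new end) whenever
 * a component no longer fits.
 */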
  751. static char *add_name(char **buf, unsigned *bufsize, char *s, const char *name)
  752. {
  753. size_t len = strlen(name);
  754. if (s - len <= *buf) {
  755. unsigned pathlen = *bufsize - (s - *buf);
  756. unsigned newbufsize = *bufsize;
  757. char *newbuf;
  758. while (newbufsize < pathlen + len + 1) {
  759. if (newbufsize >= 0x80000000)
  760. newbufsize = 0xffffffff;
  761. else
  762. newbufsize *= 2;
  763. }
  764. newbuf = realloc(*buf, newbufsize);
  765. if (newbuf == NULL)
  766. return NULL;
  767. *buf = newbuf;
  768. s = newbuf + newbufsize - pathlen;
  769. memmove(s, newbuf + *bufsize - pathlen, pathlen);
  770. *bufsize = newbufsize;
  771. }
  772. s -= len;
  773. strncpy(s, name, len);
  774. s--;
  775. *s = '/';
  776. return s;
  777. }
  778. static void unlock_path(struct fuse *f, fuse_ino_t nodeid, struct node *wnode,
  779. struct node *end)
  780. {
  781. struct node *node;
  782. if (wnode) {
  783. assert(wnode->treelock == TREELOCK_WRITE);
  784. wnode->treelock = 0;
  785. }
  786. for (node = get_node(f, nodeid);
  787. node != end && node->nodeid != FUSE_ROOT_ID; node = node->parent) {
  788. assert(node->treelock != 0);
  789. assert(node->treelock != TREELOCK_WAIT_OFFSET);
  790. assert(node->treelock != TREELOCK_WRITE);
  791. node->treelock--;
  792. if (node->treelock == TREELOCK_WAIT_OFFSET)
  793. node->treelock = 0;
  794. }
  795. }
  796. static int try_get_path(struct fuse *f, fuse_ino_t nodeid, const char *name,
  797. char **path, struct node **wnodep, bool need_lock)
  798. {
  799. unsigned bufsize = 256;
  800. char *buf;
  801. char *s;
  802. struct node *node;
  803. struct node *wnode = NULL;
  804. int err;
  805. *path = NULL;
  806. err = -ENOMEM;
  807. buf = malloc(bufsize);
  808. if (buf == NULL)
  809. goto out_err;
  810. s = buf + bufsize - 1;
  811. *s = '\0';
  812. if (name != NULL) {
  813. s = add_name(&buf, &bufsize, s, name);
  814. err = -ENOMEM;
  815. if (s == NULL)
  816. goto out_free;
  817. }
  818. if (wnodep) {
  819. assert(need_lock);
  820. wnode = lookup_node(f, nodeid, name);
  821. if (wnode) {
  822. if (wnode->treelock != 0) {
  823. if (wnode->treelock > 0)
  824. wnode->treelock += TREELOCK_WAIT_OFFSET;
  825. err = -EAGAIN;
  826. goto out_free;
  827. }
  828. wnode->treelock = TREELOCK_WRITE;
  829. }
  830. }
  831. for (node = get_node(f, nodeid); node->nodeid != FUSE_ROOT_ID;
  832. node = node->parent) {
  833. err = -ENOENT;
  834. if (node->name == NULL || node->parent == NULL)
  835. goto out_unlock;
  836. err = -ENOMEM;
  837. s = add_name(&buf, &bufsize, s, node->name);
  838. if (s == NULL)
  839. goto out_unlock;
  840. if (need_lock) {
  841. err = -EAGAIN;
  842. if (node->treelock < 0)
  843. goto out_unlock;
  844. node->treelock++;
  845. }
  846. }
  847. if (s[0])
  848. memmove(buf, s, bufsize - (s - buf));
  849. else
  850. strcpy(buf, "/");
  851. *path = buf;
  852. if (wnodep)
  853. *wnodep = wnode;
  854. return 0;
  855. out_unlock:
  856. if (need_lock)
  857. unlock_path(f, nodeid, wnode, node);
  858. out_free:
  859. free(buf);
  860. out_err:
  861. return err;
  862. }
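/*
 * Requests whose path lookup fails with -EAGAIN are parked on f->lockq
 * (see queue_path()/wait_path()).  Whenever tree locks are released,
 * wake_up_queued() retries every queued element; an element is woken up
 * once all of its paths could be locked, or a hard error occurred.
 */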
  863. static void queue_element_unlock(struct fuse *f, struct lock_queue_element *qe)
  864. {
  865. struct node *wnode;
  866. if (qe->first_locked) {
  867. wnode = qe->wnode1 ? *qe->wnode1 : NULL;
  868. unlock_path(f, qe->nodeid1, wnode, NULL);
  869. qe->first_locked = false;
  870. }
  871. if (qe->second_locked) {
  872. wnode = qe->wnode2 ? *qe->wnode2 : NULL;
  873. unlock_path(f, qe->nodeid2, wnode, NULL);
  874. qe->second_locked = false;
  875. }
  876. }
  877. static void queue_element_wakeup(struct fuse *f, struct lock_queue_element *qe)
  878. {
  879. int err;
  880. bool first = (qe == f->lockq);
  881. if (!qe->path1) {
  882. /* Just waiting for it to be unlocked */
  883. if (get_node(f, qe->nodeid1)->treelock == 0)
  884. pthread_cond_signal(&qe->cond);
  885. return;
  886. }
  887. if (!qe->first_locked) {
  888. err = try_get_path(f, qe->nodeid1, qe->name1, qe->path1,
  889. qe->wnode1, true);
  890. if (!err)
  891. qe->first_locked = true;
  892. else if (err != -EAGAIN)
  893. goto err_unlock;
  894. }
  895. if (!qe->second_locked && qe->path2) {
  896. err = try_get_path(f, qe->nodeid2, qe->name2, qe->path2,
  897. qe->wnode2, true);
  898. if (!err)
  899. qe->second_locked = true;
  900. else if (err != -EAGAIN)
  901. goto err_unlock;
  902. }
  903. if (qe->first_locked && (qe->second_locked || !qe->path2)) {
  904. err = 0;
  905. goto done;
  906. }
907. /*
908. * Allow only the first element of the queue to remain partially locked,
909. * otherwise queued requests could deadlock against each other.
910. *
911. * The first element is allowed to keep its partial locks so that it is
912. * guaranteed to make progress (no starvation).
913. */
  914. if (!first)
  915. queue_element_unlock(f, qe);
  916. /* keep trying */
  917. return;
  918. err_unlock:
  919. queue_element_unlock(f, qe);
  920. done:
  921. qe->err = err;
  922. qe->done = true;
  923. pthread_cond_signal(&qe->cond);
  924. }
  925. static void wake_up_queued(struct fuse *f)
  926. {
  927. struct lock_queue_element *qe;
  928. for (qe = f->lockq; qe != NULL; qe = qe->next)
  929. queue_element_wakeup(f, qe);
  930. }
  931. static void debug_path(struct fuse *f, const char *msg, fuse_ino_t nodeid,
  932. const char *name, bool wr)
  933. {
  934. if (f->conf.debug) {
  935. struct node *wnode = NULL;
  936. if (wr)
  937. wnode = lookup_node(f, nodeid, name);
  938. if (wnode)
  939. fprintf(stderr, "%s %li (w)\n", msg, wnode->nodeid);
  940. else
  941. fprintf(stderr, "%s %li\n", msg, nodeid);
  942. }
  943. }
  944. static void queue_path(struct fuse *f, struct lock_queue_element *qe)
  945. {
  946. struct lock_queue_element **qp;
  947. qe->done = false;
  948. qe->first_locked = false;
  949. qe->second_locked = false;
  950. pthread_cond_init(&qe->cond, NULL);
  951. qe->next = NULL;
  952. for (qp = &f->lockq; *qp != NULL; qp = &(*qp)->next);
  953. *qp = qe;
  954. }
  955. static void dequeue_path(struct fuse *f, struct lock_queue_element *qe)
  956. {
  957. struct lock_queue_element **qp;
  958. pthread_cond_destroy(&qe->cond);
  959. for (qp = &f->lockq; *qp != qe; qp = &(*qp)->next);
  960. *qp = qe->next;
  961. }
  962. static int wait_path(struct fuse *f, struct lock_queue_element *qe)
  963. {
  964. queue_path(f, qe);
  965. do {
  966. pthread_cond_wait(&qe->cond, &f->lock);
  967. } while (!qe->done);
  968. dequeue_path(f, qe);
  969. return qe->err;
  970. }
  971. static int get_path_common(struct fuse *f, fuse_ino_t nodeid, const char *name,
  972. char **path, struct node **wnode)
  973. {
  974. int err;
  975. pthread_mutex_lock(&f->lock);
  976. err = try_get_path(f, nodeid, name, path, wnode, true);
  977. if (err == -EAGAIN) {
  978. struct lock_queue_element qe = {
  979. .nodeid1 = nodeid,
  980. .name1 = name,
  981. .path1 = path,
  982. .wnode1 = wnode,
  983. };
  984. debug_path(f, "QUEUE PATH", nodeid, name, !!wnode);
  985. err = wait_path(f, &qe);
  986. debug_path(f, "DEQUEUE PATH", nodeid, name, !!wnode);
  987. }
  988. pthread_mutex_unlock(&f->lock);
  989. return err;
  990. }
  991. static int get_path(struct fuse *f, fuse_ino_t nodeid, char **path)
  992. {
  993. return get_path_common(f, nodeid, NULL, path, NULL);
  994. }
  995. static int get_path_nullok(struct fuse *f, fuse_ino_t nodeid, char **path)
  996. {
  997. int err = 0;
  998. if (f->conf.nopath) {
  999. *path = NULL;
  1000. } else {
  1001. err = get_path_common(f, nodeid, NULL, path, NULL);
  1002. if (err == -ENOENT && f->nullpath_ok)
  1003. err = 0;
  1004. }
  1005. return err;
  1006. }
  1007. static int get_path_name(struct fuse *f, fuse_ino_t nodeid, const char *name,
  1008. char **path)
  1009. {
  1010. return get_path_common(f, nodeid, name, path, NULL);
  1011. }
  1012. static int get_path_wrlock(struct fuse *f, fuse_ino_t nodeid, const char *name,
  1013. char **path, struct node **wnode)
  1014. {
  1015. return get_path_common(f, nodeid, name, path, wnode);
  1016. }
  1017. static int try_get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
  1018. fuse_ino_t nodeid2, const char *name2,
  1019. char **path1, char **path2,
  1020. struct node **wnode1, struct node **wnode2)
  1021. {
  1022. int err;
  1023. /* FIXME: locking two paths needs deadlock checking */
  1024. err = try_get_path(f, nodeid1, name1, path1, wnode1, true);
  1025. if (!err) {
  1026. err = try_get_path(f, nodeid2, name2, path2, wnode2, true);
  1027. if (err) {
  1028. struct node *wn1 = wnode1 ? *wnode1 : NULL;
  1029. unlock_path(f, nodeid1, wn1, NULL);
  1030. free(*path1);
  1031. }
  1032. }
  1033. return err;
  1034. }
  1035. static int get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
  1036. fuse_ino_t nodeid2, const char *name2,
  1037. char **path1, char **path2,
  1038. struct node **wnode1, struct node **wnode2)
  1039. {
  1040. int err;
  1041. pthread_mutex_lock(&f->lock);
  1042. err = try_get_path2(f, nodeid1, name1, nodeid2, name2,
  1043. path1, path2, wnode1, wnode2);
  1044. if (err == -EAGAIN) {
  1045. struct lock_queue_element qe = {
  1046. .nodeid1 = nodeid1,
  1047. .name1 = name1,
  1048. .path1 = path1,
  1049. .wnode1 = wnode1,
  1050. .nodeid2 = nodeid2,
  1051. .name2 = name2,
  1052. .path2 = path2,
  1053. .wnode2 = wnode2,
  1054. };
  1055. debug_path(f, "QUEUE PATH1", nodeid1, name1, !!wnode1);
  1056. debug_path(f, " PATH2", nodeid2, name2, !!wnode2);
  1057. err = wait_path(f, &qe);
  1058. debug_path(f, "DEQUEUE PATH1", nodeid1, name1, !!wnode1);
  1059. debug_path(f, " PATH2", nodeid2, name2, !!wnode2);
  1060. }
  1061. pthread_mutex_unlock(&f->lock);
  1062. return err;
  1063. }
  1064. static void free_path_wrlock(struct fuse *f, fuse_ino_t nodeid,
  1065. struct node *wnode, char *path)
  1066. {
  1067. pthread_mutex_lock(&f->lock);
  1068. unlock_path(f, nodeid, wnode, NULL);
  1069. if (f->lockq)
  1070. wake_up_queued(f);
  1071. pthread_mutex_unlock(&f->lock);
  1072. free(path);
  1073. }
  1074. static void free_path(struct fuse *f, fuse_ino_t nodeid, char *path)
  1075. {
  1076. if (path)
  1077. free_path_wrlock(f, nodeid, NULL, path);
  1078. }
  1079. static void free_path2(struct fuse *f, fuse_ino_t nodeid1, fuse_ino_t nodeid2,
  1080. struct node *wnode1, struct node *wnode2,
  1081. char *path1, char *path2)
  1082. {
  1083. pthread_mutex_lock(&f->lock);
  1084. unlock_path(f, nodeid1, wnode1, NULL);
  1085. unlock_path(f, nodeid2, wnode2, NULL);
  1086. wake_up_queued(f);
  1087. pthread_mutex_unlock(&f->lock);
  1088. free(path1);
  1089. free(path2);
  1090. }
  1091. static void forget_node(struct fuse *f, fuse_ino_t nodeid, uint64_t nlookup)
  1092. {
  1093. struct node *node;
  1094. if (nodeid == FUSE_ROOT_ID)
  1095. return;
  1096. pthread_mutex_lock(&f->lock);
  1097. node = get_node(f, nodeid);
  1098. /*
  1099. * Node may still be locked due to interrupt idiocy in open,
  1100. * create and opendir
  1101. */
  1102. while (node->nlookup == nlookup && node->treelock) {
  1103. struct lock_queue_element qe = {
  1104. .nodeid1 = nodeid,
  1105. };
  1106. debug_path(f, "QUEUE PATH (forget)", nodeid, NULL, false);
  1107. queue_path(f, &qe);
  1108. do {
  1109. pthread_cond_wait(&qe.cond, &f->lock);
  1110. } while (node->nlookup == nlookup && node->treelock);
  1111. dequeue_path(f, &qe);
  1112. debug_path(f, "DEQUEUE_PATH (forget)", nodeid, NULL, false);
  1113. }
  1114. assert(node->nlookup >= nlookup);
  1115. node->nlookup -= nlookup;
  1116. if (!node->nlookup) {
  1117. unref_node(f, node);
  1118. } else if (lru_enabled(f) && node->nlookup == 1) {
  1119. set_forget_time(f, node);
  1120. }
  1121. pthread_mutex_unlock(&f->lock);
  1122. }
  1123. static void unlink_node(struct fuse *f, struct node *node)
  1124. {
  1125. if (f->conf.remember) {
  1126. assert(node->nlookup > 1);
  1127. node->nlookup--;
  1128. }
  1129. unhash_name(f, node);
  1130. }
  1131. static void remove_node(struct fuse *f, fuse_ino_t dir, const char *name)
  1132. {
  1133. struct node *node;
  1134. pthread_mutex_lock(&f->lock);
  1135. node = lookup_node(f, dir, name);
  1136. if (node != NULL)
  1137. unlink_node(f, node);
  1138. pthread_mutex_unlock(&f->lock);
  1139. }
  1140. static int rename_node(struct fuse *f, fuse_ino_t olddir, const char *oldname,
  1141. fuse_ino_t newdir, const char *newname, int hide)
  1142. {
  1143. struct node *node;
  1144. struct node *newnode;
  1145. int err = 0;
  1146. pthread_mutex_lock(&f->lock);
  1147. node = lookup_node(f, olddir, oldname);
  1148. newnode = lookup_node(f, newdir, newname);
  1149. if (node == NULL)
  1150. goto out;
  1151. if (newnode != NULL) {
  1152. if (hide) {
  1153. fprintf(stderr, "fuse: hidden file got created during hiding\n");
  1154. err = -EBUSY;
  1155. goto out;
  1156. }
  1157. unlink_node(f, newnode);
  1158. }
  1159. unhash_name(f, node);
  1160. if (hash_name(f, node, newdir, newname) == -1) {
  1161. err = -ENOMEM;
  1162. goto out;
  1163. }
  1164. if (hide)
  1165. node->is_hidden = 1;
  1166. out:
  1167. pthread_mutex_unlock(&f->lock);
  1168. return err;
  1169. }
  1170. static void set_stat(struct fuse *f, fuse_ino_t nodeid, struct stat *stbuf)
  1171. {
  1172. if (!f->conf.use_ino)
  1173. stbuf->st_ino = nodeid;
  1174. if (f->conf.set_mode)
  1175. stbuf->st_mode = (stbuf->st_mode & S_IFMT) |
  1176. (0777 & ~f->conf.umask);
  1177. if (f->conf.set_uid)
  1178. stbuf->st_uid = f->conf.uid;
  1179. if (f->conf.set_gid)
  1180. stbuf->st_gid = f->conf.gid;
  1181. }
  1182. static struct fuse *req_fuse(fuse_req_t req)
  1183. {
  1184. return (struct fuse *) fuse_req_userdata(req);
  1185. }
  1186. static void fuse_intr_sighandler(int sig)
  1187. {
  1188. (void) sig;
  1189. /* Nothing to do */
  1190. }
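/*
 * Interrupt support: fuse_prepare_interrupt() registers fuse_interrupt()
 * on the request before an operation runs.  If the kernel interrupts the
 * request, fuse_interrupt() repeatedly sends the configured signal
 * (conf.intr_signal) to the worker thread to knock it out of blocking
 * system calls, until fuse_finish_interrupt() marks the operation done.
 */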
  1191. struct fuse_intr_data {
  1192. pthread_t id;
  1193. pthread_cond_t cond;
  1194. int finished;
  1195. };
  1196. static void fuse_interrupt(fuse_req_t req, void *d_)
  1197. {
  1198. struct fuse_intr_data *d = d_;
  1199. struct fuse *f = req_fuse(req);
  1200. if (d->id == pthread_self())
  1201. return;
  1202. pthread_mutex_lock(&f->lock);
  1203. while (!d->finished) {
  1204. struct timeval now;
  1205. struct timespec timeout;
  1206. pthread_kill(d->id, f->conf.intr_signal);
  1207. gettimeofday(&now, NULL);
  1208. timeout.tv_sec = now.tv_sec + 1;
  1209. timeout.tv_nsec = now.tv_usec * 1000;
  1210. pthread_cond_timedwait(&d->cond, &f->lock, &timeout);
  1211. }
  1212. pthread_mutex_unlock(&f->lock);
  1213. }
  1214. static void fuse_do_finish_interrupt(struct fuse *f, fuse_req_t req,
  1215. struct fuse_intr_data *d)
  1216. {
  1217. pthread_mutex_lock(&f->lock);
  1218. d->finished = 1;
  1219. pthread_cond_broadcast(&d->cond);
  1220. pthread_mutex_unlock(&f->lock);
  1221. fuse_req_interrupt_func(req, NULL, NULL);
  1222. pthread_cond_destroy(&d->cond);
  1223. }
  1224. static void fuse_do_prepare_interrupt(fuse_req_t req, struct fuse_intr_data *d)
  1225. {
  1226. d->id = pthread_self();
  1227. pthread_cond_init(&d->cond, NULL);
  1228. d->finished = 0;
  1229. fuse_req_interrupt_func(req, fuse_interrupt, d);
  1230. }
  1231. static inline void fuse_finish_interrupt(struct fuse *f, fuse_req_t req,
  1232. struct fuse_intr_data *d)
  1233. {
  1234. if (f->conf.intr)
  1235. fuse_do_finish_interrupt(f, req, d);
  1236. }
  1237. static inline void fuse_prepare_interrupt(struct fuse *f, fuse_req_t req,
  1238. struct fuse_intr_data *d)
  1239. {
  1240. if (f->conf.intr)
  1241. fuse_do_prepare_interrupt(req, d);
  1242. }
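/*
 * Compatibility shims: fs->compat records the FUSE API version the
 * filesystem was written against (e.g. 22 or 25).  For older versions the
 * fuse_operations table is reinterpreted as the matching *_compat
 * structure and the arguments are converted, so old and new filesystems
 * go through the same code paths.
 */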
  1243. #if !defined(__FreeBSD__) && !defined(__NetBSD__)
  1244. static int fuse_compat_open(struct fuse_fs *fs, const char *path,
  1245. struct fuse_file_info *fi)
  1246. {
  1247. int err;
  1248. if (!fs->compat || fs->compat >= 25)
  1249. err = fs->op.open(path, fi);
  1250. else if (fs->compat == 22) {
  1251. struct fuse_file_info_compat tmp;
  1252. memcpy(&tmp, fi, sizeof(tmp));
  1253. err = ((struct fuse_operations_compat22 *) &fs->op)->open(path,
  1254. &tmp);
  1255. memcpy(fi, &tmp, sizeof(tmp));
  1256. fi->fh = tmp.fh;
  1257. } else
  1258. err = ((struct fuse_operations_compat2 *) &fs->op)
  1259. ->open(path, fi->flags);
  1260. return err;
  1261. }
  1262. static int fuse_compat_release(struct fuse_fs *fs, const char *path,
  1263. struct fuse_file_info *fi)
  1264. {
  1265. if (!fs->compat || fs->compat >= 22)
  1266. return fs->op.release(path, fi);
  1267. else
  1268. return ((struct fuse_operations_compat2 *) &fs->op)
  1269. ->release(path, fi->flags);
  1270. }
  1271. static int fuse_compat_opendir(struct fuse_fs *fs, const char *path,
  1272. struct fuse_file_info *fi)
  1273. {
  1274. if (!fs->compat || fs->compat >= 25)
  1275. return fs->op.opendir(path, fi);
  1276. else {
  1277. int err;
  1278. struct fuse_file_info_compat tmp;
  1279. memcpy(&tmp, fi, sizeof(tmp));
  1280. err = ((struct fuse_operations_compat22 *) &fs->op)
  1281. ->opendir(path, &tmp);
  1282. memcpy(fi, &tmp, sizeof(tmp));
  1283. fi->fh = tmp.fh;
  1284. return err;
  1285. }
  1286. }
  1287. static void convert_statfs_compat(struct fuse_statfs_compat1 *compatbuf,
  1288. struct statvfs *stbuf)
  1289. {
  1290. stbuf->f_bsize = compatbuf->block_size;
  1291. stbuf->f_blocks = compatbuf->blocks;
  1292. stbuf->f_bfree = compatbuf->blocks_free;
  1293. stbuf->f_bavail = compatbuf->blocks_free;
  1294. stbuf->f_files = compatbuf->files;
  1295. stbuf->f_ffree = compatbuf->files_free;
  1296. stbuf->f_namemax = compatbuf->namelen;
  1297. }
  1298. static void convert_statfs_old(struct statfs *oldbuf, struct statvfs *stbuf)
  1299. {
  1300. stbuf->f_bsize = oldbuf->f_bsize;
  1301. stbuf->f_blocks = oldbuf->f_blocks;
  1302. stbuf->f_bfree = oldbuf->f_bfree;
  1303. stbuf->f_bavail = oldbuf->f_bavail;
  1304. stbuf->f_files = oldbuf->f_files;
  1305. stbuf->f_ffree = oldbuf->f_ffree;
  1306. stbuf->f_namemax = oldbuf->f_namelen;
  1307. }
  1308. static int fuse_compat_statfs(struct fuse_fs *fs, const char *path,
  1309. struct statvfs *buf)
  1310. {
  1311. int err;
  1312. if (!fs->compat || fs->compat >= 25) {
  1313. err = fs->op.statfs(fs->compat == 25 ? "/" : path, buf);
  1314. } else if (fs->compat > 11) {
  1315. struct statfs oldbuf;
  1316. err = ((struct fuse_operations_compat22 *) &fs->op)
  1317. ->statfs("/", &oldbuf);
  1318. if (!err)
  1319. convert_statfs_old(&oldbuf, buf);
  1320. } else {
  1321. struct fuse_statfs_compat1 compatbuf;
  1322. memset(&compatbuf, 0, sizeof(struct fuse_statfs_compat1));
  1323. err = ((struct fuse_operations_compat1 *) &fs->op)
  1324. ->statfs(&compatbuf);
  1325. if (!err)
  1326. convert_statfs_compat(&compatbuf, buf);
  1327. }
  1328. return err;
  1329. }
  1330. #else /* __FreeBSD__ || __NetBSD__ */
  1331. static inline int fuse_compat_open(struct fuse_fs *fs, char *path,
  1332. struct fuse_file_info *fi)
  1333. {
  1334. return fs->op.open(path, fi);
  1335. }
  1336. static inline int fuse_compat_release(struct fuse_fs *fs, const char *path,
  1337. struct fuse_file_info *fi)
  1338. {
  1339. return fs->op.release(path, fi);
  1340. }
  1341. static inline int fuse_compat_opendir(struct fuse_fs *fs, const char *path,
  1342. struct fuse_file_info *fi)
  1343. {
  1344. return fs->op.opendir(path, fi);
  1345. }
  1346. static inline int fuse_compat_statfs(struct fuse_fs *fs, const char *path,
  1347. struct statvfs *buf)
  1348. {
  1349. return fs->op.statfs(fs->compat == 25 ? "/" : path, buf);
  1350. }
  1351. #endif /* __FreeBSD__ || __NetBSD__ */
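/*
 * The fuse_fs_*() wrappers below form the dispatch layer: each one stores
 * the filesystem's user_data in the calling thread's context, logs the
 * call when debugging is enabled, and forwards to the corresponding
 * fuse_operations callback, returning -ENOSYS (or a harmless default)
 * when the callback is not implemented.
 */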
  1352. int fuse_fs_getattr(struct fuse_fs *fs, const char *path, struct stat *buf)
  1353. {
  1354. fuse_get_context()->private_data = fs->user_data;
  1355. if (fs->op.getattr) {
  1356. if (fs->debug)
  1357. fprintf(stderr, "getattr %s\n", path);
  1358. return fs->op.getattr(path, buf);
  1359. } else {
  1360. return -ENOSYS;
  1361. }
  1362. }
  1363. int fuse_fs_fgetattr(struct fuse_fs *fs, const char *path, struct stat *buf,
  1364. struct fuse_file_info *fi)
  1365. {
  1366. fuse_get_context()->private_data = fs->user_data;
  1367. if (fs->op.fgetattr) {
  1368. if (fs->debug)
  1369. fprintf(stderr, "fgetattr[%llu] %s\n",
  1370. (unsigned long long) fi->fh, path);
  1371. return fs->op.fgetattr(path, buf, fi);
  1372. } else if (path && fs->op.getattr) {
  1373. if (fs->debug)
  1374. fprintf(stderr, "getattr %s\n", path);
  1375. return fs->op.getattr(path, buf);
  1376. } else {
  1377. return -ENOSYS;
  1378. }
  1379. }
  1380. int fuse_fs_rename(struct fuse_fs *fs, const char *oldpath,
  1381. const char *newpath)
  1382. {
  1383. fuse_get_context()->private_data = fs->user_data;
  1384. if (fs->op.rename) {
  1385. if (fs->debug)
  1386. fprintf(stderr, "rename %s %s\n", oldpath, newpath);
  1387. return fs->op.rename(oldpath, newpath);
  1388. } else {
  1389. return -ENOSYS;
  1390. }
  1391. }
  1392. int fuse_fs_unlink(struct fuse_fs *fs, const char *path)
  1393. {
  1394. fuse_get_context()->private_data = fs->user_data;
  1395. if (fs->op.unlink) {
  1396. if (fs->debug)
  1397. fprintf(stderr, "unlink %s\n", path);
  1398. return fs->op.unlink(path);
  1399. } else {
  1400. return -ENOSYS;
  1401. }
  1402. }
  1403. int fuse_fs_rmdir(struct fuse_fs *fs, const char *path)
  1404. {
  1405. fuse_get_context()->private_data = fs->user_data;
  1406. if (fs->op.rmdir) {
  1407. if (fs->debug)
  1408. fprintf(stderr, "rmdir %s\n", path);
  1409. return fs->op.rmdir(path);
  1410. } else {
  1411. return -ENOSYS;
  1412. }
  1413. }
  1414. int fuse_fs_symlink(struct fuse_fs *fs, const char *linkname, const char *path)
  1415. {
  1416. fuse_get_context()->private_data = fs->user_data;
  1417. if (fs->op.symlink) {
  1418. if (fs->debug)
  1419. fprintf(stderr, "symlink %s %s\n", linkname, path);
  1420. return fs->op.symlink(linkname, path);
  1421. } else {
  1422. return -ENOSYS;
  1423. }
  1424. }
  1425. int fuse_fs_link(struct fuse_fs *fs, const char *oldpath, const char *newpath)
  1426. {
  1427. fuse_get_context()->private_data = fs->user_data;
  1428. if (fs->op.link) {
  1429. if (fs->debug)
  1430. fprintf(stderr, "link %s %s\n", oldpath, newpath);
  1431. return fs->op.link(oldpath, newpath);
  1432. } else {
  1433. return -ENOSYS;
  1434. }
  1435. }
  1436. int fuse_fs_release(struct fuse_fs *fs, const char *path,
  1437. struct fuse_file_info *fi)
  1438. {
  1439. fuse_get_context()->private_data = fs->user_data;
  1440. if (fs->op.release) {
  1441. if (fs->debug)
  1442. fprintf(stderr, "release%s[%llu] flags: 0x%x\n",
  1443. fi->flush ? "+flush" : "",
  1444. (unsigned long long) fi->fh, fi->flags);
  1445. return fuse_compat_release(fs, path, fi);
  1446. } else {
  1447. return 0;
  1448. }
  1449. }
  1450. int fuse_fs_opendir(struct fuse_fs *fs, const char *path,
  1451. struct fuse_file_info *fi)
  1452. {
  1453. fuse_get_context()->private_data = fs->user_data;
  1454. if (fs->op.opendir) {
  1455. int err;
  1456. if (fs->debug)
  1457. fprintf(stderr, "opendir flags: 0x%x %s\n", fi->flags,
  1458. path);
  1459. err = fuse_compat_opendir(fs, path, fi);
  1460. if (fs->debug && !err)
1461. fprintf(stderr, " opendir[%llu] flags: 0x%x %s\n",
  1462. (unsigned long long) fi->fh, fi->flags, path);
  1463. return err;
  1464. } else {
  1465. return 0;
  1466. }
  1467. }
  1468. int fuse_fs_open(struct fuse_fs *fs, const char *path,
  1469. struct fuse_file_info *fi)
  1470. {
  1471. fuse_get_context()->private_data = fs->user_data;
  1472. if (fs->op.open) {
  1473. int err;
  1474. if (fs->debug)
  1475. fprintf(stderr, "open flags: 0x%x %s\n", fi->flags,
  1476. path);
  1477. err = fuse_compat_open(fs, path, fi);
  1478. if (fs->debug && !err)
1479. fprintf(stderr, " open[%llu] flags: 0x%x %s\n",
  1480. (unsigned long long) fi->fh, fi->flags, path);
  1481. return err;
  1482. } else {
  1483. return 0;
  1484. }
  1485. }
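/*
 * Reads prefer the zero-copy read_buf() callback.  When only read() is
 * implemented, fuse_fs_read_buf() allocates a flat buffer of the requested
 * size and wraps the result in a single-element fuse_bufvec, so callers
 * handle both cases the same way.  fuse_fs_write_buf() does the reverse:
 * it flattens a bufvec with fuse_buf_copy() when only write() is present.
 */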
  1486. static void fuse_free_buf(struct fuse_bufvec *buf)
  1487. {
  1488. if (buf != NULL) {
  1489. size_t i;
  1490. for (i = 0; i < buf->count; i++)
  1491. free(buf->buf[i].mem);
  1492. free(buf);
  1493. }
  1494. }
  1495. int fuse_fs_read_buf(struct fuse_fs *fs, const char *path,
  1496. struct fuse_bufvec **bufp, size_t size, off_t off,
  1497. struct fuse_file_info *fi)
  1498. {
  1499. fuse_get_context()->private_data = fs->user_data;
  1500. if (fs->op.read || fs->op.read_buf) {
  1501. int res;
  1502. if (fs->debug)
  1503. fprintf(stderr,
  1504. "read[%llu] %zu bytes from %llu flags: 0x%x\n",
  1505. (unsigned long long) fi->fh,
  1506. size, (unsigned long long) off, fi->flags);
  1507. if (fs->op.read_buf) {
  1508. res = fs->op.read_buf(path, bufp, size, off, fi);
  1509. } else {
  1510. struct fuse_bufvec *buf;
  1511. void *mem;
  1512. buf = malloc(sizeof(struct fuse_bufvec));
  1513. if (buf == NULL)
  1514. return -ENOMEM;
  1515. mem = malloc(size);
  1516. if (mem == NULL) {
  1517. free(buf);
  1518. return -ENOMEM;
  1519. }
  1520. *buf = FUSE_BUFVEC_INIT(size);
  1521. buf->buf[0].mem = mem;
  1522. *bufp = buf;
  1523. res = fs->op.read(path, mem, size, off, fi);
  1524. if (res >= 0)
  1525. buf->buf[0].size = res;
  1526. }
  1527. if (fs->debug && res >= 0)
  1528. fprintf(stderr, " read[%llu] %zu bytes from %llu\n",
  1529. (unsigned long long) fi->fh,
  1530. fuse_buf_size(*bufp),
  1531. (unsigned long long) off);
  1532. if (res >= 0 && fuse_buf_size(*bufp) > (int) size)
  1533. fprintf(stderr, "fuse: read too many bytes\n");
  1534. if (res < 0)
  1535. return res;
  1536. return 0;
  1537. } else {
  1538. return -ENOSYS;
  1539. }
  1540. }
  1541. int fuse_fs_read(struct fuse_fs *fs, const char *path, char *mem, size_t size,
  1542. off_t off, struct fuse_file_info *fi)
  1543. {
  1544. int res;
  1545. struct fuse_bufvec *buf = NULL;
  1546. res = fuse_fs_read_buf(fs, path, &buf, size, off, fi);
  1547. if (res == 0) {
  1548. struct fuse_bufvec dst = FUSE_BUFVEC_INIT(size);
  1549. dst.buf[0].mem = mem;
  1550. res = fuse_buf_copy(&dst, buf, 0);
  1551. }
  1552. fuse_free_buf(buf);
  1553. return res;
  1554. }
  1555. int fuse_fs_write_buf(struct fuse_fs *fs, const char *path,
  1556. struct fuse_bufvec *buf, off_t off,
  1557. struct fuse_file_info *fi)
  1558. {
  1559. fuse_get_context()->private_data = fs->user_data;
  1560. if (fs->op.write_buf || fs->op.write) {
  1561. int res;
  1562. size_t size = fuse_buf_size(buf);
  1563. assert(buf->idx == 0 && buf->off == 0);
  1564. if (fs->debug)
  1565. fprintf(stderr,
  1566. "write%s[%llu] %zu bytes to %llu flags: 0x%x\n",
  1567. fi->writepage ? "page" : "",
  1568. (unsigned long long) fi->fh,
  1569. size,
  1570. (unsigned long long) off,
  1571. fi->flags);
  1572. if (fs->op.write_buf) {
  1573. res = fs->op.write_buf(path, buf, off, fi);
  1574. } else {
  1575. void *mem = NULL;
  1576. struct fuse_buf *flatbuf;
  1577. struct fuse_bufvec tmp = FUSE_BUFVEC_INIT(size);
  1578. if (buf->count == 1 &&
  1579. !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
  1580. flatbuf = &buf->buf[0];
  1581. } else {
  1582. res = -ENOMEM;
  1583. mem = malloc(size);
  1584. if (mem == NULL)
  1585. goto out;
  1586. tmp.buf[0].mem = mem;
  1587. res = fuse_buf_copy(&tmp, buf, 0);
  1588. if (res <= 0)
  1589. goto out_free;
  1590. tmp.buf[0].size = res;
  1591. flatbuf = &tmp.buf[0];
  1592. }
  1593. res = fs->op.write(path, flatbuf->mem, flatbuf->size,
  1594. off, fi);
  1595. out_free:
  1596. free(mem);
  1597. }
  1598. out:
  1599. if (fs->debug && res >= 0)
  1600. fprintf(stderr, " write%s[%llu] %u bytes to %llu\n",
  1601. fi->writepage ? "page" : "",
  1602. (unsigned long long) fi->fh, res,
  1603. (unsigned long long) off);
  1604. if (res > (int) size)
  1605. fprintf(stderr, "fuse: wrote too many bytes\n");
  1606. return res;
  1607. } else {
  1608. return -ENOSYS;
  1609. }
  1610. }
  1611. int fuse_fs_write(struct fuse_fs *fs, const char *path, const char *mem,
  1612. size_t size, off_t off, struct fuse_file_info *fi)
  1613. {
  1614. struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(size);
  1615. bufv.buf[0].mem = (void *) mem;
  1616. return fuse_fs_write_buf(fs, path, &bufv, off, fi);
  1617. }
  1618. int fuse_fs_fsync(struct fuse_fs *fs, const char *path, int datasync,
  1619. struct fuse_file_info *fi)
  1620. {
  1621. fuse_get_context()->private_data = fs->user_data;
  1622. if (fs->op.fsync) {
  1623. if (fs->debug)
  1624. fprintf(stderr, "fsync[%llu] datasync: %i\n",
  1625. (unsigned long long) fi->fh, datasync);
  1626. return fs->op.fsync(path, datasync, fi);
  1627. } else {
  1628. return -ENOSYS;
  1629. }
  1630. }
  1631. int fuse_fs_fsyncdir(struct fuse_fs *fs, const char *path, int datasync,
  1632. struct fuse_file_info *fi)
  1633. {
  1634. fuse_get_context()->private_data = fs->user_data;
  1635. if (fs->op.fsyncdir) {
  1636. if (fs->debug)
  1637. fprintf(stderr, "fsyncdir[%llu] datasync: %i\n",
  1638. (unsigned long long) fi->fh, datasync);
  1639. return fs->op.fsyncdir(path, datasync, fi);
  1640. } else {
  1641. return -ENOSYS;
  1642. }
  1643. }
  1644. int fuse_fs_flush(struct fuse_fs *fs, const char *path,
  1645. struct fuse_file_info *fi)
  1646. {
  1647. fuse_get_context()->private_data = fs->user_data;
  1648. if (fs->op.flush) {
  1649. if (fs->debug)
  1650. fprintf(stderr, "flush[%llu]\n",
  1651. (unsigned long long) fi->fh);
  1652. return fs->op.flush(path, fi);
  1653. } else {
  1654. return -ENOSYS;
  1655. }
  1656. }
  1657. int fuse_fs_statfs(struct fuse_fs *fs, const char *path, struct statvfs *buf)
  1658. {
  1659. fuse_get_context()->private_data = fs->user_data;
  1660. if (fs->op.statfs) {
  1661. if (fs->debug)
  1662. fprintf(stderr, "statfs %s\n", path);
  1663. return fuse_compat_statfs(fs, path, buf);
  1664. } else {
  1665. buf->f_namemax = 255;
  1666. buf->f_bsize = 512;
  1667. return 0;
  1668. }
  1669. }
  1670. int fuse_fs_releasedir(struct fuse_fs *fs, const char *path,
  1671. struct fuse_file_info *fi)
  1672. {
  1673. fuse_get_context()->private_data = fs->user_data;
  1674. if (fs->op.releasedir) {
  1675. if (fs->debug)
  1676. fprintf(stderr, "releasedir[%llu] flags: 0x%x\n",
  1677. (unsigned long long) fi->fh, fi->flags);
  1678. return fs->op.releasedir(path, fi);
  1679. } else {
  1680. return 0;
  1681. }
  1682. }
  1683. static int fill_dir_old(struct fuse_dirhandle *dh, const char *name, int type,
  1684. ino_t ino)
  1685. {
  1686. int res;
  1687. struct stat stbuf;
  1688. memset(&stbuf, 0, sizeof(stbuf));
  1689. stbuf.st_mode = type << 12;
  1690. stbuf.st_ino = ino;
  1691. res = dh->filler(dh->buf, name, &stbuf, 0);
  1692. return res ? -ENOMEM : 0;
  1693. }
  1694. int fuse_fs_readdir(struct fuse_fs *fs, const char *path, void *buf,
  1695. fuse_fill_dir_t filler, off_t off,
  1696. struct fuse_file_info *fi)
  1697. {
  1698. fuse_get_context()->private_data = fs->user_data;
  1699. if (fs->op.readdir) {
  1700. if (fs->debug)
  1701. fprintf(stderr, "readdir[%llu] from %llu\n",
  1702. (unsigned long long) fi->fh,
  1703. (unsigned long long) off);
  1704. return fs->op.readdir(path, buf, filler, off, fi);
  1705. } else if (fs->op.getdir) {
  1706. struct fuse_dirhandle dh;
  1707. if (fs->debug)
  1708. fprintf(stderr, "getdir[%llu]\n",
  1709. (unsigned long long) fi->fh);
  1710. dh.filler = filler;
  1711. dh.buf = buf;
  1712. return fs->op.getdir(path, &dh, fill_dir_old);
  1713. } else {
  1714. return -ENOSYS;
  1715. }
  1716. }
  1717. int fuse_fs_create(struct fuse_fs *fs, const char *path, mode_t mode,
  1718. struct fuse_file_info *fi)
  1719. {
  1720. fuse_get_context()->private_data = fs->user_data;
  1721. if (fs->op.create) {
  1722. int err;
  1723. if (fs->debug)
  1724. fprintf(stderr,
  1725. "create flags: 0x%x %s 0%o umask=0%03o\n",
  1726. fi->flags, path, mode,
  1727. fuse_get_context()->umask);
  1728. err = fs->op.create(path, mode, fi);
  1729. if (fs->debug && !err)
  1730. fprintf(stderr, " create[%llu] flags: 0x%x %s\n",
  1731. (unsigned long long) fi->fh, fi->flags, path);
  1732. return err;
  1733. } else {
  1734. return -ENOSYS;
  1735. }
  1736. }
  1737. int fuse_fs_lock(struct fuse_fs *fs, const char *path,
  1738. struct fuse_file_info *fi, int cmd, struct flock *lock)
  1739. {
  1740. fuse_get_context()->private_data = fs->user_data;
  1741. if (fs->op.lock) {
  1742. if (fs->debug)
  1743. fprintf(stderr, "lock[%llu] %s %s start: %llu len: %llu pid: %llu\n",
  1744. (unsigned long long) fi->fh,
  1745. (cmd == F_GETLK ? "F_GETLK" :
  1746. (cmd == F_SETLK ? "F_SETLK" :
  1747. (cmd == F_SETLKW ? "F_SETLKW" : "???"))),
  1748. (lock->l_type == F_RDLCK ? "F_RDLCK" :
  1749. (lock->l_type == F_WRLCK ? "F_WRLCK" :
  1750. (lock->l_type == F_UNLCK ? "F_UNLCK" :
  1751. "???"))),
  1752. (unsigned long long) lock->l_start,
  1753. (unsigned long long) lock->l_len,
  1754. (unsigned long long) lock->l_pid);
  1755. return fs->op.lock(path, fi, cmd, lock);
  1756. } else {
  1757. return -ENOSYS;
  1758. }
  1759. }
  1760. int fuse_fs_flock(struct fuse_fs *fs, const char *path,
  1761. struct fuse_file_info *fi, int op)
  1762. {
  1763. fuse_get_context()->private_data = fs->user_data;
  1764. if (fs->op.flock) {
  1765. if (fs->debug) {
  1766. int xop = op & ~LOCK_NB;
  1767. fprintf(stderr, "lock[%llu] %s%s\n",
  1768. (unsigned long long) fi->fh,
  1769. xop == LOCK_SH ? "LOCK_SH" :
  1770. (xop == LOCK_EX ? "LOCK_EX" :
  1771. (xop == LOCK_UN ? "LOCK_UN" : "???")),
  1772. (op & LOCK_NB) ? "|LOCK_NB" : "");
  1773. }
  1774. return fs->op.flock(path, fi, op);
  1775. } else {
  1776. return -ENOSYS;
  1777. }
  1778. }
  1779. int fuse_fs_chown(struct fuse_fs *fs, const char *path, uid_t uid, gid_t gid)
  1780. {
  1781. fuse_get_context()->private_data = fs->user_data;
  1782. if (fs->op.chown) {
  1783. if (fs->debug)
  1784. fprintf(stderr, "chown %s %lu %lu\n", path,
  1785. (unsigned long) uid, (unsigned long) gid);
  1786. return fs->op.chown(path, uid, gid);
  1787. } else {
  1788. return -ENOSYS;
  1789. }
  1790. }
  1791. int fuse_fs_truncate(struct fuse_fs *fs, const char *path, off_t size)
  1792. {
  1793. fuse_get_context()->private_data = fs->user_data;
  1794. if (fs->op.truncate) {
  1795. if (fs->debug)
  1796. fprintf(stderr, "truncate %s %llu\n", path,
  1797. (unsigned long long) size);
  1798. return fs->op.truncate(path, size);
  1799. } else {
  1800. return -ENOSYS;
  1801. }
  1802. }
  1803. int fuse_fs_ftruncate(struct fuse_fs *fs, const char *path, off_t size,
  1804. struct fuse_file_info *fi)
  1805. {
  1806. fuse_get_context()->private_data = fs->user_data;
  1807. if (fs->op.ftruncate) {
  1808. if (fs->debug)
  1809. fprintf(stderr, "ftruncate[%llu] %llu\n",
  1810. (unsigned long long) fi->fh,
  1811. (unsigned long long) size);
  1812. return fs->op.ftruncate(path, size, fi);
  1813. } else if (path && fs->op.truncate) {
  1814. if (fs->debug)
  1815. fprintf(stderr, "truncate %s %llu\n", path,
  1816. (unsigned long long) size);
  1817. return fs->op.truncate(path, size);
  1818. } else {
  1819. return -ENOSYS;
  1820. }
  1821. }
  1822. int fuse_fs_utimens(struct fuse_fs *fs, const char *path,
  1823. const struct timespec tv[2])
  1824. {
  1825. fuse_get_context()->private_data = fs->user_data;
  1826. if (fs->op.utimens) {
  1827. if (fs->debug)
  1828. fprintf(stderr, "utimens %s %li.%09lu %li.%09lu\n",
  1829. path, tv[0].tv_sec, tv[0].tv_nsec,
  1830. tv[1].tv_sec, tv[1].tv_nsec);
  1831. return fs->op.utimens(path, tv);
  1832. } else if (fs->op.utime) {
  1833. struct utimbuf buf;
  1834. if (fs->debug)
  1835. fprintf(stderr, "utime %s %li %li\n", path,
  1836. tv[0].tv_sec, tv[1].tv_sec);
  1837. buf.actime = tv[0].tv_sec;
  1838. buf.modtime = tv[1].tv_sec;
  1839. return fs->op.utime(path, &buf);
  1840. } else {
  1841. return -ENOSYS;
  1842. }
  1843. }
  1844. int fuse_fs_access(struct fuse_fs *fs, const char *path, int mask)
  1845. {
  1846. fuse_get_context()->private_data = fs->user_data;
  1847. if (fs->op.access) {
  1848. if (fs->debug)
  1849. fprintf(stderr, "access %s 0%o\n", path, mask);
  1850. return fs->op.access(path, mask);
  1851. } else {
  1852. return -ENOSYS;
  1853. }
  1854. }
  1855. int fuse_fs_readlink(struct fuse_fs *fs, const char *path, char *buf,
  1856. size_t len)
  1857. {
  1858. fuse_get_context()->private_data = fs->user_data;
  1859. if (fs->op.readlink) {
  1860. if (fs->debug)
  1861. fprintf(stderr, "readlink %s %lu\n", path,
  1862. (unsigned long) len);
  1863. return fs->op.readlink(path, buf, len);
  1864. } else {
  1865. return -ENOSYS;
  1866. }
  1867. }
  1868. int fuse_fs_mknod(struct fuse_fs *fs, const char *path, mode_t mode,
  1869. dev_t rdev)
  1870. {
  1871. fuse_get_context()->private_data = fs->user_data;
  1872. if (fs->op.mknod) {
  1873. if (fs->debug)
  1874. fprintf(stderr, "mknod %s 0%o 0x%llx umask=0%03o\n",
  1875. path, mode, (unsigned long long) rdev,
  1876. fuse_get_context()->umask);
  1877. return fs->op.mknod(path, mode, rdev);
  1878. } else {
  1879. return -ENOSYS;
  1880. }
  1881. }
  1882. int fuse_fs_mkdir(struct fuse_fs *fs, const char *path, mode_t mode)
  1883. {
  1884. fuse_get_context()->private_data = fs->user_data;
  1885. if (fs->op.mkdir) {
  1886. if (fs->debug)
  1887. fprintf(stderr, "mkdir %s 0%o umask=0%03o\n",
  1888. path, mode, fuse_get_context()->umask);
  1889. return fs->op.mkdir(path, mode);
  1890. } else {
  1891. return -ENOSYS;
  1892. }
  1893. }
  1894. int fuse_fs_setxattr(struct fuse_fs *fs, const char *path, const char *name,
  1895. const char *value, size_t size, int flags)
  1896. {
  1897. fuse_get_context()->private_data = fs->user_data;
  1898. if (fs->op.setxattr) {
  1899. if (fs->debug)
  1900. fprintf(stderr, "setxattr %s %s %lu 0x%x\n",
  1901. path, name, (unsigned long) size, flags);
  1902. return fs->op.setxattr(path, name, value, size, flags);
  1903. } else {
  1904. return -ENOSYS;
  1905. }
  1906. }
  1907. int fuse_fs_getxattr(struct fuse_fs *fs, const char *path, const char *name,
  1908. char *value, size_t size)
  1909. {
  1910. fuse_get_context()->private_data = fs->user_data;
  1911. if (fs->op.getxattr) {
  1912. if (fs->debug)
  1913. fprintf(stderr, "getxattr %s %s %lu\n",
  1914. path, name, (unsigned long) size);
  1915. return fs->op.getxattr(path, name, value, size);
  1916. } else {
  1917. return -ENOSYS;
  1918. }
  1919. }
  1920. int fuse_fs_listxattr(struct fuse_fs *fs, const char *path, char *list,
  1921. size_t size)
  1922. {
  1923. fuse_get_context()->private_data = fs->user_data;
  1924. if (fs->op.listxattr) {
  1925. if (fs->debug)
  1926. fprintf(stderr, "listxattr %s %lu\n",
  1927. path, (unsigned long) size);
  1928. return fs->op.listxattr(path, list, size);
  1929. } else {
  1930. return -ENOSYS;
  1931. }
  1932. }
  1933. int fuse_fs_bmap(struct fuse_fs *fs, const char *path, size_t blocksize,
  1934. uint64_t *idx)
  1935. {
  1936. fuse_get_context()->private_data = fs->user_data;
  1937. if (fs->op.bmap) {
  1938. if (fs->debug)
  1939. fprintf(stderr, "bmap %s blocksize: %lu index: %llu\n",
  1940. path, (unsigned long) blocksize,
  1941. (unsigned long long) *idx);
  1942. return fs->op.bmap(path, blocksize, idx);
  1943. } else {
  1944. return -ENOSYS;
  1945. }
  1946. }
  1947. int fuse_fs_removexattr(struct fuse_fs *fs, const char *path, const char *name)
  1948. {
  1949. fuse_get_context()->private_data = fs->user_data;
  1950. if (fs->op.removexattr) {
  1951. if (fs->debug)
  1952. fprintf(stderr, "removexattr %s %s\n", path, name);
  1953. return fs->op.removexattr(path, name);
  1954. } else {
  1955. return -ENOSYS;
  1956. }
  1957. }
  1958. int fuse_fs_ioctl(struct fuse_fs *fs, const char *path, int cmd, void *arg,
  1959. struct fuse_file_info *fi, unsigned int flags, void *data)
  1960. {
  1961. fuse_get_context()->private_data = fs->user_data;
  1962. if (fs->op.ioctl) {
  1963. if (fs->debug)
  1964. fprintf(stderr, "ioctl[%llu] 0x%x flags: 0x%x\n",
  1965. (unsigned long long) fi->fh, cmd, flags);
  1966. return fs->op.ioctl(path, cmd, arg, fi, flags, data);
  1967. } else
  1968. return -ENOSYS;
  1969. }
  1970. int fuse_fs_poll(struct fuse_fs *fs, const char *path,
  1971. struct fuse_file_info *fi, struct fuse_pollhandle *ph,
  1972. unsigned *reventsp)
  1973. {
  1974. fuse_get_context()->private_data = fs->user_data;
  1975. if (fs->op.poll) {
  1976. int res;
  1977. if (fs->debug)
  1978. fprintf(stderr, "poll[%llu] ph: %p\n",
  1979. (unsigned long long) fi->fh, ph);
  1980. res = fs->op.poll(path, fi, ph, reventsp);
  1981. if (fs->debug && !res)
  1982. fprintf(stderr, " poll[%llu] revents: 0x%x\n",
  1983. (unsigned long long) fi->fh, *reventsp);
  1984. return res;
  1985. } else
  1986. return -ENOSYS;
  1987. }
  1988. int fuse_fs_fallocate(struct fuse_fs *fs, const char *path, int mode,
  1989. off_t offset, off_t length, struct fuse_file_info *fi)
  1990. {
  1991. fuse_get_context()->private_data = fs->user_data;
  1992. if (fs->op.fallocate) {
  1993. if (fs->debug)
  1994. fprintf(stderr, "fallocate %s mode %x, offset: %llu, length: %llu\n",
  1995. path,
  1996. mode,
  1997. (unsigned long long) offset,
  1998. (unsigned long long) length);
  1999. return fs->op.fallocate(path, mode, offset, length, fi);
  2000. } else
  2001. return -ENOSYS;
  2002. }
  2003. static int is_open(struct fuse *f, fuse_ino_t dir, const char *name)
  2004. {
  2005. struct node *node;
  2006. int isopen = 0;
  2007. pthread_mutex_lock(&f->lock);
  2008. node = lookup_node(f, dir, name);
  2009. if (node && node->open_count > 0)
  2010. isopen = 1;
  2011. pthread_mutex_unlock(&f->lock);
  2012. return isopen;
  2013. }
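/* Pick an unused ".fuse_hidden<nodeid><counter>" name in the same directory,
   so that an unlinked-but-still-open file can be renamed out of the way
   instead of removed (used when "hard_remove" is off).  Retries a limited
   number of times until the name is free both in the node table and on the
   backing filesystem. */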
  2014. static char *hidden_name(struct fuse *f, fuse_ino_t dir, const char *oldname,
  2015. char *newname, size_t bufsize)
  2016. {
  2017. struct stat buf;
  2018. struct node *node;
  2019. struct node *newnode;
  2020. char *newpath;
  2021. int res;
  2022. int failctr = 10;
  2023. do {
  2024. pthread_mutex_lock(&f->lock);
  2025. node = lookup_node(f, dir, oldname);
  2026. if (node == NULL) {
  2027. pthread_mutex_unlock(&f->lock);
  2028. return NULL;
  2029. }
  2030. do {
  2031. f->hidectr ++;
  2032. snprintf(newname, bufsize, ".fuse_hidden%08x%08x",
  2033. (unsigned int) node->nodeid, f->hidectr);
  2034. newnode = lookup_node(f, dir, newname);
  2035. } while(newnode);
  2036. res = try_get_path(f, dir, newname, &newpath, NULL, false);
  2037. pthread_mutex_unlock(&f->lock);
  2038. if (res)
  2039. break;
  2040. memset(&buf, 0, sizeof(buf));
  2041. res = fuse_fs_getattr(f->fs, newpath, &buf);
  2042. if (res == -ENOENT)
  2043. break;
  2044. free(newpath);
  2045. newpath = NULL;
  2046. } while(res == 0 && --failctr);
  2047. return newpath;
  2048. }
  2049. static int hide_node(struct fuse *f, const char *oldpath,
  2050. fuse_ino_t dir, const char *oldname)
  2051. {
  2052. char newname[64];
  2053. char *newpath;
  2054. int err = -EBUSY;
  2055. newpath = hidden_name(f, dir, oldname, newname, sizeof(newname));
  2056. if (newpath) {
  2057. err = fuse_fs_rename(f->fs, oldpath, newpath);
  2058. if (!err)
  2059. err = rename_node(f, dir, oldname, dir, newname, 1);
  2060. free(newpath);
  2061. }
  2062. return err;
  2063. }
  2064. static int mtime_eq(const struct stat *stbuf, const struct timespec *ts)
  2065. {
  2066. return stbuf->st_mtime == ts->tv_sec &&
  2067. ST_MTIM_NSEC(stbuf) == ts->tv_nsec;
  2068. }
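/* Prefer CLOCK_MONOTONIC for cache-age timestamps; fall back to
   CLOCK_REALTIME if the running kernel rejects it with EINVAL. */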
  2069. #ifndef CLOCK_MONOTONIC
  2070. #define CLOCK_MONOTONIC CLOCK_REALTIME
  2071. #endif
  2072. static void curr_time(struct timespec *now)
  2073. {
  2074. static clockid_t clockid = CLOCK_MONOTONIC;
  2075. int res = clock_gettime(clockid, now);
  2076. if (res == -1 && errno == EINVAL) {
  2077. clockid = CLOCK_REALTIME;
  2078. res = clock_gettime(clockid, now);
  2079. }
  2080. if (res == -1) {
  2081. perror("fuse: clock_gettime");
  2082. abort();
  2083. }
  2084. }
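/* Record the latest mtime and size for auto_cache: if either changed since
   the last check, the cached data for this node is no longer trusted
   (cache_valid is cleared). */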
  2085. static void update_stat(struct node *node, const struct stat *stbuf)
  2086. {
  2087. if (node->cache_valid && (!mtime_eq(stbuf, &node->mtime) ||
  2088. stbuf->st_size != node->size))
  2089. node->cache_valid = 0;
  2090. node->mtime.tv_sec = stbuf->st_mtime;
  2091. node->mtime.tv_nsec = ST_MTIM_NSEC(stbuf);
  2092. node->size = stbuf->st_size;
  2093. curr_time(&node->stat_updated);
  2094. }
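/* Stat the path, create or look up the corresponding node (taking a lookup
   reference), and fill in the entry parameters with the configured
   entry/attr timeouts before replying to the kernel. */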
  2095. static int lookup_path(struct fuse *f, fuse_ino_t nodeid,
  2096. const char *name, const char *path,
  2097. struct fuse_entry_param *e, struct fuse_file_info *fi)
  2098. {
  2099. int res;
  2100. memset(e, 0, sizeof(struct fuse_entry_param));
  2101. if (fi)
  2102. res = fuse_fs_fgetattr(f->fs, path, &e->attr, fi);
  2103. else
  2104. res = fuse_fs_getattr(f->fs, path, &e->attr);
  2105. if (res == 0) {
  2106. struct node *node;
  2107. node = find_node(f, nodeid, name);
  2108. if (node == NULL)
  2109. res = -ENOMEM;
  2110. else {
  2111. e->ino = node->nodeid;
  2112. e->generation = node->generation;
  2113. e->entry_timeout = f->conf.entry_timeout;
  2114. e->attr_timeout = f->conf.attr_timeout;
  2115. if (f->conf.auto_cache) {
  2116. pthread_mutex_lock(&f->lock);
  2117. update_stat(node, &e->attr);
  2118. pthread_mutex_unlock(&f->lock);
  2119. }
  2120. set_stat(f, e->ino, &e->attr);
  2121. if (f->conf.debug)
  2122. fprintf(stderr, " NODEID: %lu\n",
  2123. (unsigned long) e->ino);
  2124. }
  2125. }
  2126. return res;
  2127. }
  2128. static struct fuse_context_i *fuse_get_context_internal(void)
  2129. {
  2130. struct fuse_context_i *c;
  2131. c = (struct fuse_context_i *) pthread_getspecific(fuse_context_key);
  2132. if (c == NULL) {
  2133. c = (struct fuse_context_i *)
  2134. calloc(1, sizeof(struct fuse_context_i));
  2135. if (c == NULL) {
  2136. /* This is hard to deal with properly, so just
  2137. abort. If memory is so low that the
  2138. context cannot be allocated, there's not
  2139. much hope for the filesystem anyway */
  2140. fprintf(stderr, "fuse: failed to allocate thread specific data\n");
  2141. abort();
  2142. }
  2143. pthread_setspecific(fuse_context_key, c);
  2144. }
  2145. return c;
  2146. }
  2147. static void fuse_freecontext(void *data)
  2148. {
  2149. free(data);
  2150. }
  2151. static int fuse_create_context_key(void)
  2152. {
  2153. int err = 0;
  2154. pthread_mutex_lock(&fuse_context_lock);
  2155. if (!fuse_context_ref) {
  2156. err = pthread_key_create(&fuse_context_key, fuse_freecontext);
  2157. if (err) {
  2158. fprintf(stderr, "fuse: failed to create thread specific key: %s\n",
  2159. strerror(err));
  2160. pthread_mutex_unlock(&fuse_context_lock);
  2161. return -1;
  2162. }
  2163. }
  2164. fuse_context_ref++;
  2165. pthread_mutex_unlock(&fuse_context_lock);
  2166. return 0;
  2167. }
  2168. static void fuse_delete_context_key(void)
  2169. {
  2170. pthread_mutex_lock(&fuse_context_lock);
  2171. fuse_context_ref--;
  2172. if (!fuse_context_ref) {
  2173. free(pthread_getspecific(fuse_context_key));
  2174. pthread_key_delete(fuse_context_key);
  2175. }
  2176. pthread_mutex_unlock(&fuse_context_lock);
  2177. }
  2178. static struct fuse *req_fuse_prepare(fuse_req_t req)
  2179. {
  2180. struct fuse_context_i *c = fuse_get_context_internal();
  2181. const struct fuse_ctx *ctx = fuse_req_ctx(req);
  2182. c->req = req;
  2183. c->ctx.fuse = req_fuse(req);
  2184. c->ctx.uid = ctx->uid;
  2185. c->ctx.gid = ctx->gid;
  2186. c->ctx.pid = ctx->pid;
  2187. c->ctx.umask = ctx->umask;
  2188. return c->ctx.fuse;
  2189. }
  2190. static inline void reply_err(fuse_req_t req, int err)
  2191. {
  2192. /* the high-level ops return negated errno values, but fuse_reply_err() expects a plain errno, hence the sign flip */
  2193. fuse_reply_err(req, -err);
  2194. }
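/* Reply to a lookup-style request.  If the kernel has already aborted the
   request (fuse_reply_entry() returns -ENOENT), the lookup reference taken
   in lookup_path() must be dropped again. */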
  2195. static void reply_entry(fuse_req_t req, const struct fuse_entry_param *e,
  2196. int err)
  2197. {
  2198. if (!err) {
  2199. struct fuse *f = req_fuse(req);
  2200. if (fuse_reply_entry(req, e) == -ENOENT) {
  2201. /* Skip forget for negative result */
  2202. if (e->ino != 0)
  2203. forget_node(f, e->ino, 1);
  2204. }
  2205. } else
  2206. reply_err(req, err);
  2207. }
  2208. void fuse_fs_init(struct fuse_fs *fs, struct fuse_conn_info *conn)
  2209. {
  2210. fuse_get_context()->private_data = fs->user_data;
  2211. if (!fs->op.write_buf)
  2212. conn->want &= ~FUSE_CAP_SPLICE_READ;
  2213. if (!fs->op.lock)
  2214. conn->want &= ~FUSE_CAP_POSIX_LOCKS;
  2215. if (!fs->op.flock)
  2216. conn->want &= ~FUSE_CAP_FLOCK_LOCKS;
  2217. if (fs->op.init)
  2218. fs->user_data = fs->op.init(conn);
  2219. }
  2220. static void fuse_lib_init(void *data, struct fuse_conn_info *conn)
  2221. {
  2222. struct fuse *f = (struct fuse *) data;
  2223. struct fuse_context_i *c = fuse_get_context_internal();
  2224. memset(c, 0, sizeof(*c));
  2225. c->ctx.fuse = f;
  2226. conn->want |= FUSE_CAP_EXPORT_SUPPORT;
  2227. fuse_fs_init(f->fs, conn);
  2228. }
  2229. void fuse_fs_destroy(struct fuse_fs *fs)
  2230. {
  2231. fuse_get_context()->private_data = fs->user_data;
  2232. if (fs->op.destroy)
  2233. fs->op.destroy(fs->user_data);
  2234. if (fs->m)
  2235. fuse_put_module(fs->m);
  2236. free(fs);
  2237. }
  2238. static void fuse_lib_destroy(void *data)
  2239. {
  2240. struct fuse *f = (struct fuse *) data;
  2241. struct fuse_context_i *c = fuse_get_context_internal();
  2242. memset(c, 0, sizeof(*c));
  2243. c->ctx.fuse = f;
  2244. fuse_fs_destroy(f->fs);
  2245. f->fs = NULL;
  2246. }
  2247. static void fuse_lib_lookup(fuse_req_t req, fuse_ino_t parent,
  2248. const char *name)
  2249. {
  2250. struct fuse *f = req_fuse_prepare(req);
  2251. struct fuse_entry_param e;
  2252. char *path;
  2253. int err;
  2254. struct node *dot = NULL;
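/* "." and ".." are resolved locally from the node table (the kernel only
   sends such lookups when export support is enabled): "." pins the node
   itself, ".." redirects the lookup to the parent.  name is set to NULL so
   that the path of the node itself is resolved. */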
  2255. if (name[0] == '.') {
  2256. int len = strlen(name);
  2257. if (len == 1 || (name[1] == '.' && len == 2)) {
  2258. pthread_mutex_lock(&f->lock);
  2259. if (len == 1) {
  2260. if (f->conf.debug)
  2261. fprintf(stderr, "LOOKUP-DOT\n");
  2262. dot = get_node_nocheck(f, parent);
  2263. if (dot == NULL) {
  2264. pthread_mutex_unlock(&f->lock);
  2265. reply_entry(req, &e, -ESTALE);
  2266. return;
  2267. }
  2268. dot->refctr++;
  2269. } else {
  2270. if (f->conf.debug)
  2271. fprintf(stderr, "LOOKUP-DOTDOT\n");
  2272. parent = get_node(f, parent)->parent->nodeid;
  2273. }
  2274. pthread_mutex_unlock(&f->lock);
  2275. name = NULL;
  2276. }
  2277. }
  2278. err = get_path_name(f, parent, name, &path);
  2279. if (!err) {
  2280. struct fuse_intr_data d;
  2281. if (f->conf.debug)
  2282. fprintf(stderr, "LOOKUP %s\n", path);
  2283. fuse_prepare_interrupt(f, req, &d);
  2284. err = lookup_path(f, parent, name, path, &e, NULL);
  2285. if (err == -ENOENT && f->conf.negative_timeout != 0.0) {
  2286. e.ino = 0;
  2287. e.entry_timeout = f->conf.negative_timeout;
  2288. err = 0;
  2289. }
  2290. fuse_finish_interrupt(f, req, &d);
  2291. free_path(f, parent, path);
  2292. }
  2293. if (dot) {
  2294. pthread_mutex_lock(&f->lock);
  2295. unref_node(f, dot);
  2296. pthread_mutex_unlock(&f->lock);
  2297. }
  2298. reply_entry(req, &e, err);
  2299. }
  2300. static void do_forget(struct fuse *f, fuse_ino_t ino, uint64_t nlookup)
  2301. {
  2302. if (f->conf.debug)
  2303. fprintf(stderr, "FORGET %llu/%llu\n", (unsigned long long)ino,
  2304. (unsigned long long) nlookup);
  2305. forget_node(f, ino, nlookup);
  2306. }
  2307. static void fuse_lib_forget(fuse_req_t req, fuse_ino_t ino,
  2308. unsigned long nlookup)
  2309. {
  2310. do_forget(req_fuse(req), ino, nlookup);
  2311. fuse_reply_none(req);
  2312. }
  2313. static void fuse_lib_forget_multi(fuse_req_t req, size_t count,
  2314. struct fuse_forget_data *forgets)
  2315. {
  2316. struct fuse *f = req_fuse(req);
  2317. size_t i;
  2318. for (i = 0; i < count; i++)
  2319. do_forget(f, forgets[i].ino, forgets[i].nlookup);
  2320. fuse_reply_none(req);
  2321. }
  2322. static void fuse_lib_getattr(fuse_req_t req, fuse_ino_t ino,
  2323. struct fuse_file_info *fi)
  2324. {
  2325. struct fuse *f = req_fuse_prepare(req);
  2326. struct stat buf;
  2327. char *path;
  2328. int err;
  2329. memset(&buf, 0, sizeof(buf));
  2330. if (fi != NULL && f->fs->op.fgetattr)
  2331. err = get_path_nullok(f, ino, &path);
  2332. else
  2333. err = get_path(f, ino, &path);
  2334. if (!err) {
  2335. struct fuse_intr_data d;
  2336. fuse_prepare_interrupt(f, req, &d);
  2337. if (fi)
  2338. err = fuse_fs_fgetattr(f->fs, path, &buf, fi);
  2339. else
  2340. err = fuse_fs_getattr(f->fs, path, &buf);
  2341. fuse_finish_interrupt(f, req, &d);
  2342. free_path(f, ino, path);
  2343. }
  2344. if (!err) {
  2345. struct node *node;
  2346. pthread_mutex_lock(&f->lock);
  2347. node = get_node(f, ino);
  2348. if (node->is_hidden && buf.st_nlink > 0)
  2349. buf.st_nlink--;
  2350. if (f->conf.auto_cache)
  2351. update_stat(node, &buf);
  2352. pthread_mutex_unlock(&f->lock);
  2353. set_stat(f, ino, &buf);
  2354. fuse_reply_attr(req, &buf, f->conf.attr_timeout);
  2355. } else
  2356. reply_err(req, err);
  2357. }
  2358. int fuse_fs_chmod(struct fuse_fs *fs, const char *path, mode_t mode)
  2359. {
  2360. fuse_get_context()->private_data = fs->user_data;
  2361. if (fs->op.chmod)
  2362. return fs->op.chmod(path, mode);
  2363. else
  2364. return -ENOSYS;
  2365. }
  2366. static void fuse_lib_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
  2367. int valid, struct fuse_file_info *fi)
  2368. {
  2369. struct fuse *f = req_fuse_prepare(req);
  2370. struct stat buf;
  2371. char *path;
  2372. int err;
  2373. memset(&buf, 0, sizeof(buf));
  2374. if (valid == FUSE_SET_ATTR_SIZE && fi != NULL &&
  2375. f->fs->op.ftruncate && f->fs->op.fgetattr)
  2376. err = get_path_nullok(f, ino, &path);
  2377. else
  2378. err = get_path(f, ino, &path);
  2379. if (!err) {
  2380. struct fuse_intr_data d;
  2381. fuse_prepare_interrupt(f, req, &d);
  2382. err = 0;
  2383. if (!err && (valid & FUSE_SET_ATTR_MODE))
  2384. err = fuse_fs_chmod(f->fs, path, attr->st_mode);
  2385. if (!err && (valid & (FUSE_SET_ATTR_UID | FUSE_SET_ATTR_GID))) {
  2386. uid_t uid = (valid & FUSE_SET_ATTR_UID) ?
  2387. attr->st_uid : (uid_t) -1;
  2388. gid_t gid = (valid & FUSE_SET_ATTR_GID) ?
  2389. attr->st_gid : (gid_t) -1;
  2390. err = fuse_fs_chown(f->fs, path, uid, gid);
  2391. }
  2392. if (!err && (valid & FUSE_SET_ATTR_SIZE)) {
  2393. if (fi)
  2394. err = fuse_fs_ftruncate(f->fs, path,
  2395. attr->st_size, fi);
  2396. else
  2397. err = fuse_fs_truncate(f->fs, path,
  2398. attr->st_size);
  2399. }
  2400. #ifdef HAVE_UTIMENSAT
  2401. if (!err && f->utime_omit_ok &&
  2402. (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME))) {
  2403. struct timespec tv[2];
  2404. tv[0].tv_sec = 0;
  2405. tv[1].tv_sec = 0;
  2406. tv[0].tv_nsec = UTIME_OMIT;
  2407. tv[1].tv_nsec = UTIME_OMIT;
  2408. if (valid & FUSE_SET_ATTR_ATIME_NOW)
  2409. tv[0].tv_nsec = UTIME_NOW;
  2410. else if (valid & FUSE_SET_ATTR_ATIME)
  2411. tv[0] = attr->st_atim;
  2412. if (valid & FUSE_SET_ATTR_MTIME_NOW)
  2413. tv[1].tv_nsec = UTIME_NOW;
  2414. else if (valid & FUSE_SET_ATTR_MTIME)
  2415. tv[1] = attr->st_mtim;
  2416. err = fuse_fs_utimens(f->fs, path, tv);
  2417. } else
  2418. #endif
  2419. if (!err &&
  2420. (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) ==
  2421. (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) {
  2422. struct timespec tv[2];
  2423. tv[0].tv_sec = attr->st_atime;
  2424. tv[0].tv_nsec = ST_ATIM_NSEC(attr);
  2425. tv[1].tv_sec = attr->st_mtime;
  2426. tv[1].tv_nsec = ST_MTIM_NSEC(attr);
  2427. err = fuse_fs_utimens(f->fs, path, tv);
  2428. }
  2429. if (!err) {
  2430. if (fi)
  2431. err = fuse_fs_fgetattr(f->fs, path, &buf, fi);
  2432. else
  2433. err = fuse_fs_getattr(f->fs, path, &buf);
  2434. }
  2435. fuse_finish_interrupt(f, req, &d);
  2436. free_path(f, ino, path);
  2437. }
  2438. if (!err) {
  2439. if (f->conf.auto_cache) {
  2440. pthread_mutex_lock(&f->lock);
  2441. update_stat(get_node(f, ino), &buf);
  2442. pthread_mutex_unlock(&f->lock);
  2443. }
  2444. set_stat(f, ino, &buf);
  2445. fuse_reply_attr(req, &buf, f->conf.attr_timeout);
  2446. } else
  2447. reply_err(req, err);
  2448. }
  2449. static void fuse_lib_access(fuse_req_t req, fuse_ino_t ino, int mask)
  2450. {
  2451. struct fuse *f = req_fuse_prepare(req);
  2452. char *path;
  2453. int err;
  2454. err = get_path(f, ino, &path);
  2455. if (!err) {
  2456. struct fuse_intr_data d;
  2457. fuse_prepare_interrupt(f, req, &d);
  2458. err = fuse_fs_access(f->fs, path, mask);
  2459. fuse_finish_interrupt(f, req, &d);
  2460. free_path(f, ino, path);
  2461. }
  2462. reply_err(req, err);
  2463. }
  2464. static void fuse_lib_readlink(fuse_req_t req, fuse_ino_t ino)
  2465. {
  2466. struct fuse *f = req_fuse_prepare(req);
  2467. char linkname[PATH_MAX + 1];
  2468. char *path;
  2469. int err;
  2470. err = get_path(f, ino, &path);
  2471. if (!err) {
  2472. struct fuse_intr_data d;
  2473. fuse_prepare_interrupt(f, req, &d);
  2474. err = fuse_fs_readlink(f->fs, path, linkname, sizeof(linkname));
  2475. fuse_finish_interrupt(f, req, &d);
  2476. free_path(f, ino, path);
  2477. }
  2478. if (!err) {
  2479. linkname[PATH_MAX] = '\0';
  2480. fuse_reply_readlink(req, linkname);
  2481. } else
  2482. reply_err(req, err);
  2483. }
  2484. static void fuse_lib_mknod(fuse_req_t req, fuse_ino_t parent, const char *name,
  2485. mode_t mode, dev_t rdev)
  2486. {
  2487. struct fuse *f = req_fuse_prepare(req);
  2488. struct fuse_entry_param e;
  2489. char *path;
  2490. int err;
  2491. err = get_path_name(f, parent, name, &path);
  2492. if (!err) {
  2493. struct fuse_intr_data d;
  2494. fuse_prepare_interrupt(f, req, &d);
  2495. err = -ENOSYS;
  2496. if (S_ISREG(mode)) {
  2497. struct fuse_file_info fi;
  2498. memset(&fi, 0, sizeof(fi));
  2499. fi.flags = O_CREAT | O_EXCL | O_WRONLY;
  2500. err = fuse_fs_create(f->fs, path, mode, &fi);
  2501. if (!err) {
  2502. err = lookup_path(f, parent, name, path, &e,
  2503. &fi);
  2504. fuse_fs_release(f->fs, path, &fi);
  2505. }
  2506. }
  2507. if (err == -ENOSYS) {
  2508. err = fuse_fs_mknod(f->fs, path, mode, rdev);
  2509. if (!err)
  2510. err = lookup_path(f, parent, name, path, &e,
  2511. NULL);
  2512. }
  2513. fuse_finish_interrupt(f, req, &d);
  2514. free_path(f, parent, path);
  2515. }
  2516. reply_entry(req, &e, err);
  2517. }
  2518. static void fuse_lib_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name,
  2519. mode_t mode)
  2520. {
  2521. struct fuse *f = req_fuse_prepare(req);
  2522. struct fuse_entry_param e;
  2523. char *path;
  2524. int err;
  2525. err = get_path_name(f, parent, name, &path);
  2526. if (!err) {
  2527. struct fuse_intr_data d;
  2528. fuse_prepare_interrupt(f, req, &d);
  2529. err = fuse_fs_mkdir(f->fs, path, mode);
  2530. if (!err)
  2531. err = lookup_path(f, parent, name, path, &e, NULL);
  2532. fuse_finish_interrupt(f, req, &d);
  2533. free_path(f, parent, path);
  2534. }
  2535. reply_entry(req, &e, err);
  2536. }
  2537. static void fuse_lib_unlink(fuse_req_t req, fuse_ino_t parent,
  2538. const char *name)
  2539. {
  2540. struct fuse *f = req_fuse_prepare(req);
  2541. struct node *wnode;
  2542. char *path;
  2543. int err;
  2544. err = get_path_wrlock(f, parent, name, &path, &wnode);
  2545. if (!err) {
  2546. struct fuse_intr_data d;
  2547. fuse_prepare_interrupt(f, req, &d);
  2548. if (!f->conf.hard_remove && is_open(f, parent, name)) {
  2549. err = hide_node(f, path, parent, name);
  2550. } else {
  2551. err = fuse_fs_unlink(f->fs, path);
  2552. if (!err)
  2553. remove_node(f, parent, name);
  2554. }
  2555. fuse_finish_interrupt(f, req, &d);
  2556. free_path_wrlock(f, parent, wnode, path);
  2557. }
  2558. reply_err(req, err);
  2559. }
  2560. static void fuse_lib_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name)
  2561. {
  2562. struct fuse *f = req_fuse_prepare(req);
  2563. struct node *wnode;
  2564. char *path;
  2565. int err;
  2566. err = get_path_wrlock(f, parent, name, &path, &wnode);
  2567. if (!err) {
  2568. struct fuse_intr_data d;
  2569. fuse_prepare_interrupt(f, req, &d);
  2570. err = fuse_fs_rmdir(f->fs, path);
  2571. fuse_finish_interrupt(f, req, &d);
  2572. if (!err)
  2573. remove_node(f, parent, name);
  2574. free_path_wrlock(f, parent, wnode, path);
  2575. }
  2576. reply_err(req, err);
  2577. }
  2578. static void fuse_lib_symlink(fuse_req_t req, const char *linkname,
  2579. fuse_ino_t parent, const char *name)
  2580. {
  2581. struct fuse *f = req_fuse_prepare(req);
  2582. struct fuse_entry_param e;
  2583. char *path;
  2584. int err;
  2585. err = get_path_name(f, parent, name, &path);
  2586. if (!err) {
  2587. struct fuse_intr_data d;
  2588. fuse_prepare_interrupt(f, req, &d);
  2589. err = fuse_fs_symlink(f->fs, linkname, path);
  2590. if (!err)
  2591. err = lookup_path(f, parent, name, path, &e, NULL);
  2592. fuse_finish_interrupt(f, req, &d);
  2593. free_path(f, parent, path);
  2594. }
  2595. reply_entry(req, &e, err);
  2596. }
  2597. static void fuse_lib_rename(fuse_req_t req, fuse_ino_t olddir,
  2598. const char *oldname, fuse_ino_t newdir,
  2599. const char *newname)
  2600. {
  2601. struct fuse *f = req_fuse_prepare(req);
  2602. char *oldpath;
  2603. char *newpath;
  2604. struct node *wnode1;
  2605. struct node *wnode2;
  2606. int err;
  2607. err = get_path2(f, olddir, oldname, newdir, newname,
  2608. &oldpath, &newpath, &wnode1, &wnode2);
  2609. if (!err) {
  2610. struct fuse_intr_data d;
  2611. err = 0;
  2612. fuse_prepare_interrupt(f, req, &d);
  2613. if (!f->conf.hard_remove && is_open(f, newdir, newname))
  2614. err = hide_node(f, newpath, newdir, newname);
  2615. if (!err) {
  2616. err = fuse_fs_rename(f->fs, oldpath, newpath);
  2617. if (!err)
  2618. err = rename_node(f, olddir, oldname, newdir,
  2619. newname, 0);
  2620. }
  2621. fuse_finish_interrupt(f, req, &d);
  2622. free_path2(f, olddir, newdir, wnode1, wnode2, oldpath, newpath);
  2623. }
  2624. reply_err(req, err);
  2625. }
  2626. static void fuse_lib_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent,
  2627. const char *newname)
  2628. {
  2629. struct fuse *f = req_fuse_prepare(req);
  2630. struct fuse_entry_param e;
  2631. char *oldpath;
  2632. char *newpath;
  2633. int err;
  2634. err = get_path2(f, ino, NULL, newparent, newname,
  2635. &oldpath, &newpath, NULL, NULL);
  2636. if (!err) {
  2637. struct fuse_intr_data d;
  2638. fuse_prepare_interrupt(f, req, &d);
  2639. err = fuse_fs_link(f->fs, oldpath, newpath);
  2640. if (!err)
  2641. err = lookup_path(f, newparent, newname, newpath,
  2642. &e, NULL);
  2643. fuse_finish_interrupt(f, req, &d);
  2644. free_path2(f, ino, newparent, NULL, NULL, oldpath, newpath);
  2645. }
  2646. reply_entry(req, &e, err);
  2647. }
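/* Common release path: call the filesystem's release(), drop the node's
   open count, and if this was the last open reference to a node hidden by
   hide_node(), unlink the ".fuse_hidden" placeholder. */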
  2648. static void fuse_do_release(struct fuse *f, fuse_ino_t ino, const char *path,
  2649. struct fuse_file_info *fi)
  2650. {
  2651. struct node *node;
  2652. int unlink_hidden = 0;
  2653. const char *compatpath;
  2654. if (path != NULL || f->nullpath_ok || f->conf.nopath)
  2655. compatpath = path;
  2656. else
  2657. compatpath = "-";
  2658. fuse_fs_release(f->fs, compatpath, fi);
  2659. pthread_mutex_lock(&f->lock);
  2660. node = get_node(f, ino);
  2661. assert(node->open_count > 0);
  2662. --node->open_count;
  2663. if (node->is_hidden && !node->open_count) {
  2664. unlink_hidden = 1;
  2665. node->is_hidden = 0;
  2666. }
  2667. pthread_mutex_unlock(&f->lock);
  2668. if (unlink_hidden) {
  2669. if (path) {
  2670. fuse_fs_unlink(f->fs, path);
  2671. } else if (f->conf.nopath) {
  2672. char *unlinkpath;
  2673. if (get_path(f, ino, &unlinkpath) == 0)
  2674. fuse_fs_unlink(f->fs, unlinkpath);
  2675. free_path(f, ino, unlinkpath);
  2676. }
  2677. }
  2678. }
  2679. static void fuse_lib_create(fuse_req_t req, fuse_ino_t parent,
  2680. const char *name, mode_t mode,
  2681. struct fuse_file_info *fi)
  2682. {
  2683. struct fuse *f = req_fuse_prepare(req);
  2684. struct fuse_intr_data d;
  2685. struct fuse_entry_param e;
  2686. char *path;
  2687. int err;
  2688. err = get_path_name(f, parent, name, &path);
  2689. if (!err) {
  2690. fuse_prepare_interrupt(f, req, &d);
  2691. err = fuse_fs_create(f->fs, path, mode, fi);
  2692. if (!err) {
  2693. err = lookup_path(f, parent, name, path, &e, fi);
  2694. if (err)
  2695. fuse_fs_release(f->fs, path, fi);
  2696. else if (!S_ISREG(e.attr.st_mode)) {
  2697. err = -EIO;
  2698. fuse_fs_release(f->fs, path, fi);
  2699. forget_node(f, e.ino, 1);
  2700. } else {
  2701. if (f->conf.direct_io)
  2702. fi->direct_io = 1;
  2703. if (f->conf.kernel_cache)
  2704. fi->keep_cache = 1;
  2705. }
  2706. }
  2707. fuse_finish_interrupt(f, req, &d);
  2708. }
  2709. if (!err) {
  2710. pthread_mutex_lock(&f->lock);
  2711. get_node(f, e.ino)->open_count++;
  2712. pthread_mutex_unlock(&f->lock);
  2713. if (fuse_reply_create(req, &e, fi) == -ENOENT) {
  2714. /* The open syscall was interrupted, so it
  2715. must be cancelled */
  2716. fuse_do_release(f, e.ino, path, fi);
  2717. forget_node(f, e.ino, 1);
  2718. }
  2719. } else {
  2720. reply_err(req, err);
  2721. }
  2722. free_path(f, parent, path);
  2723. }
  2724. static double diff_timespec(const struct timespec *t1,
  2725. const struct timespec *t2)
  2726. {
  2727. return (t1->tv_sec - t2->tv_sec) +
  2728. ((double) t1->tv_nsec - (double) t2->tv_nsec) / 1000000000.0;
  2729. }
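/* auto_cache support on open: if the cached attributes are older than
   ac_attr_timeout, re-stat the file and only set keep_cache (preserve the
   kernel page cache) if the cached mtime and size are still valid. */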
  2730. static void open_auto_cache(struct fuse *f, fuse_ino_t ino, const char *path,
  2731. struct fuse_file_info *fi)
  2732. {
  2733. struct node *node;
  2734. pthread_mutex_lock(&f->lock);
  2735. node = get_node(f, ino);
  2736. if (node->cache_valid) {
  2737. struct timespec now;
  2738. curr_time(&now);
  2739. if (diff_timespec(&now, &node->stat_updated) >
  2740. f->conf.ac_attr_timeout) {
  2741. struct stat stbuf;
  2742. int err;
  2743. pthread_mutex_unlock(&f->lock);
  2744. err = fuse_fs_fgetattr(f->fs, path, &stbuf, fi);
  2745. pthread_mutex_lock(&f->lock);
  2746. if (!err)
  2747. update_stat(node, &stbuf);
  2748. else
  2749. node->cache_valid = 0;
  2750. }
  2751. }
  2752. if (node->cache_valid)
  2753. fi->keep_cache = 1;
  2754. node->cache_valid = 1;
  2755. pthread_mutex_unlock(&f->lock);
  2756. }
  2757. static void fuse_lib_open(fuse_req_t req, fuse_ino_t ino,
  2758. struct fuse_file_info *fi)
  2759. {
  2760. struct fuse *f = req_fuse_prepare(req);
  2761. struct fuse_intr_data d;
  2762. char *path;
  2763. int err;
  2764. err = get_path(f, ino, &path);
  2765. if (!err) {
  2766. fuse_prepare_interrupt(f, req, &d);
  2767. err = fuse_fs_open(f->fs, path, fi);
  2768. if (!err) {
  2769. if (f->conf.direct_io)
  2770. fi->direct_io = 1;
  2771. if (f->conf.kernel_cache)
  2772. fi->keep_cache = 1;
  2773. if (f->conf.auto_cache)
  2774. open_auto_cache(f, ino, path, fi);
  2775. }
  2776. fuse_finish_interrupt(f, req, &d);
  2777. }
  2778. if (!err) {
  2779. pthread_mutex_lock(&f->lock);
  2780. get_node(f, ino)->open_count++;
  2781. pthread_mutex_unlock(&f->lock);
  2782. if (fuse_reply_open(req, fi) == -ENOENT) {
  2783. /* The open syscall was interrupted, so it
  2784. must be cancelled */
  2785. fuse_do_release(f, ino, path, fi);
  2786. }
  2787. } else
  2788. reply_err(req, err);
  2789. free_path(f, ino, path);
  2790. }
  2791. static void fuse_lib_read(fuse_req_t req, fuse_ino_t ino, size_t size,
  2792. off_t off, struct fuse_file_info *fi)
  2793. {
  2794. struct fuse *f = req_fuse_prepare(req);
  2795. struct fuse_bufvec *buf = NULL;
  2796. char *path;
  2797. int res;
  2798. res = get_path_nullok(f, ino, &path);
  2799. if (res == 0) {
  2800. struct fuse_intr_data d;
  2801. fuse_prepare_interrupt(f, req, &d);
  2802. res = fuse_fs_read_buf(f->fs, path, &buf, size, off, fi);
  2803. fuse_finish_interrupt(f, req, &d);
  2804. free_path(f, ino, path);
  2805. }
  2806. if (res == 0)
  2807. fuse_reply_data(req, buf, FUSE_BUF_SPLICE_MOVE);
  2808. else
  2809. reply_err(req, res);
  2810. fuse_free_buf(buf);
  2811. }
  2812. static void fuse_lib_write_buf(fuse_req_t req, fuse_ino_t ino,
  2813. struct fuse_bufvec *buf, off_t off,
  2814. struct fuse_file_info *fi)
  2815. {
  2816. struct fuse *f = req_fuse_prepare(req);
  2817. char *path;
  2818. int res;
  2819. res = get_path_nullok(f, ino, &path);
  2820. if (res == 0) {
  2821. struct fuse_intr_data d;
  2822. fuse_prepare_interrupt(f, req, &d);
  2823. res = fuse_fs_write_buf(f->fs, path, buf, off, fi);
  2824. fuse_finish_interrupt(f, req, &d);
  2825. free_path(f, ino, path);
  2826. }
  2827. if (res >= 0)
  2828. fuse_reply_write(req, res);
  2829. else
  2830. reply_err(req, res);
  2831. }
  2832. static void fuse_lib_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
  2833. struct fuse_file_info *fi)
  2834. {
  2835. struct fuse *f = req_fuse_prepare(req);
  2836. char *path;
  2837. int err;
  2838. err = get_path_nullok(f, ino, &path);
  2839. if (!err) {
  2840. struct fuse_intr_data d;
  2841. fuse_prepare_interrupt(f, req, &d);
  2842. err = fuse_fs_fsync(f->fs, path, datasync, fi);
  2843. fuse_finish_interrupt(f, req, &d);
  2844. free_path(f, ino, path);
  2845. }
  2846. reply_err(req, err);
  2847. }
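/* Recover the directory handle stored in the lowlevel fh by opendir and
   rebuild a fuse_file_info carrying the filesystem's own fh. */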
  2848. static struct fuse_dh *get_dirhandle(const struct fuse_file_info *llfi,
  2849. struct fuse_file_info *fi)
  2850. {
  2851. struct fuse_dh *dh = (struct fuse_dh *) (uintptr_t) llfi->fh;
  2852. memset(fi, 0, sizeof(struct fuse_file_info));
  2853. fi->fh = dh->fh;
  2854. fi->fh_old = dh->fh;
  2855. return dh;
  2856. }
  2857. static void fuse_lib_opendir(fuse_req_t req, fuse_ino_t ino,
  2858. struct fuse_file_info *llfi)
  2859. {
  2860. struct fuse *f = req_fuse_prepare(req);
  2861. struct fuse_intr_data d;
  2862. struct fuse_dh *dh;
  2863. struct fuse_file_info fi;
  2864. char *path;
  2865. int err;
  2866. dh = (struct fuse_dh *) malloc(sizeof(struct fuse_dh));
  2867. if (dh == NULL) {
  2868. reply_err(req, -ENOMEM);
  2869. return;
  2870. }
  2871. memset(dh, 0, sizeof(struct fuse_dh));
  2872. dh->fuse = f;
  2873. dh->contents = NULL;
  2874. dh->len = 0;
  2875. dh->filled = 0;
  2876. dh->nodeid = ino;
  2877. fuse_mutex_init(&dh->lock);
  2878. llfi->fh = (uintptr_t) dh;
  2879. memset(&fi, 0, sizeof(fi));
  2880. fi.flags = llfi->flags;
  2881. err = get_path(f, ino, &path);
  2882. if (!err) {
  2883. fuse_prepare_interrupt(f, req, &d);
  2884. err = fuse_fs_opendir(f->fs, path, &fi);
  2885. fuse_finish_interrupt(f, req, &d);
  2886. dh->fh = fi.fh;
  2887. }
  2888. if (!err) {
  2889. if (fuse_reply_open(req, llfi) == -ENOENT) {
  2890. /* The opendir syscall was interrupted, so it
  2891. must be cancelled */
  2892. fuse_fs_releasedir(f->fs, path, &fi);
  2893. pthread_mutex_destroy(&dh->lock);
  2894. free(dh);
  2895. }
  2896. } else {
  2897. reply_err(req, err);
  2898. pthread_mutex_destroy(&dh->lock);
  2899. free(dh);
  2900. }
  2901. free_path(f, ino, path);
  2902. }
  2903. static int extend_contents(struct fuse_dh *dh, unsigned minsize)
  2904. {
  2905. if (minsize > dh->size) {
  2906. char *newptr;
  2907. unsigned newsize = dh->size;
  2908. if (!newsize)
  2909. newsize = 1024;
  2910. while (newsize < minsize) {
  2911. if (newsize >= 0x80000000)
  2912. newsize = 0xffffffff;
  2913. else
  2914. newsize *= 2;
  2915. }
  2916. newptr = (char *) realloc(dh->contents, newsize);
  2917. if (!newptr) {
  2918. dh->error = -ENOMEM;
  2919. return -1;
  2920. }
  2921. dh->contents = newptr;
  2922. dh->size = newsize;
  2923. }
  2924. return 0;
  2925. }
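/* Directory filler callback passed to the filesystem's readdir().  With a
   non-zero offset each entry is added to the reply buffer with the
   filesystem-supplied offset as its next-entry cookie, and filling stops
   (return 1) once the requested size is reached; with offset 0 the whole
   directory is buffered in dh->contents and later sliced by offset in
   fuse_lib_readdir(). */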
  2926. static int fill_dir(void *dh_, const char *name, const struct stat *statp,
  2927. off_t off)
  2928. {
  2929. struct fuse_dh *dh = (struct fuse_dh *) dh_;
  2930. struct stat stbuf;
  2931. size_t newlen;
  2932. if (statp)
  2933. stbuf = *statp;
  2934. else {
  2935. memset(&stbuf, 0, sizeof(stbuf));
  2936. stbuf.st_ino = FUSE_UNKNOWN_INO;
  2937. }
  2938. if (!dh->fuse->conf.use_ino) {
  2939. stbuf.st_ino = FUSE_UNKNOWN_INO;
  2940. if (dh->fuse->conf.readdir_ino) {
  2941. struct node *node;
  2942. pthread_mutex_lock(&dh->fuse->lock);
  2943. node = lookup_node(dh->fuse, dh->nodeid, name);
  2944. if (node)
  2945. stbuf.st_ino = (ino_t) node->nodeid;
  2946. pthread_mutex_unlock(&dh->fuse->lock);
  2947. }
  2948. }
  2949. if (off) {
  2950. if (extend_contents(dh, dh->needlen) == -1)
  2951. return 1;
  2952. dh->filled = 0;
  2953. newlen = dh->len +
  2954. fuse_add_direntry(dh->req, dh->contents + dh->len,
  2955. dh->needlen - dh->len, name,
  2956. &stbuf, off);
  2957. if (newlen > dh->needlen)
  2958. return 1;
  2959. } else {
  2960. newlen = dh->len +
  2961. fuse_add_direntry(dh->req, NULL, 0, name, NULL, 0);
  2962. if (extend_contents(dh, newlen) == -1)
  2963. return 1;
  2964. fuse_add_direntry(dh->req, dh->contents + dh->len,
  2965. dh->size - dh->len, name, &stbuf, newlen);
  2966. }
  2967. dh->len = newlen;
  2968. return 0;
  2969. }
  2970. static int readdir_fill(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  2971. size_t size, off_t off, struct fuse_dh *dh,
  2972. struct fuse_file_info *fi)
  2973. {
  2974. char *path;
  2975. int err;
  2976. if (f->fs->op.readdir)
  2977. err = get_path_nullok(f, ino, &path);
  2978. else
  2979. err = get_path(f, ino, &path);
  2980. if (!err) {
  2981. struct fuse_intr_data d;
  2982. dh->len = 0;
  2983. dh->error = 0;
  2984. dh->needlen = size;
  2985. dh->filled = 1;
  2986. dh->req = req;
  2987. fuse_prepare_interrupt(f, req, &d);
  2988. err = fuse_fs_readdir(f->fs, path, dh, fill_dir, off, fi);
  2989. fuse_finish_interrupt(f, req, &d);
  2990. dh->req = NULL;
  2991. if (!err)
  2992. err = dh->error;
  2993. if (err)
  2994. dh->filled = 0;
  2995. free_path(f, ino, path);
  2996. }
  2997. return err;
  2998. }
  2999. static void fuse_lib_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
  3000. off_t off, struct fuse_file_info *llfi)
  3001. {
  3002. struct fuse *f = req_fuse_prepare(req);
  3003. struct fuse_file_info fi;
  3004. struct fuse_dh *dh = get_dirhandle(llfi, &fi);
  3005. pthread_mutex_lock(&dh->lock);
  3006. /* According to SUS, directory contents need to be refreshed on
  3007. rewinddir() */
  3008. if (!off)
  3009. dh->filled = 0;
  3010. if (!dh->filled) {
  3011. int err = readdir_fill(f, req, ino, size, off, dh, &fi);
  3012. if (err) {
  3013. reply_err(req, err);
  3014. goto out;
  3015. }
  3016. }
  3017. if (dh->filled) {
  3018. if (off < dh->len) {
  3019. if (off + size > dh->len)
  3020. size = dh->len - off;
  3021. } else
  3022. size = 0;
  3023. } else {
  3024. size = dh->len;
  3025. off = 0;
  3026. }
  3027. fuse_reply_buf(req, dh->contents + off, size);
  3028. out:
  3029. pthread_mutex_unlock(&dh->lock);
  3030. }
  3031. static void fuse_lib_releasedir(fuse_req_t req, fuse_ino_t ino,
  3032. struct fuse_file_info *llfi)
  3033. {
  3034. struct fuse *f = req_fuse_prepare(req);
  3035. struct fuse_intr_data d;
  3036. struct fuse_file_info fi;
  3037. struct fuse_dh *dh = get_dirhandle(llfi, &fi);
  3038. char *path;
  3039. const char *compatpath;
  3040. get_path_nullok(f, ino, &path);
  3041. if (path != NULL || f->nullpath_ok || f->conf.nopath)
  3042. compatpath = path;
  3043. else
  3044. compatpath = "-";
  3045. fuse_prepare_interrupt(f, req, &d);
  3046. fuse_fs_releasedir(f->fs, compatpath, &fi);
  3047. fuse_finish_interrupt(f, req, &d);
  3048. free_path(f, ino, path);
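/* Lock and unlock dh->lock to wait for any readdir still using this handle
   before tearing it down. */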
  3049. pthread_mutex_lock(&dh->lock);
  3050. pthread_mutex_unlock(&dh->lock);
  3051. pthread_mutex_destroy(&dh->lock);
  3052. free(dh->contents);
  3053. free(dh);
  3054. reply_err(req, 0);
  3055. }
  3056. static void fuse_lib_fsyncdir(fuse_req_t req, fuse_ino_t ino, int datasync,
  3057. struct fuse_file_info *llfi)
  3058. {
  3059. struct fuse *f = req_fuse_prepare(req);
  3060. struct fuse_file_info fi;
  3061. char *path;
  3062. int err;
  3063. get_dirhandle(llfi, &fi);
  3064. err = get_path_nullok(f, ino, &path);
  3065. if (!err) {
  3066. struct fuse_intr_data d;
  3067. fuse_prepare_interrupt(f, req, &d);
  3068. err = fuse_fs_fsyncdir(f->fs, path, datasync, &fi);
  3069. fuse_finish_interrupt(f, req, &d);
  3070. free_path(f, ino, path);
  3071. }
  3072. reply_err(req, err);
  3073. }
  3074. static void fuse_lib_statfs(fuse_req_t req, fuse_ino_t ino)
  3075. {
  3076. struct fuse *f = req_fuse_prepare(req);
  3077. struct statvfs buf;
  3078. char *path = NULL;
  3079. int err = 0;
  3080. memset(&buf, 0, sizeof(buf));
  3081. if (ino)
  3082. err = get_path(f, ino, &path);
  3083. if (!err) {
  3084. struct fuse_intr_data d;
  3085. fuse_prepare_interrupt(f, req, &d);
  3086. err = fuse_fs_statfs(f->fs, path ? path : "/", &buf);
  3087. fuse_finish_interrupt(f, req, &d);
  3088. free_path(f, ino, path);
  3089. }
  3090. if (!err)
  3091. fuse_reply_statfs(req, &buf);
  3092. else
  3093. reply_err(req, err);
  3094. }
  3095. static void fuse_lib_setxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
  3096. const char *value, size_t size, int flags)
  3097. {
  3098. struct fuse *f = req_fuse_prepare(req);
  3099. char *path;
  3100. int err;
  3101. err = get_path(f, ino, &path);
  3102. if (!err) {
  3103. struct fuse_intr_data d;
  3104. fuse_prepare_interrupt(f, req, &d);
  3105. err = fuse_fs_setxattr(f->fs, path, name, value, size, flags);
  3106. fuse_finish_interrupt(f, req, &d);
  3107. free_path(f, ino, path);
  3108. }
  3109. reply_err(req, err);
  3110. }
  3111. static int common_getxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  3112. const char *name, char *value, size_t size)
  3113. {
  3114. int err;
  3115. char *path;
  3116. err = get_path(f, ino, &path);
  3117. if (!err) {
  3118. struct fuse_intr_data d;
  3119. fuse_prepare_interrupt(f, req, &d);
  3120. err = fuse_fs_getxattr(f->fs, path, name, value, size);
  3121. fuse_finish_interrupt(f, req, &d);
  3122. free_path(f, ino, path);
  3123. }
  3124. return err;
  3125. }
  3126. static void fuse_lib_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
  3127. size_t size)
  3128. {
  3129. struct fuse *f = req_fuse_prepare(req);
  3130. int res;
  3131. if (size) {
  3132. char *value = (char *) malloc(size);
  3133. if (value == NULL) {
  3134. reply_err(req, -ENOMEM);
  3135. return;
  3136. }
  3137. res = common_getxattr(f, req, ino, name, value, size);
  3138. if (res > 0)
  3139. fuse_reply_buf(req, value, res);
  3140. else
  3141. reply_err(req, res);
  3142. free(value);
  3143. } else {
  3144. res = common_getxattr(f, req, ino, name, NULL, 0);
  3145. if (res >= 0)
  3146. fuse_reply_xattr(req, res);
  3147. else
  3148. reply_err(req, res);
  3149. }
  3150. }
  3151. static int common_listxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  3152. char *list, size_t size)
  3153. {
  3154. char *path;
  3155. int err;
  3156. err = get_path(f, ino, &path);
  3157. if (!err) {
  3158. struct fuse_intr_data d;
  3159. fuse_prepare_interrupt(f, req, &d);
  3160. err = fuse_fs_listxattr(f->fs, path, list, size);
  3161. fuse_finish_interrupt(f, req, &d);
  3162. free_path(f, ino, path);
  3163. }
  3164. return err;
  3165. }
  3166. static void fuse_lib_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
  3167. {
  3168. struct fuse *f = req_fuse_prepare(req);
  3169. int res;
  3170. if (size) {
  3171. char *list = (char *) malloc(size);
  3172. if (list == NULL) {
  3173. reply_err(req, -ENOMEM);
  3174. return;
  3175. }
  3176. res = common_listxattr(f, req, ino, list, size);
  3177. if (res > 0)
  3178. fuse_reply_buf(req, list, res);
  3179. else
  3180. reply_err(req, res);
  3181. free(list);
  3182. } else {
  3183. res = common_listxattr(f, req, ino, NULL, 0);
  3184. if (res >= 0)
  3185. fuse_reply_xattr(req, res);
  3186. else
  3187. reply_err(req, res);
  3188. }
  3189. }
  3190. static void fuse_lib_removexattr(fuse_req_t req, fuse_ino_t ino,
  3191. const char *name)
  3192. {
  3193. struct fuse *f = req_fuse_prepare(req);
  3194. char *path;
  3195. int err;
  3196. err = get_path(f, ino, &path);
  3197. if (!err) {
  3198. struct fuse_intr_data d;
  3199. fuse_prepare_interrupt(f, req, &d);
  3200. err = fuse_fs_removexattr(f->fs, path, name);
  3201. fuse_finish_interrupt(f, req, &d);
  3202. free_path(f, ino, path);
  3203. }
  3204. reply_err(req, err);
  3205. }
  3206. static struct lock *locks_conflict(struct node *node, const struct lock *lock)
  3207. {
  3208. struct lock *l;
  3209. for (l = node->locks; l; l = l->next)
  3210. if (l->owner != lock->owner &&
  3211. lock->start <= l->end && l->start <= lock->end &&
  3212. (l->type == F_WRLCK || lock->type == F_WRLCK))
  3213. break;
  3214. return l;
  3215. }
  3216. static void delete_lock(struct lock **lockp)
  3217. {
  3218. struct lock *l = *lockp;
  3219. *lockp = l->next;
  3220. free(l);
  3221. }
  3222. static void insert_lock(struct lock **pos, struct lock *lock)
  3223. {
  3224. lock->next = *pos;
  3225. *pos = lock;
  3226. }
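/* Insert a POSIX lock into the node's local lock list, merging with adjacent
   regions of the same type and splitting or trimming overlapping regions of
   a different type.  At most two new records can be needed, so they are
   allocated up front; a full-range unlock needs none. */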
  3227. static int locks_insert(struct node *node, struct lock *lock)
  3228. {
  3229. struct lock **lp;
  3230. struct lock *newl1 = NULL;
  3231. struct lock *newl2 = NULL;
  3232. if (lock->type != F_UNLCK || lock->start != 0 ||
  3233. lock->end != OFFSET_MAX) {
  3234. newl1 = malloc(sizeof(struct lock));
  3235. newl2 = malloc(sizeof(struct lock));
  3236. if (!newl1 || !newl2) {
  3237. free(newl1);
  3238. free(newl2);
  3239. return -ENOLCK;
  3240. }
  3241. }
  3242. for (lp = &node->locks; *lp;) {
  3243. struct lock *l = *lp;
  3244. if (l->owner != lock->owner)
  3245. goto skip;
  3246. if (lock->type == l->type) {
  3247. if (l->end < lock->start - 1)
  3248. goto skip;
  3249. if (lock->end < l->start - 1)
  3250. break;
  3251. if (l->start <= lock->start && lock->end <= l->end)
  3252. goto out;
  3253. if (l->start < lock->start)
  3254. lock->start = l->start;
  3255. if (lock->end < l->end)
  3256. lock->end = l->end;
  3257. goto delete;
  3258. } else {
  3259. if (l->end < lock->start)
  3260. goto skip;
  3261. if (lock->end < l->start)
  3262. break;
  3263. if (lock->start <= l->start && l->end <= lock->end)
  3264. goto delete;
  3265. if (l->end <= lock->end) {
  3266. l->end = lock->start - 1;
  3267. goto skip;
  3268. }
  3269. if (lock->start <= l->start) {
  3270. l->start = lock->end + 1;
  3271. break;
  3272. }
  3273. *newl2 = *l;
  3274. newl2->start = lock->end + 1;
  3275. l->end = lock->start - 1;
  3276. insert_lock(&l->next, newl2);
  3277. newl2 = NULL;
  3278. }
  3279. skip:
  3280. lp = &l->next;
  3281. continue;
  3282. delete:
  3283. delete_lock(lp);
  3284. }
  3285. if (lock->type != F_UNLCK) {
  3286. *newl1 = *lock;
  3287. insert_lock(lp, newl1);
  3288. newl1 = NULL;
  3289. }
  3290. out:
  3291. free(newl1);
  3292. free(newl2);
  3293. return 0;
  3294. }
  3295. static void flock_to_lock(struct flock *flock, struct lock *lock)
  3296. {
  3297. memset(lock, 0, sizeof(struct lock));
  3298. lock->type = flock->l_type;
  3299. lock->start = flock->l_start;
  3300. lock->end =
  3301. flock->l_len ? flock->l_start + flock->l_len - 1 : OFFSET_MAX;
  3302. lock->pid = flock->l_pid;
  3303. }
  3304. static void lock_to_flock(struct lock *lock, struct flock *flock)
  3305. {
  3306. flock->l_type = lock->type;
  3307. flock->l_start = lock->start;
  3308. flock->l_len =
  3309. (lock->end == OFFSET_MAX) ? 0 : lock->end - lock->start + 1;
  3310. flock->l_pid = lock->pid;
  3311. }
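/* Common part of flush() and the implicit flush on release(): call the
   filesystem's flush() and release all POSIX locks held by this lock owner
   (F_SETLK with F_UNLCK over the whole range), updating the local lock list
   to match.  If lock() is implemented, an ENOSYS from flush() is not
   treated as an error. */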
  3312. static int fuse_flush_common(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  3313. const char *path, struct fuse_file_info *fi)
  3314. {
  3315. struct fuse_intr_data d;
  3316. struct flock lock;
  3317. struct lock l;
  3318. int err;
  3319. int errlock;
  3320. fuse_prepare_interrupt(f, req, &d);
  3321. memset(&lock, 0, sizeof(lock));
  3322. lock.l_type = F_UNLCK;
  3323. lock.l_whence = SEEK_SET;
  3324. err = fuse_fs_flush(f->fs, path, fi);
  3325. errlock = fuse_fs_lock(f->fs, path, fi, F_SETLK, &lock);
  3326. fuse_finish_interrupt(f, req, &d);
  3327. if (errlock != -ENOSYS) {
  3328. flock_to_lock(&lock, &l);
  3329. l.owner = fi->lock_owner;
  3330. pthread_mutex_lock(&f->lock);
  3331. locks_insert(get_node(f, ino), &l);
  3332. pthread_mutex_unlock(&f->lock);
  3333. /* if op.lock() is defined FLUSH is needed regardless
  3334. of op.flush() */
  3335. if (err == -ENOSYS)
  3336. err = 0;
  3337. }
  3338. return err;
  3339. }
  3340. static void fuse_lib_release(fuse_req_t req, fuse_ino_t ino,
  3341. struct fuse_file_info *fi)
  3342. {
  3343. struct fuse *f = req_fuse_prepare(req);
  3344. struct fuse_intr_data d;
  3345. char *path;
  3346. int err = 0;
  3347. get_path_nullok(f, ino, &path);
  3348. if (fi->flush) {
  3349. err = fuse_flush_common(f, req, ino, path, fi);
  3350. if (err == -ENOSYS)
  3351. err = 0;
  3352. }
  3353. fuse_prepare_interrupt(f, req, &d);
  3354. fuse_do_release(f, ino, path, fi);
  3355. fuse_finish_interrupt(f, req, &d);
  3356. free_path(f, ino, path);
  3357. reply_err(req, err);
  3358. }
  3359. static void fuse_lib_flush(fuse_req_t req, fuse_ino_t ino,
  3360. struct fuse_file_info *fi)
  3361. {
  3362. struct fuse *f = req_fuse_prepare(req);
  3363. char *path;
  3364. int err;
  3365. get_path_nullok(f, ino, &path);
  3366. err = fuse_flush_common(f, req, ino, path, fi);
  3367. free_path(f, ino, path);
  3368. reply_err(req, err);
  3369. }
  3370. static int fuse_lock_common(fuse_req_t req, fuse_ino_t ino,
  3371. struct fuse_file_info *fi, struct flock *lock,
  3372. int cmd)
  3373. {
  3374. struct fuse *f = req_fuse_prepare(req);
  3375. char *path;
  3376. int err;
  3377. err = get_path_nullok(f, ino, &path);
  3378. if (!err) {
  3379. struct fuse_intr_data d;
  3380. fuse_prepare_interrupt(f, req, &d);
  3381. err = fuse_fs_lock(f->fs, path, fi, cmd, lock);
  3382. fuse_finish_interrupt(f, req, &d);
  3383. free_path(f, ino, path);
  3384. }
  3385. return err;
  3386. }
  3387. static void fuse_lib_getlk(fuse_req_t req, fuse_ino_t ino,
  3388. struct fuse_file_info *fi, struct flock *lock)
  3389. {
  3390. int err;
  3391. struct lock l;
  3392. struct lock *conflict;
  3393. struct fuse *f = req_fuse(req);
  3394. flock_to_lock(lock, &l);
  3395. l.owner = fi->lock_owner;
  3396. pthread_mutex_lock(&f->lock);
  3397. conflict = locks_conflict(get_node(f, ino), &l);
  3398. if (conflict)
  3399. lock_to_flock(conflict, lock);
  3400. pthread_mutex_unlock(&f->lock);
  3401. if (!conflict)
  3402. err = fuse_lock_common(req, ino, fi, lock, F_GETLK);
  3403. else
  3404. err = 0;
  3405. if (!err)
  3406. fuse_reply_lock(req, lock);
  3407. else
  3408. reply_err(req, err);
  3409. }
  3410. static void fuse_lib_setlk(fuse_req_t req, fuse_ino_t ino,
  3411. struct fuse_file_info *fi, struct flock *lock,
  3412. int sleep)
  3413. {
  3414. int err = fuse_lock_common(req, ino, fi, lock,
  3415. sleep ? F_SETLKW : F_SETLK);
  3416. if (!err) {
  3417. struct fuse *f = req_fuse(req);
  3418. struct lock l;
  3419. flock_to_lock(lock, &l);
  3420. l.owner = fi->lock_owner;
  3421. pthread_mutex_lock(&f->lock);
  3422. locks_insert(get_node(f, ino), &l);
  3423. pthread_mutex_unlock(&f->lock);
  3424. }
  3425. reply_err(req, err);
  3426. }
  3427. static void fuse_lib_flock(fuse_req_t req, fuse_ino_t ino,
  3428. struct fuse_file_info *fi, int op)
  3429. {
  3430. struct fuse *f = req_fuse_prepare(req);
  3431. char *path;
  3432. int err;
  3433. err = get_path_nullok(f, ino, &path);
  3434. if (err == 0) {
  3435. struct fuse_intr_data d;
  3436. fuse_prepare_interrupt(f, req, &d);
  3437. err = fuse_fs_flock(f->fs, path, fi, op);
  3438. fuse_finish_interrupt(f, req, &d);
  3439. free_path(f, ino, path);
  3440. }
  3441. reply_err(req, err);
  3442. }
  3443. static void fuse_lib_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
  3444. uint64_t idx)
  3445. {
  3446. struct fuse *f = req_fuse_prepare(req);
  3447. struct fuse_intr_data d;
  3448. char *path;
  3449. int err;
  3450. err = get_path(f, ino, &path);
  3451. if (!err) {
  3452. fuse_prepare_interrupt(f, req, &d);
  3453. err = fuse_fs_bmap(f->fs, path, blocksize, &idx);
  3454. fuse_finish_interrupt(f, req, &d);
  3455. free_path(f, ino, path);
  3456. }
  3457. if (!err)
  3458. fuse_reply_bmap(req, idx);
  3459. else
  3460. reply_err(req, err);
  3461. }
  3462. static void fuse_lib_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd, void *arg,
  3463. struct fuse_file_info *llfi, unsigned int flags,
  3464. const void *in_buf, size_t in_bufsz,
  3465. size_t out_bufsz)
  3466. {
  3467. struct fuse *f = req_fuse_prepare(req);
  3468. struct fuse_intr_data d;
  3469. struct fuse_file_info fi;
  3470. char *path, *out_buf = NULL;
  3471. int err;
  3472. err = -EPERM;
  3473. if (flags & FUSE_IOCTL_UNRESTRICTED)
  3474. goto err;
  3475. if (flags & FUSE_IOCTL_DIR)
  3476. get_dirhandle(llfi, &fi);
  3477. else
  3478. fi = *llfi;
  3479. if (out_bufsz) {
  3480. err = -ENOMEM;
  3481. out_buf = malloc(out_bufsz);
  3482. if (!out_buf)
  3483. goto err;
  3484. }
  3485. assert(!in_bufsz || !out_bufsz || in_bufsz == out_bufsz);
  3486. if (out_buf)
  3487. memcpy(out_buf, in_buf, in_bufsz);
  3488. err = get_path_nullok(f, ino, &path);
  3489. if (err)
  3490. goto err;
  3491. fuse_prepare_interrupt(f, req, &d);
  3492. err = fuse_fs_ioctl(f->fs, path, cmd, arg, &fi, flags,
  3493. out_buf ?: (void *)in_buf);
  3494. fuse_finish_interrupt(f, req, &d);
  3495. free_path(f, ino, path);
  3496. fuse_reply_ioctl(req, err, out_buf, out_bufsz);
  3497. goto out;
  3498. err:
  3499. reply_err(req, err);
  3500. out:
  3501. free(out_buf);
  3502. }
  3503. static void fuse_lib_poll(fuse_req_t req, fuse_ino_t ino,
  3504. struct fuse_file_info *fi, struct fuse_pollhandle *ph)
  3505. {
  3506. struct fuse *f = req_fuse_prepare(req);
  3507. struct fuse_intr_data d;
  3508. char *path;
  3509. int err;
  3510. unsigned revents = 0;
  3511. err = get_path_nullok(f, ino, &path);
  3512. if (!err) {
  3513. fuse_prepare_interrupt(f, req, &d);
  3514. err = fuse_fs_poll(f->fs, path, fi, ph, &revents);
  3515. fuse_finish_interrupt(f, req, &d);
  3516. free_path(f, ino, path);
  3517. }
  3518. if (!err)
  3519. fuse_reply_poll(req, revents);
  3520. else
  3521. reply_err(req, err);
  3522. }
  3523. static void fuse_lib_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
  3524. off_t offset, off_t length, struct fuse_file_info *fi)
  3525. {
  3526. struct fuse *f = req_fuse_prepare(req);
  3527. struct fuse_intr_data d;
  3528. char *path;
  3529. int err;
  3530. err = get_path_nullok(f, ino, &path);
  3531. if (!err) {
  3532. fuse_prepare_interrupt(f, req, &d);
  3533. err = fuse_fs_fallocate(f->fs, path, mode, offset, length, fi);
  3534. fuse_finish_interrupt(f, req, &d);
  3535. free_path(f, ino, path);
  3536. }
  3537. reply_err(req, err);
  3538. }
  3539. static int clean_delay(struct fuse *f)
  3540. {
  3541. /*
  3542. * Calculate the delay between cleaning passes.  To keep the
  3543. * number of passes down, aim for roughly ten passes within the
  3544. * remember window, clamped to [min_sleep, max_sleep].
  3545. */
  3546. int min_sleep = 60;
  3547. int max_sleep = 3600;
  3548. int sleep_time = f->conf.remember / 10;
  3549. if (sleep_time > max_sleep)
  3550. return max_sleep;
  3551. if (sleep_time < min_sleep)
  3552. return min_sleep;
  3553. return sleep_time;
  3554. }
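/*
 * Worked example of the clamping above (illustration only): with
 * remember=300 the computed sleep_time is 30s, below min_sleep, so the
 * cleaner runs every 60s; remember=6000 yields 600s; remember=100000
 * would yield 10000s and is clamped to max_sleep, i.e. 3600s.
 */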
  3555. int fuse_clean_cache(struct fuse *f)
  3556. {
  3557. struct node_lru *lnode;
  3558. struct list_head *curr, *next;
  3559. struct node *node;
  3560. struct timespec now;
  3561. pthread_mutex_lock(&f->lock);
  3562. curr_time(&now);
  3563. for (curr = f->lru_table.next; curr != &f->lru_table; curr = next) {
  3564. double age;
  3565. next = curr->next;
  3566. lnode = list_entry(curr, struct node_lru, lru);
  3567. node = &lnode->node;
  3568. age = diff_timespec(&now, &lnode->forget_time);
  3569. if (age <= f->conf.remember)
  3570. break;
  3571. assert(node->nlookup == 1);
  3572. /* Don't forget active directories */
  3573. if (node->refctr > 1)
  3574. continue;
  3575. node->nlookup = 0;
  3576. unhash_name(f, node);
  3577. unref_node(f, node);
  3578. }
  3579. pthread_mutex_unlock(&f->lock);
  3580. return clean_delay(f);
  3581. }
  3582. static struct fuse_lowlevel_ops fuse_path_ops = {
  3583. .init = fuse_lib_init,
  3584. .destroy = fuse_lib_destroy,
  3585. .lookup = fuse_lib_lookup,
  3586. .forget = fuse_lib_forget,
  3587. .forget_multi = fuse_lib_forget_multi,
  3588. .getattr = fuse_lib_getattr,
  3589. .setattr = fuse_lib_setattr,
  3590. .access = fuse_lib_access,
  3591. .readlink = fuse_lib_readlink,
  3592. .mknod = fuse_lib_mknod,
  3593. .mkdir = fuse_lib_mkdir,
  3594. .unlink = fuse_lib_unlink,
  3595. .rmdir = fuse_lib_rmdir,
  3596. .symlink = fuse_lib_symlink,
  3597. .rename = fuse_lib_rename,
  3598. .link = fuse_lib_link,
  3599. .create = fuse_lib_create,
  3600. .open = fuse_lib_open,
  3601. .read = fuse_lib_read,
  3602. .write_buf = fuse_lib_write_buf,
  3603. .flush = fuse_lib_flush,
  3604. .release = fuse_lib_release,
  3605. .fsync = fuse_lib_fsync,
  3606. .opendir = fuse_lib_opendir,
  3607. .readdir = fuse_lib_readdir,
  3608. .releasedir = fuse_lib_releasedir,
  3609. .fsyncdir = fuse_lib_fsyncdir,
  3610. .statfs = fuse_lib_statfs,
  3611. .setxattr = fuse_lib_setxattr,
  3612. .getxattr = fuse_lib_getxattr,
  3613. .listxattr = fuse_lib_listxattr,
  3614. .removexattr = fuse_lib_removexattr,
  3615. .getlk = fuse_lib_getlk,
  3616. .setlk = fuse_lib_setlk,
  3617. .flock = fuse_lib_flock,
  3618. .bmap = fuse_lib_bmap,
  3619. .ioctl = fuse_lib_ioctl,
  3620. .poll = fuse_lib_poll,
  3621. .fallocate = fuse_lib_fallocate,
  3622. };
  3623. int fuse_notify_poll(struct fuse_pollhandle *ph)
  3624. {
  3625. return fuse_lowlevel_notify_poll(ph);
  3626. }
  3627. static void free_cmd(struct fuse_cmd *cmd)
  3628. {
  3629. free(cmd->buf);
  3630. free(cmd);
  3631. }
  3632. void fuse_process_cmd(struct fuse *f, struct fuse_cmd *cmd)
  3633. {
  3634. fuse_session_process(f->se, cmd->buf, cmd->buflen, cmd->ch);
  3635. free_cmd(cmd);
  3636. }
  3637. int fuse_exited(struct fuse *f)
  3638. {
  3639. return fuse_session_exited(f->se);
  3640. }
  3641. struct fuse_session *fuse_get_session(struct fuse *f)
  3642. {
  3643. return f->se;
  3644. }
  3645. static struct fuse_cmd *fuse_alloc_cmd(size_t bufsize)
  3646. {
  3647. struct fuse_cmd *cmd = (struct fuse_cmd *) malloc(sizeof(*cmd));
  3648. if (cmd == NULL) {
  3649. fprintf(stderr, "fuse: failed to allocate cmd\n");
  3650. return NULL;
  3651. }
  3652. cmd->buf = (char *) malloc(bufsize);
  3653. if (cmd->buf == NULL) {
  3654. fprintf(stderr, "fuse: failed to allocate read buffer\n");
  3655. free(cmd);
  3656. return NULL;
  3657. }
  3658. return cmd;
  3659. }
  3660. struct fuse_cmd *fuse_read_cmd(struct fuse *f)
  3661. {
  3662. struct fuse_chan *ch = fuse_session_next_chan(f->se, NULL);
  3663. size_t bufsize = fuse_chan_bufsize(ch);
  3664. struct fuse_cmd *cmd = fuse_alloc_cmd(bufsize);
  3665. if (cmd != NULL) {
  3666. int res = fuse_chan_recv(&ch, cmd->buf, bufsize);
  3667. if (res <= 0) {
  3668. free_cmd(cmd);
  3669. if (res < 0 && res != -EINTR && res != -EAGAIN)
  3670. fuse_exit(f);
  3671. return NULL;
  3672. }
  3673. cmd->buflen = res;
  3674. cmd->ch = ch;
  3675. }
  3676. return cmd;
  3677. }
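/*
 * The three helpers above (fuse_exited, fuse_process_cmd, fuse_read_cmd)
 * form the old custom event-loop API.  A minimal sketch of how a caller
 * could drive request processing itself instead of using fuse_loop()
 * (illustrative only, the function name is hypothetical):
 */
#if 0
static void example_custom_loop(struct fuse *f)
{
	while (!fuse_exited(f)) {
		struct fuse_cmd *cmd = fuse_read_cmd(f);

		/* NULL means EINTR/EAGAIN or that the session has exited */
		if (cmd == NULL)
			continue;

		/* processes the request and frees cmd */
		fuse_process_cmd(f, cmd);
	}
}
#endif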
  3678. static int fuse_session_loop_remember(struct fuse *f)
  3679. {
  3680. struct fuse_session *se = f->se;
  3681. int res = 0;
  3682. struct timespec now;
  3683. time_t next_clean;
  3684. struct fuse_chan *ch = fuse_session_next_chan(se, NULL);
  3685. size_t bufsize = fuse_chan_bufsize(ch);
  3686. char *buf = (char *) malloc(bufsize);
  3687. struct pollfd fds = {
  3688. .fd = fuse_chan_fd(ch),
  3689. .events = POLLIN
  3690. };
  3691. if (!buf) {
  3692. fprintf(stderr, "fuse: failed to allocate read buffer\n");
  3693. return -1;
  3694. }
  3695. curr_time(&now);
  3696. next_clean = now.tv_sec;
  3697. while (!fuse_session_exited(se)) {
  3698. struct fuse_chan *tmpch = ch;
  3699. struct fuse_buf fbuf = {
  3700. .mem = buf,
  3701. .size = bufsize,
  3702. };
  3703. unsigned timeout;
  3704. curr_time(&now);
  3705. if (now.tv_sec < next_clean)
  3706. timeout = next_clean - now.tv_sec;
  3707. else
  3708. timeout = 0;
  3709. res = poll(&fds, 1, timeout * 1000);
  3710. if (res == -1) {
3711. if (errno == EINTR)
  3712. continue;
  3713. else
  3714. break;
  3715. } else if (res > 0) {
  3716. res = fuse_session_receive_buf(se, &fbuf, &tmpch);
  3717. if (res == -EINTR)
  3718. continue;
  3719. if (res <= 0)
  3720. break;
  3721. fuse_session_process_buf(se, &fbuf, tmpch);
  3722. } else {
  3723. timeout = fuse_clean_cache(f);
  3724. curr_time(&now);
  3725. next_clean = now.tv_sec + timeout;
  3726. }
  3727. }
  3728. free(buf);
  3729. fuse_session_reset(se);
  3730. return res < 0 ? -1 : 0;
  3731. }
  3732. int fuse_loop(struct fuse *f)
  3733. {
  3734. if (!f)
  3735. return -1;
  3736. if (lru_enabled(f))
  3737. return fuse_session_loop_remember(f);
  3738. return fuse_session_loop(f->se);
  3739. }
  3740. int fuse_invalidate(struct fuse *f, const char *path)
  3741. {
  3742. (void) f;
  3743. (void) path;
  3744. return -EINVAL;
  3745. }
  3746. void fuse_exit(struct fuse *f)
  3747. {
  3748. fuse_session_exit(f->se);
  3749. }
  3750. struct fuse_context *fuse_get_context(void)
  3751. {
  3752. return &fuse_get_context_internal()->ctx;
  3753. }
  3754. /*
3755. * The size of fuse_context was extended, so we need to be careful
3756. * about incompatibility (i.e. a new binary cannot work with an old
  3757. * library).
  3758. */
  3759. struct fuse_context *fuse_get_context_compat22(void);
  3760. struct fuse_context *fuse_get_context_compat22(void)
  3761. {
  3762. return &fuse_get_context_internal()->ctx;
  3763. }
  3764. FUSE_SYMVER(".symver fuse_get_context_compat22,fuse_get_context@FUSE_2.2");
  3765. int fuse_getgroups(int size, gid_t list[])
  3766. {
  3767. fuse_req_t req = fuse_get_context_internal()->req;
  3768. return fuse_req_getgroups(req, size, list);
  3769. }
  3770. int fuse_interrupted(void)
  3771. {
  3772. return fuse_req_interrupted(fuse_get_context_internal()->req);
  3773. }
  3774. void fuse_set_getcontext_func(struct fuse_context *(*func)(void))
  3775. {
  3776. (void) func;
  3777. /* no-op */
  3778. }
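/*
 * fuse_get_context(), fuse_getgroups() and fuse_interrupted() are meant
 * to be called from within a filesystem operation.  A minimal sketch of
 * a hypothetical handler using them (illustrative only):
 */
#if 0
static int example_open(const char *path, struct fuse_file_info *fi)
{
	struct fuse_context *ctx = fuse_get_context();
	gid_t groups[32];
	int ngroups;

	(void) fi;

	/* uid/gid/pid of the process that issued this request */
	fprintf(stderr, "open(%s) by uid=%u pid=%i\n",
		path, (unsigned) ctx->uid, (int) ctx->pid);

	/* supplementary groups of the caller; returns total count or -errno */
	ngroups = fuse_getgroups(32, groups);
	if (ngroups < 0)
		return ngroups;

	/* give up early if the request has already been interrupted */
	if (fuse_interrupted())
		return -EINTR;

	return 0;
}
#endif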
  3779. enum {
  3780. KEY_HELP,
  3781. };
  3782. #define FUSE_LIB_OPT(t, p, v) { t, offsetof(struct fuse_config, p), v }
  3783. static const struct fuse_opt fuse_lib_opts[] = {
  3784. FUSE_OPT_KEY("-h", KEY_HELP),
  3785. FUSE_OPT_KEY("--help", KEY_HELP),
  3786. FUSE_OPT_KEY("debug", FUSE_OPT_KEY_KEEP),
  3787. FUSE_OPT_KEY("-d", FUSE_OPT_KEY_KEEP),
  3788. FUSE_LIB_OPT("debug", debug, 1),
  3789. FUSE_LIB_OPT("-d", debug, 1),
  3790. FUSE_LIB_OPT("hard_remove", hard_remove, 1),
  3791. FUSE_LIB_OPT("use_ino", use_ino, 1),
  3792. FUSE_LIB_OPT("readdir_ino", readdir_ino, 1),
  3793. FUSE_LIB_OPT("direct_io", direct_io, 1),
  3794. FUSE_LIB_OPT("kernel_cache", kernel_cache, 1),
  3795. FUSE_LIB_OPT("auto_cache", auto_cache, 1),
  3796. FUSE_LIB_OPT("noauto_cache", auto_cache, 0),
  3797. FUSE_LIB_OPT("umask=", set_mode, 1),
  3798. FUSE_LIB_OPT("umask=%o", umask, 0),
  3799. FUSE_LIB_OPT("uid=", set_uid, 1),
  3800. FUSE_LIB_OPT("uid=%d", uid, 0),
  3801. FUSE_LIB_OPT("gid=", set_gid, 1),
  3802. FUSE_LIB_OPT("gid=%d", gid, 0),
  3803. FUSE_LIB_OPT("entry_timeout=%lf", entry_timeout, 0),
  3804. FUSE_LIB_OPT("attr_timeout=%lf", attr_timeout, 0),
  3805. FUSE_LIB_OPT("ac_attr_timeout=%lf", ac_attr_timeout, 0),
  3806. FUSE_LIB_OPT("ac_attr_timeout=", ac_attr_timeout_set, 1),
  3807. FUSE_LIB_OPT("negative_timeout=%lf", negative_timeout, 0),
  3808. FUSE_LIB_OPT("noforget", remember, -1),
  3809. FUSE_LIB_OPT("remember=%u", remember, 0),
  3810. FUSE_LIB_OPT("nopath", nopath, 1),
  3811. FUSE_LIB_OPT("intr", intr, 1),
  3812. FUSE_LIB_OPT("intr_signal=%d", intr_signal, 0),
  3813. FUSE_LIB_OPT("modules=%s", modules, 0),
  3814. FUSE_LIB_OPT("threads=%d", threads, 0),
  3815. FUSE_OPT_END
  3816. };
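/*
 * Illustration of the table above: each FUSE_LIB_OPT() entry expands to
 * a fuse_opt that stores the parsed value directly into struct
 * fuse_config via offsetof().  For example
 *
 *	FUSE_LIB_OPT("remember=%u", remember, 0)
 * becomes
 *	{ "remember=%u", offsetof(struct fuse_config, remember), 0 }
 *
 * so "-o remember=30" sets conf.remember to 30, while valueless
 * templates such as "uid=" merely set the corresponding flag member
 * (here set_uid) to 1.
 */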
  3817. static void fuse_lib_help(void)
  3818. {
  3819. fprintf(stderr,
  3820. " -o hard_remove immediate removal (don't hide files)\n"
  3821. " -o use_ino let filesystem set inode numbers\n"
  3822. " -o readdir_ino try to fill in d_ino in readdir\n"
  3823. " -o direct_io use direct I/O\n"
  3824. " -o kernel_cache cache files in kernel\n"
  3825. " -o [no]auto_cache enable caching based on modification times (off)\n"
  3826. " -o umask=M set file permissions (octal)\n"
  3827. " -o uid=N set file owner\n"
  3828. " -o gid=N set file group\n"
  3829. " -o entry_timeout=T cache timeout for names (1.0s)\n"
  3830. " -o negative_timeout=T cache timeout for deleted names (0.0s)\n"
  3831. " -o attr_timeout=T cache timeout for attributes (1.0s)\n"
  3832. " -o ac_attr_timeout=T auto cache timeout for attributes (attr_timeout)\n"
  3833. " -o noforget never forget cached inodes\n"
  3834. " -o remember=T remember cached inodes for T seconds (0s)\n"
  3835. " -o nopath don't supply path if not necessary\n"
  3836. " -o intr allow requests to be interrupted\n"
  3837. " -o intr_signal=NUM signal to send on interrupt (%i)\n"
  3838. " -o modules=M1[:M2...] names of modules to push onto filesystem stack\n"
  3839. "\n", FUSE_DEFAULT_INTR_SIGNAL);
  3840. }
  3841. static void fuse_lib_help_modules(void)
  3842. {
  3843. struct fuse_module *m;
  3844. fprintf(stderr, "\nModule options:\n");
  3845. pthread_mutex_lock(&fuse_context_lock);
  3846. for (m = fuse_modules; m; m = m->next) {
  3847. struct fuse_fs *fs = NULL;
  3848. struct fuse_fs *newfs;
  3849. struct fuse_args args = FUSE_ARGS_INIT(0, NULL);
  3850. if (fuse_opt_add_arg(&args, "") != -1 &&
  3851. fuse_opt_add_arg(&args, "-h") != -1) {
  3852. fprintf(stderr, "\n[%s]\n", m->name);
  3853. newfs = m->factory(&args, &fs);
  3854. assert(newfs == NULL);
  3855. }
  3856. fuse_opt_free_args(&args);
  3857. }
  3858. pthread_mutex_unlock(&fuse_context_lock);
  3859. }
  3860. static int fuse_lib_opt_proc(void *data, const char *arg, int key,
  3861. struct fuse_args *outargs)
  3862. {
  3863. (void) arg; (void) outargs;
  3864. if (key == KEY_HELP) {
  3865. struct fuse_config *conf = (struct fuse_config *) data;
  3866. fuse_lib_help();
  3867. conf->help = 1;
  3868. }
  3869. return 1;
  3870. }
  3871. int fuse_is_lib_option(const char *opt)
  3872. {
  3873. return fuse_lowlevel_is_lib_option(opt) ||
  3874. fuse_opt_match(fuse_lib_opts, opt);
  3875. }
  3876. static int fuse_init_intr_signal(int signum, int *installed)
  3877. {
  3878. struct sigaction old_sa;
  3879. if (sigaction(signum, NULL, &old_sa) == -1) {
  3880. perror("fuse: cannot get old signal handler");
  3881. return -1;
  3882. }
  3883. if (old_sa.sa_handler == SIG_DFL) {
  3884. struct sigaction sa;
  3885. memset(&sa, 0, sizeof(struct sigaction));
  3886. sa.sa_handler = fuse_intr_sighandler;
  3887. sigemptyset(&sa.sa_mask);
  3888. if (sigaction(signum, &sa, NULL) == -1) {
  3889. perror("fuse: cannot set interrupt signal handler");
  3890. return -1;
  3891. }
  3892. *installed = 1;
  3893. }
  3894. return 0;
  3895. }
  3896. static void fuse_restore_intr_signal(int signum)
  3897. {
  3898. struct sigaction sa;
  3899. memset(&sa, 0, sizeof(struct sigaction));
  3900. sa.sa_handler = SIG_DFL;
  3901. sigaction(signum, &sa, NULL);
  3902. }
  3903. static int fuse_push_module(struct fuse *f, const char *module,
  3904. struct fuse_args *args)
  3905. {
  3906. struct fuse_fs *fs[2] = { f->fs, NULL };
  3907. struct fuse_fs *newfs;
  3908. struct fuse_module *m = fuse_get_module(module);
  3909. if (!m)
  3910. return -1;
  3911. newfs = m->factory(args, fs);
  3912. if (!newfs) {
  3913. fuse_put_module(m);
  3914. return -1;
  3915. }
  3916. newfs->m = m;
  3917. f->fs = newfs;
  3918. f->nullpath_ok = newfs->op.flag_nullpath_ok && f->nullpath_ok;
  3919. f->conf.nopath = newfs->op.flag_nopath && f->conf.nopath;
  3920. f->utime_omit_ok = newfs->op.flag_utime_omit_ok && f->utime_omit_ok;
  3921. return 0;
  3922. }
  3923. struct fuse_fs *fuse_fs_new(const struct fuse_operations *op, size_t op_size,
  3924. void *user_data)
  3925. {
  3926. struct fuse_fs *fs;
  3927. if (sizeof(struct fuse_operations) < op_size) {
  3928. fprintf(stderr, "fuse: warning: library too old, some operations may not not work\n");
  3929. op_size = sizeof(struct fuse_operations);
  3930. }
  3931. fs = (struct fuse_fs *) calloc(1, sizeof(struct fuse_fs));
  3932. if (!fs) {
  3933. fprintf(stderr, "fuse: failed to allocate fuse_fs object\n");
  3934. return NULL;
  3935. }
  3936. fs->user_data = user_data;
  3937. if (op)
  3938. memcpy(&fs->op, op, op_size);
  3939. return fs;
  3940. }
  3941. static int node_table_init(struct node_table *t)
  3942. {
  3943. t->size = NODE_TABLE_MIN_SIZE;
  3944. t->array = (struct node **) calloc(1, sizeof(struct node *) * t->size);
  3945. if (t->array == NULL) {
  3946. fprintf(stderr, "fuse: memory allocation failed\n");
  3947. return -1;
  3948. }
  3949. t->use = 0;
  3950. t->split = 0;
  3951. return 0;
  3952. }
  3953. static void *fuse_prune_nodes(void *fuse)
  3954. {
  3955. struct fuse *f = fuse;
  3956. int sleep_time;
3957. while (1) {
  3958. sleep_time = fuse_clean_cache(f);
  3959. sleep(sleep_time);
  3960. }
  3961. return NULL;
  3962. }
  3963. int fuse_start_cleanup_thread(struct fuse *f)
  3964. {
  3965. if (lru_enabled(f))
  3966. return fuse_start_thread(&f->prune_thread, fuse_prune_nodes, f);
  3967. return 0;
  3968. }
  3969. void fuse_stop_cleanup_thread(struct fuse *f)
  3970. {
  3971. if (lru_enabled(f)) {
  3972. pthread_mutex_lock(&f->lock);
  3973. pthread_cancel(f->prune_thread);
  3974. pthread_mutex_unlock(&f->lock);
  3975. pthread_join(f->prune_thread, NULL);
  3976. }
  3977. }
  3978. struct fuse *fuse_new_common(struct fuse_chan *ch, struct fuse_args *args,
  3979. const struct fuse_operations *op,
  3980. size_t op_size, void *user_data, int compat)
  3981. {
  3982. struct fuse *f;
  3983. struct node *root;
  3984. struct fuse_fs *fs;
  3985. struct fuse_lowlevel_ops llop = fuse_path_ops;
  3986. if (fuse_create_context_key() == -1)
  3987. goto out;
  3988. f = (struct fuse *) calloc(1, sizeof(struct fuse));
  3989. if (f == NULL) {
  3990. fprintf(stderr, "fuse: failed to allocate fuse object\n");
  3991. goto out_delete_context_key;
  3992. }
  3993. fs = fuse_fs_new(op, op_size, user_data);
  3994. if (!fs)
  3995. goto out_free;
  3996. fs->compat = compat;
  3997. f->fs = fs;
  3998. f->nullpath_ok = fs->op.flag_nullpath_ok;
  3999. f->conf.nopath = fs->op.flag_nopath;
  4000. f->utime_omit_ok = fs->op.flag_utime_omit_ok;
  4001. /* Oh f**k, this is ugly! */
  4002. if (!fs->op.lock) {
  4003. llop.getlk = NULL;
  4004. llop.setlk = NULL;
  4005. }
  4006. f->conf.entry_timeout = 1.0;
  4007. f->conf.attr_timeout = 1.0;
  4008. f->conf.negative_timeout = 0.0;
  4009. f->conf.intr_signal = FUSE_DEFAULT_INTR_SIGNAL;
  4010. f->pagesize = getpagesize();
  4011. init_list_head(&f->partial_slabs);
  4012. init_list_head(&f->full_slabs);
  4013. init_list_head(&f->lru_table);
  4014. if (fuse_opt_parse(args, &f->conf, fuse_lib_opts,
  4015. fuse_lib_opt_proc) == -1)
  4016. goto out_free_fs;
  4017. if (f->conf.modules) {
  4018. char *module;
  4019. char *next;
  4020. for (module = f->conf.modules; module; module = next) {
  4021. char *p;
  4022. for (p = module; *p && *p != ':'; p++);
  4023. next = *p ? p + 1 : NULL;
  4024. *p = '\0';
  4025. if (module[0] &&
  4026. fuse_push_module(f, module, args) == -1)
  4027. goto out_free_fs;
  4028. }
  4029. }
  4030. if (!f->conf.ac_attr_timeout_set)
  4031. f->conf.ac_attr_timeout = f->conf.attr_timeout;
  4032. #if defined(__FreeBSD__) || defined(__NetBSD__)
  4033. /*
4034. * On FreeBSD and NetBSD we always enable readdir_ino, as inode
4035. * numbers are needed to make getcwd(3) work.
  4036. */
  4037. f->conf.readdir_ino = 1;
  4038. #endif
  4039. if (compat && compat <= 25) {
  4040. if (fuse_sync_compat_args(args) == -1)
  4041. goto out_free_fs;
  4042. }
  4043. f->se = fuse_lowlevel_new_common(args, &llop, sizeof(llop), f);
  4044. if (f->se == NULL) {
  4045. if (f->conf.help)
  4046. fuse_lib_help_modules();
  4047. goto out_free_fs;
  4048. }
  4049. fuse_session_add_chan(f->se, ch);
  4050. if (f->conf.debug) {
  4051. fprintf(stderr, "nullpath_ok: %i\n", f->nullpath_ok);
  4052. fprintf(stderr, "nopath: %i\n", f->conf.nopath);
  4053. fprintf(stderr, "utime_omit_ok: %i\n", f->utime_omit_ok);
  4054. }
  4055. /* Trace topmost layer by default */
  4056. f->fs->debug = f->conf.debug;
  4057. f->ctr = 0;
  4058. f->generation = 0;
  4059. if (node_table_init(&f->name_table) == -1)
  4060. goto out_free_session;
  4061. if (node_table_init(&f->id_table) == -1)
  4062. goto out_free_name_table;
  4063. fuse_mutex_init(&f->lock);
  4064. root = alloc_node(f);
  4065. if (root == NULL) {
  4066. fprintf(stderr, "fuse: memory allocation failed\n");
  4067. goto out_free_id_table;
  4068. }
  4069. if (lru_enabled(f)) {
  4070. struct node_lru *lnode = node_lru(root);
  4071. init_list_head(&lnode->lru);
  4072. }
  4073. strcpy(root->inline_name, "/");
  4074. root->name = root->inline_name;
  4075. if (f->conf.intr &&
  4076. fuse_init_intr_signal(f->conf.intr_signal,
  4077. &f->intr_installed) == -1)
  4078. goto out_free_root;
  4079. root->parent = NULL;
  4080. root->nodeid = FUSE_ROOT_ID;
  4081. inc_nlookup(root);
  4082. hash_id(f, root);
  4083. return f;
  4084. out_free_root:
  4085. free(root);
  4086. out_free_id_table:
  4087. free(f->id_table.array);
  4088. out_free_name_table:
  4089. free(f->name_table.array);
  4090. out_free_session:
  4091. fuse_session_destroy(f->se);
  4092. out_free_fs:
  4093. /* Horrible compatibility hack to stop the destructor from being
  4094. called on the filesystem without init being called first */
  4095. fs->op.destroy = NULL;
  4096. fuse_fs_destroy(f->fs);
  4097. free(f->conf.modules);
  4098. out_free:
  4099. free(f);
  4100. out_delete_context_key:
  4101. fuse_delete_context_key();
  4102. out:
  4103. return NULL;
  4104. }
  4105. struct fuse *fuse_new(struct fuse_chan *ch, struct fuse_args *args,
  4106. const struct fuse_operations *op, size_t op_size,
  4107. void *user_data)
  4108. {
  4109. return fuse_new_common(ch, args, op, op_size, user_data, 0);
  4110. }
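/*
 * A minimal usage sketch for the constructor above, assuming the usual
 * FUSE 2.x channel helpers fuse_mount()/fuse_unmount() declared
 * elsewhere in the library (illustrative only, error handling trimmed):
 */
#if 0
static int example_run(const char *mountpoint, struct fuse_args *args,
		       const struct fuse_operations *ops, void *user_data)
{
	struct fuse_chan *ch = fuse_mount(mountpoint, args);
	struct fuse *f;
	int res;

	if (ch == NULL)
		return -1;

	f = fuse_new(ch, args, ops, sizeof(*ops), user_data);
	if (f == NULL) {
		fuse_unmount(mountpoint, ch);
		return -1;
	}

	res = fuse_loop(f);	/* single-threaded dispatch loop */

	fuse_unmount(mountpoint, ch);
	fuse_destroy(f);
	return res;
}
#endif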
  4111. void fuse_destroy(struct fuse *f)
  4112. {
  4113. size_t i;
  4114. if (f->conf.intr && f->intr_installed)
  4115. fuse_restore_intr_signal(f->conf.intr_signal);
  4116. if (f->fs) {
  4117. struct fuse_context_i *c = fuse_get_context_internal();
  4118. memset(c, 0, sizeof(*c));
  4119. c->ctx.fuse = f;
  4120. for (i = 0; i < f->id_table.size; i++) {
  4121. struct node *node;
  4122. for (node = f->id_table.array[i]; node != NULL;
  4123. node = node->id_next) {
  4124. if (node->is_hidden) {
  4125. char *path;
  4126. if (try_get_path(f, node->nodeid, NULL, &path, NULL, false) == 0) {
  4127. fuse_fs_unlink(f->fs, path);
  4128. free(path);
  4129. }
  4130. }
  4131. }
  4132. }
  4133. }
  4134. for (i = 0; i < f->id_table.size; i++) {
  4135. struct node *node;
  4136. struct node *next;
  4137. for (node = f->id_table.array[i]; node != NULL; node = next) {
  4138. next = node->id_next;
  4139. free_node(f, node);
  4140. f->id_table.use--;
  4141. }
  4142. }
  4143. assert(list_empty(&f->partial_slabs));
  4144. assert(list_empty(&f->full_slabs));
  4145. free(f->id_table.array);
  4146. free(f->name_table.array);
  4147. pthread_mutex_destroy(&f->lock);
  4148. fuse_session_destroy(f->se);
  4149. free(f->conf.modules);
  4150. free(f);
  4151. fuse_delete_context_key();
  4152. }
  4153. static struct fuse *fuse_new_common_compat25(int fd, struct fuse_args *args,
  4154. const struct fuse_operations *op,
  4155. size_t op_size, int compat)
  4156. {
  4157. struct fuse *f = NULL;
  4158. struct fuse_chan *ch = fuse_kern_chan_new(fd);
  4159. if (ch)
  4160. f = fuse_new_common(ch, args, op, op_size, NULL, compat);
  4161. return f;
  4162. }
  4163. /* called with fuse_context_lock held or during initialization (before
  4164. main() has been called) */
  4165. void fuse_register_module(struct fuse_module *mod)
  4166. {
  4167. mod->ctr = 0;
  4168. mod->so = fuse_current_so;
  4169. if (mod->so)
  4170. mod->so->ctr++;
  4171. mod->next = fuse_modules;
  4172. fuse_modules = mod;
  4173. }
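/*
 * A minimal sketch of how a stacking module would typically use the
 * registration hook above: the factory receives the option args plus a
 * NULL-terminated array of the filesystems it stacks on (cf.
 * fuse_push_module()), and registration runs from a constructor so it
 * happens before main().  All names below are hypothetical:
 */
#if 0
static struct fuse_operations example_ops;	/* wrapper operations */

static struct fuse_fs *example_factory(struct fuse_args *args,
				       struct fuse_fs *fs[])
{
	(void) args;
	/* a real module would wrap fs[0]; here it is just kept as user_data */
	return fuse_fs_new(&example_ops, sizeof(example_ops), fs[0]);
}

static struct fuse_module example_mod = {
	.name	 = "example",
	.factory = example_factory,
};

static __attribute__((constructor)) void example_register(void)
{
	fuse_register_module(&example_mod);
}
#endif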
  4174. #if !defined(__FreeBSD__) && !defined(__NetBSD__)
  4175. static struct fuse *fuse_new_common_compat(int fd, const char *opts,
  4176. const struct fuse_operations *op,
  4177. size_t op_size, int compat)
  4178. {
  4179. struct fuse *f;
  4180. struct fuse_args args = FUSE_ARGS_INIT(0, NULL);
  4181. if (fuse_opt_add_arg(&args, "") == -1)
  4182. return NULL;
  4183. if (opts &&
  4184. (fuse_opt_add_arg(&args, "-o") == -1 ||
  4185. fuse_opt_add_arg(&args, opts) == -1)) {
  4186. fuse_opt_free_args(&args);
  4187. return NULL;
  4188. }
  4189. f = fuse_new_common_compat25(fd, &args, op, op_size, compat);
  4190. fuse_opt_free_args(&args);
  4191. return f;
  4192. }
  4193. struct fuse *fuse_new_compat22(int fd, const char *opts,
  4194. const struct fuse_operations_compat22 *op,
  4195. size_t op_size)
  4196. {
  4197. return fuse_new_common_compat(fd, opts, (struct fuse_operations *) op,
  4198. op_size, 22);
  4199. }
  4200. struct fuse *fuse_new_compat2(int fd, const char *opts,
  4201. const struct fuse_operations_compat2 *op)
  4202. {
  4203. return fuse_new_common_compat(fd, opts, (struct fuse_operations *) op,
  4204. sizeof(struct fuse_operations_compat2),
  4205. 21);
  4206. }
  4207. struct fuse *fuse_new_compat1(int fd, int flags,
  4208. const struct fuse_operations_compat1 *op)
  4209. {
  4210. const char *opts = NULL;
  4211. if (flags & FUSE_DEBUG_COMPAT1)
  4212. opts = "debug";
  4213. return fuse_new_common_compat(fd, opts, (struct fuse_operations *) op,
  4214. sizeof(struct fuse_operations_compat1),
  4215. 11);
  4216. }
  4217. FUSE_SYMVER(".symver fuse_exited,__fuse_exited@");
  4218. FUSE_SYMVER(".symver fuse_process_cmd,__fuse_process_cmd@");
  4219. FUSE_SYMVER(".symver fuse_read_cmd,__fuse_read_cmd@");
  4220. FUSE_SYMVER(".symver fuse_set_getcontext_func,__fuse_set_getcontext_func@");
  4221. FUSE_SYMVER(".symver fuse_new_compat2,fuse_new@");
  4222. FUSE_SYMVER(".symver fuse_new_compat22,fuse_new@FUSE_2.2");
  4223. #endif /* __FreeBSD__ || __NetBSD__ */
  4224. struct fuse *fuse_new_compat25(int fd, struct fuse_args *args,
  4225. const struct fuse_operations_compat25 *op,
  4226. size_t op_size)
  4227. {
  4228. return fuse_new_common_compat25(fd, args, (struct fuse_operations *) op,
  4229. op_size, 25);
  4230. }
  4231. FUSE_SYMVER(".symver fuse_new_compat25,fuse_new@FUSE_2.5");
  4232. int fuse_config_num_threads(const struct fuse *f)
  4233. {
  4234. return f->conf.threads;
  4235. }