You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

4592 lines
106 KiB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU LGPLv2.
  5. See the file COPYING.LIB
  6. */
  7. /* For pthread_rwlock_t */
  8. #define _GNU_SOURCE
  9. #include "config.h"
  10. #include "fuse_chan.h"
  11. #include "fuse_common_compat.h"
  12. #include "fuse_dirents.h"
  13. #include "fuse_i.h"
  14. #include "fuse_kernel.h"
  15. #include "fuse_lowlevel.h"
  16. #include "fuse_misc.h"
  17. #include "fuse_opt.h"
  18. #include <assert.h>
  19. #include <dlfcn.h>
  20. #include <errno.h>
  21. #include <fcntl.h>
  22. #include <limits.h>
  23. #include <poll.h>
  24. #include <signal.h>
  25. #include <stdbool.h>
  26. #include <stddef.h>
  27. #include <stdint.h>
  28. #include <stdio.h>
  29. #include <stdlib.h>
  30. #include <string.h>
  31. #include <sys/file.h>
  32. #include <sys/mman.h>
  33. #include <sys/param.h>
  34. #include <sys/time.h>
  35. #include <sys/uio.h>
  36. #include <time.h>
  37. #include <unistd.h>
  38. #define FUSE_NODE_SLAB 1
  39. #ifndef MAP_ANONYMOUS
  40. #undef FUSE_NODE_SLAB
  41. #endif
  42. #define FUSE_DEFAULT_INTR_SIGNAL SIGUSR1
  43. #define FUSE_UNKNOWN_INO UINT64_MAX
  44. #define OFFSET_MAX 0x7fffffffffffffffLL
  45. #define NODE_TABLE_MIN_SIZE 8192
/* Mount-time configuration, filled in from command-line options. */
struct fuse_config
{
	unsigned int uid;
	unsigned int gid;
	unsigned int umask;
	double entry_timeout;
	double negative_timeout;
	double attr_timeout;
	int remember;		/* > 0 enables LRU tracking of forgotten nodes */
	int nopath;		/* skip path construction where allowed */
	int debug;
	int use_ino;
	int set_mode;
	int set_uid;
	int set_gid;
	int intr;
	int intr_signal;	/* defaults to FUSE_DEFAULT_INTR_SIGNAL (SIGUSR1) */
	int help;
	int threads;
};
/* A filesystem implementation: the user's operation table plus the
 * opaque user-data pointer passed back to every callback. */
struct fuse_fs
{
	struct fuse_operations op;
	void *user_data;
	int debug;
};
/* One waiter on the tree-lock queue.  A request needs one or two paths
 * (two for rename-style operations); pathN/wnodeN are out-parameters
 * filled in once the corresponding path has been locked. */
struct lock_queue_element {
	struct lock_queue_element *next;	/* singly-linked FIFO queue */
	pthread_cond_t cond;			/* signalled when progress is possible */
	fuse_ino_t nodeid1;
	const char *name1;
	char **path1;
	struct node **wnode1;
	fuse_ino_t nodeid2;
	const char *name2;
	char **path2;
	struct node **wnode2;
	int err;				/* final status delivered to the waiter */
	bool first_locked : 1;
	bool second_locked : 1;
	bool done : 1;				/* request finished (success or error) */
};
/* Hash table grown/shrunk by incremental (linear) splitting: buckets
 * below `split` have already been rehashed for the doubled size. */
struct node_table {
	struct node **array;
	size_t use;	/* number of entries stored */
	size_t size;	/* number of buckets */
	size_t split;	/* progress of the incremental rehash */
};
/* Intrusive doubly-linked list, Linux-kernel style. */
#define container_of(ptr, type, member) ({ \
	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
	(type *)( (char *)__mptr - offsetof(type,member) );})

#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

struct list_head {
	struct list_head *next;
	struct list_head *prev;
};
/* Header at the start of each page-sized slab of node allocations. */
struct node_slab {
	struct list_head list; /* must be the first member */
	struct list_head freelist;	/* unused node slots in this slab */
	int used;			/* number of allocated slots */
};
/* Top-level state for one mounted filesystem. */
struct fuse
{
	struct fuse_session *se;
	struct node_table name_table;	/* (parent ino, name) -> node */
	struct node_table id_table;	/* ino -> node */
	struct list_head lru_table;	/* forgotten nodes, when conf.remember > 0 */
	fuse_ino_t ctr;			/* last inode number handed out */
	uint64_t generation;		/* bumped when ctr wraps */
	unsigned int hidectr;
	pthread_mutex_t lock;		/* guards the tables and node state */
	struct fuse_config conf;
	int intr_installed;
	struct fuse_fs *fs;
	int nullpath_ok;
	int utime_omit_ok;
	struct lock_queue_element *lockq;	/* tree-lock wait queue */
	int pagesize;			/* slab size for the node allocator */
	struct list_head partial_slabs;	/* slabs with free slots */
	struct list_head full_slabs;	/* slabs with no free slots */
	pthread_t prune_thread;
};
/* One POSIX record lock held on a node (singly-linked per node). */
struct lock {
	int type;
	off_t start;
	off_t end;
	pid_t pid;
	uint64_t owner;
	struct lock *next;
};
/* In-memory representation of one inode known to the kernel. */
struct node
{
	struct node *name_next;	/* name_table hash chain */
	struct node *id_next;	/* id_table hash chain */
	fuse_ino_t nodeid;
	uint64_t generation;
	int refctr;
	struct node *parent;
	char *name;		/* points at inline_name when it fits there */
	uint64_t nlookup;	/* kernel lookup count */
	int open_count;
	struct lock *locks;
	uint64_t hidden_fh;	/* handle freed via fuse_fs_free_hide() */
	char is_hidden;
	int treelock;		/* > 0: reader count; see values below */
	struct stat stat_cache;
	char stat_cache_valid;
	char inline_name[32];	/* short names stored inline, avoiding malloc */
};
/* treelock special values: TREELOCK_WRITE marks a writer holding the
 * node; adding TREELOCK_WAIT_OFFSET to a positive reader count marks
 * that a writer is queued behind the readers. */
#define TREELOCK_WRITE -1
#define TREELOCK_WAIT_OFFSET INT_MIN
/* Node extended with LRU bookkeeping, used when conf.remember > 0.
 * The embedded node must remain the first member: node_lru() recovers
 * this struct by a plain cast. */
struct node_lru {
	struct node node;
	struct list_head lru;
	struct timespec forget_time;
};
/* State of an open directory handle. */
struct fuse_dh
{
	pthread_mutex_t lock;
	uint64_t fh;
	fuse_dirents_t d;	/* accumulated directory entries */
};

/* Per-thread context made available to filesystem callbacks. */
struct fuse_context_i {
	struct fuse_context ctx;
	fuse_req_t req;
};
/* Thread-local storage for the fuse_context.  NOTE(review):
 * fuse_context_ref presumably refcounts users of the key so multiple
 * fuse instances can share it -- confirm against the key create/delete
 * code further down the file. */
static pthread_key_t fuse_context_key;
static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER;
static int fuse_context_ref;
  176. static void init_list_head(struct list_head *list)
  177. {
  178. list->next = list;
  179. list->prev = list;
  180. }
  181. static int list_empty(const struct list_head *head)
  182. {
  183. return head->next == head;
  184. }
  185. static void list_add(struct list_head *new, struct list_head *prev,
  186. struct list_head *next)
  187. {
  188. next->prev = new;
  189. new->next = next;
  190. new->prev = prev;
  191. prev->next = new;
  192. }
  193. static inline void list_add_head(struct list_head *new, struct list_head *head)
  194. {
  195. list_add(new, head, head->next);
  196. }
  197. static inline void list_add_tail(struct list_head *new, struct list_head *head)
  198. {
  199. list_add(new, head->prev, head);
  200. }
  201. static inline void list_del(struct list_head *entry)
  202. {
  203. struct list_head *prev = entry->prev;
  204. struct list_head *next = entry->next;
  205. next->prev = prev;
  206. prev->next = next;
  207. }
  208. static inline int lru_enabled(struct fuse *f)
  209. {
  210. return f->conf.remember > 0;
  211. }
  212. static struct node_lru *node_lru(struct node *node)
  213. {
  214. return (struct node_lru *) node;
  215. }
  216. static size_t get_node_size(struct fuse *f)
  217. {
  218. if (lru_enabled(f))
  219. return sizeof(struct node_lru);
  220. else
  221. return sizeof(struct node);
  222. }
  223. #ifdef FUSE_NODE_SLAB
  224. static struct node_slab *list_to_slab(struct list_head *head)
  225. {
  226. return (struct node_slab *) head;
  227. }
  228. static struct node_slab *node_to_slab(struct fuse *f, struct node *node)
  229. {
  230. return (struct node_slab *) (((uintptr_t) node) & ~((uintptr_t) f->pagesize - 1));
  231. }
/* Allocate one page-sized slab via anonymous mmap, carve it into
 * node-sized slots packed at the END of the page (so node_to_slab()'s
 * round-down always lands on the header at the page start), thread the
 * slots onto the slab's freelist and put the slab on the partial list.
 * Returns 0 on success, -1 if mmap failed. */
static int alloc_slab(struct fuse *f)
{
	void *mem;
	struct node_slab *slab;
	char *start;
	size_t num;
	size_t i;
	size_t node_size = get_node_size(f);

	mem = mmap(NULL, f->pagesize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED)
		return -1;

	slab = mem;
	init_list_head(&slab->freelist);
	slab->used = 0;
	num = (f->pagesize - sizeof(struct node_slab)) / node_size;
	start = (char *) mem + f->pagesize - num * node_size;
	for (i = 0; i < num; i++) {
		struct list_head *n;

		/* each free slot doubles as a list link while unused */
		n = (struct list_head *) (start + i * node_size);
		list_add_tail(n, &slab->freelist);
	}
	list_add_tail(&slab->list, &f->partial_slabs);
	return 0;
}
  257. static struct node *alloc_node(struct fuse *f)
  258. {
  259. struct node_slab *slab;
  260. struct list_head *node;
  261. if (list_empty(&f->partial_slabs)) {
  262. int res = alloc_slab(f);
  263. if (res != 0)
  264. return NULL;
  265. }
  266. slab = list_to_slab(f->partial_slabs.next);
  267. slab->used++;
  268. node = slab->freelist.next;
  269. list_del(node);
  270. if (list_empty(&slab->freelist)) {
  271. list_del(&slab->list);
  272. list_add_tail(&slab->list, &f->full_slabs);
  273. }
  274. memset(node, 0, sizeof(struct node));
  275. return (struct node *) node;
  276. }
  277. static void free_slab(struct fuse *f, struct node_slab *slab)
  278. {
  279. int res;
  280. list_del(&slab->list);
  281. res = munmap(slab, f->pagesize);
  282. if (res == -1)
  283. fprintf(stderr, "fuse warning: munmap(%p) failed\n", slab);
  284. }
/* Return a node's slot to its slab.  A previously-full slab moves back
 * to the partial list; a slab whose last slot is freed is released
 * entirely. */
static void free_node_mem(struct fuse *f, struct node *node)
{
	struct node_slab *slab = node_to_slab(f, node);
	struct list_head *n = (struct list_head *) node;

	slab->used--;
	if (slab->used) {
		/* slab still holds live nodes */
		if (list_empty(&slab->freelist)) {
			/* was full -> becomes partial again */
			list_del(&slab->list);
			list_add_tail(&slab->list, &f->partial_slabs);
		}
		list_add_head(n, &slab->freelist);
	} else {
		/* last node gone: give the page back */
		free_slab(f, slab);
	}
}
  300. #else
  301. static struct node *alloc_node(struct fuse *f)
  302. {
  303. return (struct node *) calloc(1, get_node_size(f));
  304. }
/* Slab allocator disabled: plain free; f is unused. */
static void free_node_mem(struct fuse *f, struct node *node)
{
	(void) f;
	free(node);
}
  310. #endif
/* Hash an inode number into the id_table.  The table grows by
 * incremental splitting: while a doubling is in progress only buckets
 * below `split` have been rehashed, so an entry whose old-size bucket
 * has not been split yet must still be addressed at its old position. */
static size_t id_hash(struct fuse *f, fuse_ino_t ino)
{
	uint64_t hash = ((uint32_t) ino * 2654435761U) % f->id_table.size;	/* Knuth multiplicative hash */
	uint64_t oldhash = hash % (f->id_table.size / 2);

	if (oldhash >= f->id_table.split)
		return oldhash;	/* bucket not split yet: old position */
	else
		return hash;
}
  320. static struct node *get_node_nocheck(struct fuse *f, fuse_ino_t nodeid)
  321. {
  322. size_t hash = id_hash(f, nodeid);
  323. struct node *node;
  324. for (node = f->id_table.array[hash]; node != NULL; node = node->id_next)
  325. if (node->nodeid == nodeid)
  326. return node;
  327. return NULL;
  328. }
  329. static
  330. struct node*
  331. get_node(struct fuse *f,
  332. const fuse_ino_t nodeid)
  333. {
  334. struct node *node = get_node_nocheck(f, nodeid);
  335. if(!node)
  336. {
  337. fprintf(stderr, "fuse internal error: node %llu not found\n",
  338. (unsigned long long) nodeid);
  339. abort();
  340. }
  341. return node;
  342. }
  343. static void curr_time(struct timespec *now);
  344. static double diff_timespec(const struct timespec *t1,
  345. const struct timespec *t2);
  346. static void remove_node_lru(struct node *node)
  347. {
  348. struct node_lru *lnode = node_lru(node);
  349. list_del(&lnode->lru);
  350. init_list_head(&lnode->lru);
  351. }
  352. static void set_forget_time(struct fuse *f, struct node *node)
  353. {
  354. struct node_lru *lnode = node_lru(node);
  355. list_del(&lnode->lru);
  356. list_add_tail(&lnode->lru, &f->lru_table);
  357. curr_time(&lnode->forget_time);
  358. }
  359. static
  360. void
  361. free_node(struct fuse *f_,
  362. struct node *node_)
  363. {
  364. if(node_->name != node_->inline_name)
  365. free(node_->name);
  366. if(node_->is_hidden)
  367. fuse_fs_free_hide(f_->fs,node_->hidden_fh);
  368. free_node_mem(f_,node_);
  369. }
/* Halve the bucket array, never going below NODE_TABLE_MIN_SIZE.  A
 * failed realloc is harmless here: we are only shrinking, so the old
 * (larger) array stays usable and only the logical size is reduced. */
static void node_table_reduce(struct node_table *t)
{
	size_t newsize = t->size / 2;
	void *newarray;

	if (newsize < NODE_TABLE_MIN_SIZE)
		return;

	newarray = realloc(t->array, sizeof(struct node *) * newsize);
	if (newarray != NULL)
		t->array = newarray;

	t->size = newsize;
	t->split = t->size / 2;
}
/* Undo up to 8 steps of incremental splitting on the id table: append
 * the chain of one upper (split-off) bucket back onto its lower
 * bucket.  Called as occupancy drops; once fully merged (split == 0)
 * the bucket array itself is halved. */
static void remerge_id(struct fuse *f)
{
	struct node_table *t = &f->id_table;
	int iter;

	if (t->split == 0)
		node_table_reduce(t);

	for (iter = 8; t->split > 0 && iter; iter--) {
		struct node **upper;

		t->split--;
		upper = &t->array[t->split + t->size / 2];
		if (*upper) {
			struct node **nodep;

			/* walk to the end of the lower bucket's chain */
			for (nodep = &t->array[t->split]; *nodep;
			     nodep = &(*nodep)->id_next);

			*nodep = *upper;
			*upper = NULL;
			break;
		}
	}
}
/* Remove a node from its id hash chain; merge/shrink the table once it
 * falls below one-quarter occupancy. */
static void unhash_id(struct fuse *f, struct node *node)
{
	struct node **nodep = &f->id_table.array[id_hash(f, node->nodeid)];

	for (; *nodep != NULL; nodep = &(*nodep)->id_next)
		if (*nodep == node) {
			*nodep = node->id_next;
			f->id_table.use--;

			if(f->id_table.use < f->id_table.size / 4)
				remerge_id(f);
			return;
		}
}
  414. static int node_table_resize(struct node_table *t)
  415. {
  416. size_t newsize = t->size * 2;
  417. void *newarray;
  418. newarray = realloc(t->array, sizeof(struct node *) * newsize);
  419. if (newarray == NULL)
  420. return -1;
  421. t->array = newarray;
  422. memset(t->array + t->size, 0, t->size * sizeof(struct node *));
  423. t->size = newsize;
  424. t->split = 0;
  425. return 0;
  426. }
/* One step of incremental growth on the id table: rehash the chain of
 * bucket `split` for the doubled size (each entry either stays put or
 * moves to bucket split + size/2).  Once every lower bucket has been
 * split, the bucket array itself is doubled. */
static void rehash_id(struct fuse *f)
{
	struct node_table *t = &f->id_table;
	struct node **nodep;
	struct node **next;
	size_t hash;

	if (t->split == t->size / 2)
		return;

	hash = t->split;
	t->split++;
	for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
		struct node *node = *nodep;
		size_t newhash = id_hash(f, node->nodeid);

		if (newhash != hash) {
			/* relink into the new (upper) bucket */
			next = nodep;
			*nodep = node->id_next;
			node->id_next = t->array[newhash];
			t->array[newhash] = node;
		} else {
			next = &node->id_next;
		}
	}
	if (t->split == t->size / 2)
		node_table_resize(t);
}
  452. static void hash_id(struct fuse *f, struct node *node)
  453. {
  454. size_t hash = id_hash(f, node->nodeid);
  455. node->id_next = f->id_table.array[hash];
  456. f->id_table.array[hash] = node;
  457. f->id_table.use++;
  458. if (f->id_table.use >= f->id_table.size / 2)
  459. rehash_id(f);
  460. }
/* Hash (parent inode, name) into the name_table; same incremental
 * splitting scheme as id_hash(). */
static size_t name_hash(struct fuse *f, fuse_ino_t parent,
			const char *name)
{
	uint64_t hash = parent;
	uint64_t oldhash;

	for (; *name; name++)
		hash = hash * 31 + (unsigned char) *name;	/* 31x string hash */

	hash %= f->name_table.size;
	oldhash = hash % (f->name_table.size / 2);
	if (oldhash >= f->name_table.split)
		return oldhash;	/* bucket not split yet: old position */
	else
		return hash;
}
  475. static void unref_node(struct fuse *f, struct node *node);
/* Name-table counterpart of remerge_id(): undo up to 8 split steps by
 * appending one upper bucket's chain back onto its lower bucket; shrink
 * the array once fully merged. */
static void remerge_name(struct fuse *f)
{
	struct node_table *t = &f->name_table;
	int iter;

	if (t->split == 0)
		node_table_reduce(t);

	for (iter = 8; t->split > 0 && iter; iter--) {
		struct node **upper;

		t->split--;
		upper = &t->array[t->split + t->size / 2];
		if (*upper) {
			struct node **nodep;

			/* walk to the end of the lower bucket's chain */
			for (nodep = &t->array[t->split]; *nodep;
			     nodep = &(*nodep)->name_next);

			*nodep = *upper;
			*upper = NULL;
			break;
		}
	}
}
/* Detach a node from its parent: remove it from the name hash, free its
 * name (unless inline) and drop the reference it held on the parent.
 * A named node missing from its bucket indicates internal corruption
 * and aborts.  No-op for nameless nodes. */
static void unhash_name(struct fuse *f, struct node *node)
{
	if (node->name) {
		size_t hash = name_hash(f, node->parent->nodeid, node->name);
		struct node **nodep = &f->name_table.array[hash];

		for (; *nodep != NULL; nodep = &(*nodep)->name_next)
			if (*nodep == node) {
				*nodep = node->name_next;
				node->name_next = NULL;
				unref_node(f, node->parent);
				if (node->name != node->inline_name)
					free(node->name);
				node->name = NULL;
				node->parent = NULL;
				f->name_table.use--;

				if (f->name_table.use < f->name_table.size / 4)
					remerge_name(f);
				return;
			}
		fprintf(stderr,
			"fuse internal error: unable to unhash node: %llu\n",
			(unsigned long long) node->nodeid);
		abort();
	}
}
/* Name-table counterpart of rehash_id(): rehash one bucket's chain for
 * the doubled table size; double the bucket array once every lower
 * bucket has been split. */
static void rehash_name(struct fuse *f)
{
	struct node_table *t = &f->name_table;
	struct node **nodep;
	struct node **next;
	size_t hash;

	if (t->split == t->size / 2)
		return;

	hash = t->split;
	t->split++;
	for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
		struct node *node = *nodep;
		size_t newhash = name_hash(f, node->parent->nodeid, node->name);

		if (newhash != hash) {
			/* relink into the new (upper) bucket */
			next = nodep;
			*nodep = node->name_next;
			node->name_next = t->array[newhash];
			t->array[newhash] = node;
		} else {
			next = &node->name_next;
		}
	}
	if (t->split == t->size / 2)
		node_table_resize(t);
}
/* Give `node` its name and link it under `parentid`: names shorter than
 * the inline buffer are copied in place, longer ones strdup'd; the
 * parent gains a reference and the node enters the name hash.
 * Returns 0, or -1 on allocation failure (node left unlinked). */
static int hash_name(struct fuse *f, struct node *node, fuse_ino_t parentid,
		     const char *name)
{
	size_t hash = name_hash(f, parentid, name);
	struct node *parent = get_node(f, parentid);

	if (strlen(name) < sizeof(node->inline_name)) {
		/* fits (including NUL) in the inline buffer */
		strcpy(node->inline_name, name);
		node->name = node->inline_name;
	} else {
		node->name = strdup(name);
		if (node->name == NULL)
			return -1;
	}

	parent->refctr ++;
	node->parent = parent;
	node->name_next = f->name_table.array[hash];
	f->name_table.array[hash] = node;
	f->name_table.use++;

	if (f->name_table.use >= f->name_table.size / 2)
		rehash_name(f);

	return 0;
}
/* Fully remove a node: it must hold no tree locks; unhash it from the
 * name table, the LRU (when enabled) and the id table, then free it. */
static void delete_node(struct fuse *f, struct node *node)
{
	if (f->conf.debug)
		fprintf(stderr, "DELETE: %llu\n",
			(unsigned long long) node->nodeid);

	assert(node->treelock == 0);
	unhash_name(f, node);
	if (lru_enabled(f))
		remove_node_lru(node);
	unhash_id(f, node);
	free_node(f, node);
}
  580. static void unref_node(struct fuse *f, struct node *node)
  581. {
  582. assert(node->refctr > 0);
  583. node->refctr --;
  584. if (!node->refctr)
  585. delete_node(f, node);
  586. }
  587. static
  588. uint64_t
  589. rand64(void)
  590. {
  591. uint64_t rv;
  592. rv = rand();
  593. rv <<= 32;
  594. rv |= rand();
  595. return rv;
  596. }
/* Produce the next unused inode number: skips 0, FUSE_UNKNOWN_INO and
 * any id currently in the table, and bumps the generation counter each
 * time the counter wraps to zero. */
static
fuse_ino_t
next_id(struct fuse *f)
{
	do
	{
		f->ctr = ((f->ctr + 1) & UINT64_MAX);
		if(f->ctr == 0)
			f->generation++;
	} while((f->ctr == 0) ||
		(f->ctr == FUSE_UNKNOWN_INO) ||
		(get_node_nocheck(f, f->ctr) != NULL));
	return f->ctr;
}
  611. static struct node *lookup_node(struct fuse *f, fuse_ino_t parent,
  612. const char *name)
  613. {
  614. size_t hash = name_hash(f, parent, name);
  615. struct node *node;
  616. for (node = f->name_table.array[hash]; node != NULL; node = node->name_next)
  617. if (node->parent->nodeid == parent &&
  618. strcmp(node->name, name) == 0)
  619. return node;
  620. return NULL;
  621. }
  622. static void inc_nlookup(struct node *node)
  623. {
  624. if (!node->nlookup)
  625. node->refctr++;
  626. node->nlookup++;
  627. }
/* Look up (creating if necessary) the node for `name` under `parent`
 * and take a kernel lookup reference on it.  A NULL name refers to
 * `parent` itself.  Returns NULL on allocation failure. */
static struct node *find_node(struct fuse *f, fuse_ino_t parent,
			      const char *name)
{
	struct node *node;

	pthread_mutex_lock(&f->lock);
	if (!name)
		node = get_node(f, parent);
	else
		node = lookup_node(f, parent, name);
	if (node == NULL) {
		node = alloc_node(f);
		if (node == NULL)
			goto out_err;

		node->nodeid = next_id(f);
		node->generation = f->generation;
		if (f->conf.remember)
			inc_nlookup(node);	/* extra ref held until pruned */

		if (hash_name(f, node, parent, name) == -1) {
			free_node(f, node);
			node = NULL;
			goto out_err;
		}
		hash_id(f, node);
		if (lru_enabled(f)) {
			struct node_lru *lnode = node_lru(node);
			init_list_head(&lnode->lru);
		}
	} else if (lru_enabled(f) && node->nlookup == 1) {
		/* only the "remember" ref was left: node sat on the LRU;
		 * it is live again now */
		remove_node_lru(node);
	}
	inc_nlookup(node);
out_err:
	pthread_mutex_unlock(&f->lock);
	return node;
}
/* Prepend "/name" to a path that is being built backwards from the end
 * of *buf; `s` points at the current start of the partial path.  When
 * the component does not fit, the buffer is doubled (clamped near 4 GiB
 * to avoid unsigned overflow) and the existing tail is moved to the end
 * of the new buffer.  Returns the new start pointer, or NULL on
 * allocation failure (*buf remains valid). */
static char *add_name(char **buf, unsigned *bufsize, char *s, const char *name)
{
	size_t len = strlen(name);

	if (s - len <= *buf) {	/* not enough room in front of s */
		unsigned pathlen = *bufsize - (s - *buf);
		unsigned newbufsize = *bufsize;
		char *newbuf;

		while (newbufsize < pathlen + len + 1) {
			if (newbufsize >= 0x80000000)
				newbufsize = 0xffffffff;	/* clamp: doubling would overflow */
			else
				newbufsize *= 2;
		}

		newbuf = realloc(*buf, newbufsize);
		if (newbuf == NULL)
			return NULL;

		*buf = newbuf;
		s = newbuf + newbufsize - pathlen;
		/* slide the existing tail to the end of the grown buffer */
		memmove(s, newbuf + *bufsize - pathlen, pathlen);
		*bufsize = newbufsize;
	}
	s -= len;
	strncpy(s, name, len);	/* no NUL copied: the tail is already terminated */
	s--;
	*s = '/';

	return s;
}
/* Release tree locks taken by try_get_path() on the chain from `nodeid`
 * up to (but excluding) `end` or the root.  `wnode`, if given, held the
 * write lock on the target.  A reader count that drops exactly to
 * TREELOCK_WAIT_OFFSET means a writer had queued behind the readers
 * (see try_get_path); reset it to 0 so the writer can proceed. */
static void unlock_path(struct fuse *f, fuse_ino_t nodeid, struct node *wnode,
			struct node *end)
{
	struct node *node;

	if (wnode) {
		assert(wnode->treelock == TREELOCK_WRITE);
		wnode->treelock = 0;
	}

	for (node = get_node(f, nodeid);
	     node != end && node->nodeid != FUSE_ROOT_ID; node = node->parent) {
		assert(node->treelock != 0);
		assert(node->treelock != TREELOCK_WAIT_OFFSET);
		assert(node->treelock != TREELOCK_WRITE);
		node->treelock--;
		if (node->treelock == TREELOCK_WAIT_OFFSET)
			node->treelock = 0;
	}
}
/* Build the full path of `nodeid` (with optional trailing component
 * `name`) by walking parent pointers to the root, and -- when
 * `need_lock` -- read-lock the whole chain, write-locking the target
 * when `wnodep` is supplied.  Must be called with f->lock held.
 * Returns 0 with *path malloc'd (caller frees), or a negative errno:
 *   -EAGAIN  conflicting tree lock held (caller should queue and retry)
 *   -ENOENT  the node has been detached from the tree
 *   -ENOMEM  allocation failure */
static int try_get_path(struct fuse *f, fuse_ino_t nodeid, const char *name,
			char **path, struct node **wnodep, bool need_lock)
{
	unsigned bufsize = 256;
	char *buf;
	char *s;
	struct node *node;
	struct node *wnode = NULL;
	int err;

	*path = NULL;

	err = -ENOMEM;
	buf = malloc(bufsize);
	if (buf == NULL)
		goto out_err;

	/* path is assembled backwards from the end of the buffer */
	s = buf + bufsize - 1;
	*s = '\0';

	if (name != NULL) {
		s = add_name(&buf, &bufsize, s, name);
		err = -ENOMEM;
		if (s == NULL)
			goto out_free;
	}

	if (wnodep) {
		assert(need_lock);
		wnode = lookup_node(f, nodeid, name);
		if (wnode) {
			if (wnode->treelock != 0) {
				/* readers (or a writer) hold it; mark a
				 * waiting writer so new readers back off */
				if (wnode->treelock > 0)
					wnode->treelock += TREELOCK_WAIT_OFFSET;
				err = -EAGAIN;
				goto out_free;
			}
			wnode->treelock = TREELOCK_WRITE;
		}
	}

	for (node = get_node(f, nodeid); node->nodeid != FUSE_ROOT_ID;
	     node = node->parent) {
		err = -ENOENT;
		if (node->name == NULL || node->parent == NULL)
			goto out_unlock;

		err = -ENOMEM;
		s = add_name(&buf, &bufsize, s, node->name);
		if (s == NULL)
			goto out_unlock;

		if (need_lock) {
			err = -EAGAIN;
			if (node->treelock < 0)	/* writer active or queued */
				goto out_unlock;

			node->treelock++;	/* take a read lock */
		}
	}

	if (s[0])
		memmove(buf, s, bufsize - (s - buf));
	else
		strcpy(buf, "/");	/* the root itself */

	*path = buf;
	if (wnodep)
		*wnodep = wnode;

	return 0;

out_unlock:
	if (need_lock)
		unlock_path(f, nodeid, wnode, node);
out_free:
	free(buf);
out_err:
	return err;
}
  775. static void queue_element_unlock(struct fuse *f, struct lock_queue_element *qe)
  776. {
  777. struct node *wnode;
  778. if (qe->first_locked) {
  779. wnode = qe->wnode1 ? *qe->wnode1 : NULL;
  780. unlock_path(f, qe->nodeid1, wnode, NULL);
  781. qe->first_locked = false;
  782. }
  783. if (qe->second_locked) {
  784. wnode = qe->wnode2 ? *qe->wnode2 : NULL;
  785. unlock_path(f, qe->nodeid2, wnode, NULL);
  786. qe->second_locked = false;
  787. }
  788. }
/* Try to make progress for one queued request; called with f->lock held
 * whenever tree locks may have been released.  On success or hard
 * failure the element is marked done and its waiter signalled; on
 * -EAGAIN it stays queued for the next round. */
static void queue_element_wakeup(struct fuse *f, struct lock_queue_element *qe)
{
	int err;
	bool first = (qe == f->lockq);

	if (!qe->path1) {
		/* Just waiting for it to be unlocked */
		if (get_node(f, qe->nodeid1)->treelock == 0)
			pthread_cond_signal(&qe->cond);
		return;
	}

	if (!qe->first_locked) {
		err = try_get_path(f, qe->nodeid1, qe->name1, qe->path1,
				   qe->wnode1, true);
		if (!err)
			qe->first_locked = true;
		else if (err != -EAGAIN)
			goto err_unlock;
	}
	if (!qe->second_locked && qe->path2) {
		err = try_get_path(f, qe->nodeid2, qe->name2, qe->path2,
				   qe->wnode2, true);
		if (!err)
			qe->second_locked = true;
		else if (err != -EAGAIN)
			goto err_unlock;
	}

	if (qe->first_locked && (qe->second_locked || !qe->path2)) {
		err = 0;
		goto done;
	}

	/*
	 * Only let the first element be partially locked otherwise there could
	 * be a deadlock.
	 *
	 * But do allow the first element to be partially locked to prevent
	 * starvation.
	 */
	if (!first)
		queue_element_unlock(f, qe);

	/* keep trying */
	return;

err_unlock:
	queue_element_unlock(f, qe);
done:
	qe->err = err;
	qe->done = true;
	pthread_cond_signal(&qe->cond);
}
  837. static void wake_up_queued(struct fuse *f)
  838. {
  839. struct lock_queue_element *qe;
  840. for (qe = f->lockq; qe != NULL; qe = qe->next)
  841. queue_element_wakeup(f, qe);
  842. }
  843. static void debug_path(struct fuse *f, const char *msg, fuse_ino_t nodeid,
  844. const char *name, bool wr)
  845. {
  846. if (f->conf.debug) {
  847. struct node *wnode = NULL;
  848. if (wr)
  849. wnode = lookup_node(f, nodeid, name);
  850. if (wnode)
  851. fprintf(stderr, "%s %li (w)\n", msg, wnode->nodeid);
  852. else
  853. fprintf(stderr, "%s %li\n", msg, nodeid);
  854. }
  855. }
  856. static void queue_path(struct fuse *f, struct lock_queue_element *qe)
  857. {
  858. struct lock_queue_element **qp;
  859. qe->done = false;
  860. qe->first_locked = false;
  861. qe->second_locked = false;
  862. pthread_cond_init(&qe->cond, NULL);
  863. qe->next = NULL;
  864. for (qp = &f->lockq; *qp != NULL; qp = &(*qp)->next);
  865. *qp = qe;
  866. }
  867. static void dequeue_path(struct fuse *f, struct lock_queue_element *qe)
  868. {
  869. struct lock_queue_element **qp;
  870. pthread_cond_destroy(&qe->cond);
  871. for (qp = &f->lockq; *qp != qe; qp = &(*qp)->next);
  872. *qp = qe->next;
  873. }
  874. static int wait_path(struct fuse *f, struct lock_queue_element *qe)
  875. {
  876. queue_path(f, qe);
  877. do {
  878. pthread_cond_wait(&qe->cond, &f->lock);
  879. } while (!qe->done);
  880. dequeue_path(f, qe);
  881. return qe->err;
  882. }
/* Lock and resolve the path for `nodeid`/`name`; when the tree locks
 * conflict (-EAGAIN) the request is queued and the caller sleeps until
 * they become available.  Takes and releases f->lock. */
static int get_path_common(struct fuse *f, fuse_ino_t nodeid, const char *name,
			   char **path, struct node **wnode)
{
	int err;

	pthread_mutex_lock(&f->lock);
	err = try_get_path(f, nodeid, name, path, wnode, true);
	if (err == -EAGAIN) {
		struct lock_queue_element qe = {
			.nodeid1 = nodeid,
			.name1 = name,
			.path1 = path,
			.wnode1 = wnode,
		};
		debug_path(f, "QUEUE PATH", nodeid, name, !!wnode);
		err = wait_path(f, &qe);
		debug_path(f, "DEQUEUE PATH", nodeid, name, !!wnode);
	}
	pthread_mutex_unlock(&f->lock);
	return err;
}
/* Resolve the path of `nodeid` with read locks on the chain to root. */
static int get_path(struct fuse *f, fuse_ino_t nodeid, char **path)
{
	return get_path_common(f, nodeid, NULL, path, NULL);
}
  907. static
  908. int
  909. get_path_nullok(struct fuse *f,
  910. fuse_ino_t nodeid,
  911. char **path)
  912. {
  913. int err = 0;
  914. if(f->conf.nopath)
  915. {
  916. *path = NULL;
  917. }
  918. else
  919. {
  920. err = get_path_common(f,nodeid,NULL,path,NULL);
  921. if((err == -ENOENT) && f->nullpath_ok)
  922. err = 0;
  923. }
  924. return err;
  925. }
  926. static int get_path_name(struct fuse *f, fuse_ino_t nodeid, const char *name,
  927. char **path)
  928. {
  929. return get_path_common(f, nodeid, name, path, NULL);
  930. }
  931. static int get_path_wrlock(struct fuse *f, fuse_ino_t nodeid, const char *name,
  932. char **path, struct node **wnode)
  933. {
  934. return get_path_common(f, nodeid, name, path, wnode);
  935. }
  936. static int try_get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
  937. fuse_ino_t nodeid2, const char *name2,
  938. char **path1, char **path2,
  939. struct node **wnode1, struct node **wnode2)
  940. {
  941. int err;
  942. /* FIXME: locking two paths needs deadlock checking */
  943. err = try_get_path(f, nodeid1, name1, path1, wnode1, true);
  944. if (!err) {
  945. err = try_get_path(f, nodeid2, name2, path2, wnode2, true);
  946. if (err) {
  947. struct node *wn1 = wnode1 ? *wnode1 : NULL;
  948. unlock_path(f, nodeid1, wn1, NULL);
  949. free(*path1);
  950. }
  951. }
  952. return err;
  953. }
  954. static int get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
  955. fuse_ino_t nodeid2, const char *name2,
  956. char **path1, char **path2,
  957. struct node **wnode1, struct node **wnode2)
  958. {
  959. int err;
  960. pthread_mutex_lock(&f->lock);
  961. err = try_get_path2(f, nodeid1, name1, nodeid2, name2,
  962. path1, path2, wnode1, wnode2);
  963. if (err == -EAGAIN) {
  964. struct lock_queue_element qe = {
  965. .nodeid1 = nodeid1,
  966. .name1 = name1,
  967. .path1 = path1,
  968. .wnode1 = wnode1,
  969. .nodeid2 = nodeid2,
  970. .name2 = name2,
  971. .path2 = path2,
  972. .wnode2 = wnode2,
  973. };
  974. debug_path(f, "QUEUE PATH1", nodeid1, name1, !!wnode1);
  975. debug_path(f, " PATH2", nodeid2, name2, !!wnode2);
  976. err = wait_path(f, &qe);
  977. debug_path(f, "DEQUEUE PATH1", nodeid1, name1, !!wnode1);
  978. debug_path(f, " PATH2", nodeid2, name2, !!wnode2);
  979. }
  980. pthread_mutex_unlock(&f->lock);
  981. return err;
  982. }
  983. static void free_path_wrlock(struct fuse *f, fuse_ino_t nodeid,
  984. struct node *wnode, char *path)
  985. {
  986. pthread_mutex_lock(&f->lock);
  987. unlock_path(f, nodeid, wnode, NULL);
  988. if (f->lockq)
  989. wake_up_queued(f);
  990. pthread_mutex_unlock(&f->lock);
  991. free(path);
  992. }
  993. static void free_path(struct fuse *f, fuse_ino_t nodeid, char *path)
  994. {
  995. if (path)
  996. free_path_wrlock(f, nodeid, NULL, path);
  997. }
  998. static void free_path2(struct fuse *f, fuse_ino_t nodeid1, fuse_ino_t nodeid2,
  999. struct node *wnode1, struct node *wnode2,
  1000. char *path1, char *path2)
  1001. {
  1002. pthread_mutex_lock(&f->lock);
  1003. unlock_path(f, nodeid1, wnode1, NULL);
  1004. unlock_path(f, nodeid2, wnode2, NULL);
  1005. wake_up_queued(f);
  1006. pthread_mutex_unlock(&f->lock);
  1007. free(path1);
  1008. free(path2);
  1009. }
/* Drop `nlookup` kernel lookup references from `nodeid`.  When the count
 * reaches zero the node is unreferenced; in remember/LRU mode a node that
 * drops to its final reference gets a forget timestamp instead. */
static
void
forget_node(struct fuse *f,
	    const fuse_ino_t nodeid,
	    const uint64_t nlookup)
{
	struct node *node;

	/* The root node is never forgotten. */
	if(nodeid == FUSE_ROOT_ID)
		return;

	pthread_mutex_lock(&f->lock);
	node = get_node(f, nodeid);

	/*
	 * Node may still be locked due to interrupt idiocy in open,
	 * create and opendir
	 */
	while(node->nlookup == nlookup && node->treelock)
	{
		/* Queue ourselves so unlockers signal us; only nodeid1 is
		   relevant — no path output is wanted here. */
		struct lock_queue_element qe = {
			.nodeid1 = nodeid,
		};

		debug_path(f, "QUEUE PATH (forget)", nodeid, NULL, false);
		queue_path(f, &qe);

		do
		{
			pthread_cond_wait(&qe.cond, &f->lock);
		}
		while((node->nlookup == nlookup) && node->treelock);

		dequeue_path(f, &qe);
		debug_path(f, "DEQUEUE_PATH (forget)", nodeid, NULL, false);
	}

	assert(node->nlookup >= nlookup);
	node->nlookup -= nlookup;

	if(!node->nlookup)
		unref_node(f, node);		/* last reference gone */
	else if(lru_enabled(f) && node->nlookup == 1)
		set_forget_time(f, node);	/* candidate for LRU expiry */

	pthread_mutex_unlock(&f->lock);
}
  1048. static void unlink_node(struct fuse *f, struct node *node)
  1049. {
  1050. if (f->conf.remember) {
  1051. assert(node->nlookup > 1);
  1052. node->nlookup--;
  1053. }
  1054. unhash_name(f, node);
  1055. }
  1056. static void remove_node(struct fuse *f, fuse_ino_t dir, const char *name)
  1057. {
  1058. struct node *node;
  1059. pthread_mutex_lock(&f->lock);
  1060. node = lookup_node(f, dir, name);
  1061. if (node != NULL)
  1062. unlink_node(f, node);
  1063. pthread_mutex_unlock(&f->lock);
  1064. }
  1065. static int rename_node(struct fuse *f, fuse_ino_t olddir, const char *oldname,
  1066. fuse_ino_t newdir, const char *newname)
  1067. {
  1068. struct node *node;
  1069. struct node *newnode;
  1070. int err = 0;
  1071. pthread_mutex_lock(&f->lock);
  1072. node = lookup_node(f, olddir, oldname);
  1073. newnode = lookup_node(f, newdir, newname);
  1074. if (node == NULL)
  1075. goto out;
  1076. if (newnode != NULL)
  1077. unlink_node(f, newnode);
  1078. unhash_name(f, node);
  1079. if (hash_name(f, node, newdir, newname) == -1) {
  1080. err = -ENOMEM;
  1081. goto out;
  1082. }
  1083. out:
  1084. pthread_mutex_unlock(&f->lock);
  1085. return err;
  1086. }
  1087. static void set_stat(struct fuse *f, fuse_ino_t nodeid, struct stat *stbuf)
  1088. {
  1089. if (!f->conf.use_ino)
  1090. stbuf->st_ino = nodeid;
  1091. if (f->conf.set_mode)
  1092. stbuf->st_mode = (stbuf->st_mode & S_IFMT) |
  1093. (0777 & ~f->conf.umask);
  1094. if (f->conf.set_uid)
  1095. stbuf->st_uid = f->conf.uid;
  1096. if (f->conf.set_gid)
  1097. stbuf->st_gid = f->conf.gid;
  1098. }
  1099. static struct fuse *req_fuse(fuse_req_t req)
  1100. {
  1101. return (struct fuse *) fuse_req_userdata(req);
  1102. }
  1103. static void fuse_intr_sighandler(int sig)
  1104. {
  1105. (void) sig;
  1106. /* Nothing to do */
  1107. }
/* Per-request state shared between the worker thread handling a request
 * and fuse_interrupt(), which pokes it with a signal. */
struct fuse_intr_data {
	pthread_t id;        /* thread processing the request */
	pthread_cond_t cond; /* signalled when the request completes */
	int finished;        /* set (under f->lock) once processing is done */
};
/* INTERRUPT callback: repeatedly signal the thread handling the request
 * until it marks itself finished.  The 1-second timedwait re-sends the
 * signal in case the first delivery raced with the syscall. */
static void fuse_interrupt(fuse_req_t req, void *d_)
{
	struct fuse_intr_data *d = d_;
	struct fuse *f = req_fuse(req);

	/* A thread never interrupts itself. */
	if (d->id == pthread_self())
		return;

	pthread_mutex_lock(&f->lock);
	while (!d->finished) {
		struct timeval now;
		struct timespec timeout;

		pthread_kill(d->id, f->conf.intr_signal);
		/* Absolute deadline ~1s from now (condvar uses realtime). */
		gettimeofday(&now, NULL);
		timeout.tv_sec = now.tv_sec + 1;
		timeout.tv_nsec = now.tv_usec * 1000;
		pthread_cond_timedwait(&d->cond, &f->lock, &timeout);
	}
	pthread_mutex_unlock(&f->lock);
}
/* Mark the request finished and wake any interrupter blocked in
 * fuse_interrupt().  The callback is unregistered and the condvar
 * destroyed only after `finished` is visible, so no interrupter can
 * still be waiting on it. */
static void fuse_do_finish_interrupt(struct fuse *f, fuse_req_t req,
				     struct fuse_intr_data *d)
{
	pthread_mutex_lock(&f->lock);
	d->finished = 1;
	pthread_cond_broadcast(&d->cond);
	pthread_mutex_unlock(&f->lock);
	fuse_req_interrupt_func(req, NULL, NULL);
	pthread_cond_destroy(&d->cond);
}
  1141. static void fuse_do_prepare_interrupt(fuse_req_t req, struct fuse_intr_data *d)
  1142. {
  1143. d->id = pthread_self();
  1144. pthread_cond_init(&d->cond, NULL);
  1145. d->finished = 0;
  1146. fuse_req_interrupt_func(req, fuse_interrupt, d);
  1147. }
  1148. static inline void fuse_finish_interrupt(struct fuse *f, fuse_req_t req,
  1149. struct fuse_intr_data *d)
  1150. {
  1151. if (f->conf.intr)
  1152. fuse_do_finish_interrupt(f, req, d);
  1153. }
  1154. static inline void fuse_prepare_interrupt(struct fuse *f, fuse_req_t req,
  1155. struct fuse_intr_data *d)
  1156. {
  1157. if (f->conf.intr)
  1158. fuse_do_prepare_interrupt(req, d);
  1159. }
  1160. int fuse_fs_getattr(struct fuse_fs *fs, const char *path, struct stat *buf)
  1161. {
  1162. fuse_get_context()->private_data = fs->user_data;
  1163. if (fs->op.getattr) {
  1164. if (fs->debug)
  1165. fprintf(stderr, "getattr %s\n", path);
  1166. return fs->op.getattr(path, buf);
  1167. } else {
  1168. return -ENOSYS;
  1169. }
  1170. }
  1171. int fuse_fs_fgetattr(struct fuse_fs *fs, const char *path, struct stat *buf,
  1172. struct fuse_file_info *fi)
  1173. {
  1174. fuse_get_context()->private_data = fs->user_data;
  1175. if (fs->op.fgetattr) {
  1176. if (fs->debug)
  1177. fprintf(stderr, "fgetattr[%llu] %s\n",
  1178. (unsigned long long) fi->fh, path);
  1179. return fs->op.fgetattr(path, buf, fi);
  1180. } else if (path && fs->op.getattr) {
  1181. if (fs->debug)
  1182. fprintf(stderr, "getattr %s\n", path);
  1183. return fs->op.getattr(path, buf);
  1184. } else {
  1185. return -ENOSYS;
  1186. }
  1187. }
  1188. int
  1189. fuse_fs_rename(struct fuse_fs *fs,
  1190. const char *oldpath,
  1191. const char *newpath)
  1192. {
  1193. fuse_get_context()->private_data = fs->user_data;
  1194. if(fs->op.rename)
  1195. return fs->op.rename(oldpath, newpath);
  1196. return -ENOSYS;
  1197. }
  1198. int
  1199. fuse_fs_prepare_hide(struct fuse_fs *fs_,
  1200. const char *path_,
  1201. uint64_t *fh_)
  1202. {
  1203. fuse_get_context()->private_data = fs_->user_data;
  1204. if(fs_->op.prepare_hide)
  1205. return fs_->op.prepare_hide(path_,fh_);
  1206. return -ENOSYS;
  1207. }
  1208. int
  1209. fuse_fs_free_hide(struct fuse_fs *fs_,
  1210. uint64_t fh_)
  1211. {
  1212. fuse_get_context()->private_data = fs_->user_data;
  1213. if(fs_->op.free_hide)
  1214. return fs_->op.free_hide(fh_);
  1215. return -ENOSYS;
  1216. }
  1217. int fuse_fs_unlink(struct fuse_fs *fs, const char *path)
  1218. {
  1219. fuse_get_context()->private_data = fs->user_data;
  1220. if (fs->op.unlink) {
  1221. if (fs->debug)
  1222. fprintf(stderr, "unlink %s\n", path);
  1223. return fs->op.unlink(path);
  1224. } else {
  1225. return -ENOSYS;
  1226. }
  1227. }
  1228. int fuse_fs_rmdir(struct fuse_fs *fs, const char *path)
  1229. {
  1230. fuse_get_context()->private_data = fs->user_data;
  1231. if (fs->op.rmdir) {
  1232. if (fs->debug)
  1233. fprintf(stderr, "rmdir %s\n", path);
  1234. return fs->op.rmdir(path);
  1235. } else {
  1236. return -ENOSYS;
  1237. }
  1238. }
  1239. int fuse_fs_symlink(struct fuse_fs *fs, const char *linkname, const char *path)
  1240. {
  1241. fuse_get_context()->private_data = fs->user_data;
  1242. if (fs->op.symlink) {
  1243. if (fs->debug)
  1244. fprintf(stderr, "symlink %s %s\n", linkname, path);
  1245. return fs->op.symlink(linkname, path);
  1246. } else {
  1247. return -ENOSYS;
  1248. }
  1249. }
  1250. int fuse_fs_link(struct fuse_fs *fs, const char *oldpath, const char *newpath)
  1251. {
  1252. fuse_get_context()->private_data = fs->user_data;
  1253. if (fs->op.link) {
  1254. if (fs->debug)
  1255. fprintf(stderr, "link %s %s\n", oldpath, newpath);
  1256. return fs->op.link(oldpath, newpath);
  1257. } else {
  1258. return -ENOSYS;
  1259. }
  1260. }
  1261. int fuse_fs_release(struct fuse_fs *fs, const char *path,
  1262. struct fuse_file_info *fi)
  1263. {
  1264. fuse_get_context()->private_data = fs->user_data;
  1265. if (fs->op.release) {
  1266. if (fs->debug)
  1267. fprintf(stderr, "release%s[%llu] flags: 0x%x\n",
  1268. fi->flush ? "+flush" : "",
  1269. (unsigned long long) fi->fh, fi->flags);
  1270. return fs->op.release(path, fi);
  1271. } else {
  1272. return 0;
  1273. }
  1274. }
  1275. int fuse_fs_opendir(struct fuse_fs *fs, const char *path,
  1276. struct fuse_file_info *fi)
  1277. {
  1278. fuse_get_context()->private_data = fs->user_data;
  1279. if (fs->op.opendir) {
  1280. int err;
  1281. if (fs->debug)
  1282. fprintf(stderr, "opendir flags: 0x%x %s\n", fi->flags,
  1283. path);
  1284. err = fs->op.opendir(path,fi);
  1285. if (fs->debug && !err)
  1286. fprintf(stderr, " opendir[%lli] flags: 0x%x %s\n",
  1287. (unsigned long long) fi->fh, fi->flags, path);
  1288. return err;
  1289. } else {
  1290. return 0;
  1291. }
  1292. }
  1293. int fuse_fs_open(struct fuse_fs *fs, const char *path,
  1294. struct fuse_file_info *fi)
  1295. {
  1296. fuse_get_context()->private_data = fs->user_data;
  1297. if (fs->op.open) {
  1298. int err;
  1299. if (fs->debug)
  1300. fprintf(stderr, "open flags: 0x%x %s\n", fi->flags,
  1301. path);
  1302. err = fs->op.open(path,fi);
  1303. if (fs->debug && !err)
  1304. fprintf(stderr, " open[%lli] flags: 0x%x %s\n",
  1305. (unsigned long long) fi->fh, fi->flags, path);
  1306. return err;
  1307. } else {
  1308. return 0;
  1309. }
  1310. }
  1311. static void fuse_free_buf(struct fuse_bufvec *buf)
  1312. {
  1313. if (buf != NULL) {
  1314. size_t i;
  1315. for (i = 0; i < buf->count; i++)
  1316. free(buf->buf[i].mem);
  1317. free(buf);
  1318. }
  1319. }
/* Read `size` bytes at `off` into a newly provided bufvec (*bufp).
 * Prefers the zero-copy read_buf handler; otherwise allocates a flat
 * buffer and calls read().  On success (return 0) the caller owns *bufp
 * and frees it with fuse_free_buf(); on error *bufp may be untouched. */
int fuse_fs_read_buf(struct fuse_fs *fs, const char *path,
		     struct fuse_bufvec **bufp, size_t size, off_t off,
		     struct fuse_file_info *fi)
{
	fuse_get_context()->private_data = fs->user_data;
	if (fs->op.read || fs->op.read_buf) {
		int res;

		if (fs->debug)
			fprintf(stderr,
				"read[%llu] %zu bytes from %llu flags: 0x%x\n",
				(unsigned long long) fi->fh,
				size, (unsigned long long) off, fi->flags);

		if (fs->op.read_buf) {
			res = fs->op.read_buf(path, bufp, size, off, fi);
		} else {
			/* Emulate read_buf on top of plain read(). */
			struct fuse_bufvec *buf;
			void *mem;

			buf = malloc(sizeof(struct fuse_bufvec));
			if (buf == NULL)
				return -ENOMEM;

			mem = malloc(size);
			if (mem == NULL) {
				free(buf);
				return -ENOMEM;
			}
			*buf = FUSE_BUFVEC_INIT(size);
			buf->buf[0].mem = mem;
			/* Ownership of buf/mem passes to the caller here. */
			*bufp = buf;

			res = fs->op.read(path, mem, size, off, fi);
			if (res >= 0)
				buf->buf[0].size = res; /* actual bytes read */
		}

		if (fs->debug && res >= 0)
			fprintf(stderr, " read[%llu] %zu bytes from %llu\n",
				(unsigned long long) fi->fh,
				fuse_buf_size(*bufp),
				(unsigned long long) off);
		/* Sanity check: the FS must not return more than asked. */
		if (res >= 0 && fuse_buf_size(*bufp) > (int) size)
			fprintf(stderr, "fuse: read too many bytes\n");

		if (res < 0)
			return res;

		return 0;
	} else {
		return -ENOSYS;
	}
}
  1366. int fuse_fs_read(struct fuse_fs *fs, const char *path, char *mem, size_t size,
  1367. off_t off, struct fuse_file_info *fi)
  1368. {
  1369. int res;
  1370. struct fuse_bufvec *buf = NULL;
  1371. res = fuse_fs_read_buf(fs, path, &buf, size, off, fi);
  1372. if (res == 0) {
  1373. struct fuse_bufvec dst = FUSE_BUFVEC_INIT(size);
  1374. dst.buf[0].mem = mem;
  1375. res = fuse_buf_copy(&dst, buf, 0);
  1376. }
  1377. fuse_free_buf(buf);
  1378. return res;
  1379. }
/* Write the data described by bufvec `buf` at offset `off`.  Prefers the
 * zero-copy write_buf handler; otherwise flattens the bufvec into a
 * single malloc'd buffer (or borrows the one in-memory segment) and
 * calls write().  Returns bytes written or a negative errno. */
int fuse_fs_write_buf(struct fuse_fs *fs, const char *path,
		      struct fuse_bufvec *buf, off_t off,
		      struct fuse_file_info *fi)
{
	fuse_get_context()->private_data = fs->user_data;
	if (fs->op.write_buf || fs->op.write) {
		int res;
		size_t size = fuse_buf_size(buf);

		/* Only whole, unconsumed bufvecs are supported here. */
		assert(buf->idx == 0 && buf->off == 0);

		if (fs->debug)
			fprintf(stderr,
				"write%s[%llu] %zu bytes to %llu flags: 0x%x\n",
				fi->writepage ? "page" : "",
				(unsigned long long) fi->fh,
				size,
				(unsigned long long) off,
				fi->flags);

		if (fs->op.write_buf) {
			res = fs->op.write_buf(path, buf, off, fi);
		} else {
			void *mem = NULL;
			struct fuse_buf *flatbuf;
			struct fuse_bufvec tmp = FUSE_BUFVEC_INIT(size);

			if (buf->count == 1 &&
			    !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
				/* Single in-memory segment: use it as-is. */
				flatbuf = &buf->buf[0];
			} else {
				/* FD-backed or scattered: copy into one
				   contiguous allocation first. */
				res = -ENOMEM;
				mem = malloc(size);
				if (mem == NULL)
					goto out;

				tmp.buf[0].mem = mem;
				res = fuse_buf_copy(&tmp, buf, 0);
				if (res <= 0)
					goto out_free;

				tmp.buf[0].size = res;
				flatbuf = &tmp.buf[0];
			}

			res = fs->op.write(path, flatbuf->mem, flatbuf->size,
					   off, fi);
out_free:
			free(mem);
		}
out:
		if (fs->debug && res >= 0)
			fprintf(stderr, " write%s[%llu] %u bytes to %llu\n",
				fi->writepage ? "page" : "",
				(unsigned long long) fi->fh, res,
				(unsigned long long) off);
		/* Sanity check: the FS must not claim to write extra. */
		if (res > (int) size)
			fprintf(stderr, "fuse: wrote too many bytes\n");

		return res;
	} else {
		return -ENOSYS;
	}
}
  1436. int fuse_fs_write(struct fuse_fs *fs, const char *path, const char *mem,
  1437. size_t size, off_t off, struct fuse_file_info *fi)
  1438. {
  1439. struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(size);
  1440. bufv.buf[0].mem = (void *) mem;
  1441. return fuse_fs_write_buf(fs, path, &bufv, off, fi);
  1442. }
  1443. int fuse_fs_fsync(struct fuse_fs *fs, const char *path, int datasync,
  1444. struct fuse_file_info *fi)
  1445. {
  1446. fuse_get_context()->private_data = fs->user_data;
  1447. if (fs->op.fsync) {
  1448. if (fs->debug)
  1449. fprintf(stderr, "fsync[%llu] datasync: %i\n",
  1450. (unsigned long long) fi->fh, datasync);
  1451. return fs->op.fsync(path, datasync, fi);
  1452. } else {
  1453. return -ENOSYS;
  1454. }
  1455. }
  1456. int fuse_fs_fsyncdir(struct fuse_fs *fs, const char *path, int datasync,
  1457. struct fuse_file_info *fi)
  1458. {
  1459. fuse_get_context()->private_data = fs->user_data;
  1460. if (fs->op.fsyncdir) {
  1461. if (fs->debug)
  1462. fprintf(stderr, "fsyncdir[%llu] datasync: %i\n",
  1463. (unsigned long long) fi->fh, datasync);
  1464. return fs->op.fsyncdir(path, datasync, fi);
  1465. } else {
  1466. return -ENOSYS;
  1467. }
  1468. }
  1469. int fuse_fs_flush(struct fuse_fs *fs, const char *path,
  1470. struct fuse_file_info *fi)
  1471. {
  1472. fuse_get_context()->private_data = fs->user_data;
  1473. if (fs->op.flush) {
  1474. if (fs->debug)
  1475. fprintf(stderr, "flush[%llu]\n",
  1476. (unsigned long long) fi->fh);
  1477. return fs->op.flush(path, fi);
  1478. } else {
  1479. return -ENOSYS;
  1480. }
  1481. }
  1482. int
  1483. fuse_fs_statfs(struct fuse_fs *fs,
  1484. const char *path,
  1485. struct statvfs *buf)
  1486. {
  1487. fuse_get_context()->private_data = fs->user_data;
  1488. if(fs->debug)
  1489. fprintf(stderr, "statfs %s\n", path);
  1490. if(fs->op.statfs == NULL)
  1491. {
  1492. buf->f_namemax = 255;
  1493. buf->f_bsize = 512;
  1494. return 0;
  1495. }
  1496. return fs->op.statfs(path,buf);
  1497. }
  1498. int fuse_fs_releasedir(struct fuse_fs *fs, const char *path,
  1499. struct fuse_file_info *fi)
  1500. {
  1501. fuse_get_context()->private_data = fs->user_data;
  1502. if (fs->op.releasedir) {
  1503. if (fs->debug)
  1504. fprintf(stderr, "releasedir[%llu] flags: 0x%x\n",
  1505. (unsigned long long) fi->fh, fi->flags);
  1506. return fs->op.releasedir(path, fi);
  1507. } else {
  1508. return 0;
  1509. }
  1510. }
  1511. int
  1512. fuse_fs_readdir(struct fuse_fs *fs,
  1513. struct fuse_file_info *fi,
  1514. fuse_dirents_t *buf)
  1515. {
  1516. if(fs->op.readdir == NULL)
  1517. return -ENOSYS;
  1518. fuse_get_context()->private_data = fs->user_data;
  1519. return fs->op.readdir(fi,buf);
  1520. }
  1521. int
  1522. fuse_fs_readdir_plus(struct fuse_fs *fs_,
  1523. struct fuse_file_info *ffi_,
  1524. fuse_dirents_t *buf_)
  1525. {
  1526. if(fs_->op.readdir_plus == NULL)
  1527. return -ENOSYS;
  1528. fuse_get_context()->private_data = fs_->user_data;
  1529. return fs_->op.readdir_plus(ffi_,buf_);
  1530. }
  1531. int fuse_fs_create(struct fuse_fs *fs, const char *path, mode_t mode,
  1532. struct fuse_file_info *fi)
  1533. {
  1534. fuse_get_context()->private_data = fs->user_data;
  1535. if (fs->op.create) {
  1536. int err;
  1537. if (fs->debug)
  1538. fprintf(stderr,
  1539. "create flags: 0x%x %s 0%o umask=0%03o\n",
  1540. fi->flags, path, mode,
  1541. fuse_get_context()->umask);
  1542. err = fs->op.create(path, mode, fi);
  1543. if (fs->debug && !err)
  1544. fprintf(stderr, " create[%llu] flags: 0x%x %s\n",
  1545. (unsigned long long) fi->fh, fi->flags, path);
  1546. return err;
  1547. } else {
  1548. return -ENOSYS;
  1549. }
  1550. }
  1551. int fuse_fs_lock(struct fuse_fs *fs, const char *path,
  1552. struct fuse_file_info *fi, int cmd, struct flock *lock)
  1553. {
  1554. fuse_get_context()->private_data = fs->user_data;
  1555. if (fs->op.lock) {
  1556. if (fs->debug)
  1557. fprintf(stderr, "lock[%llu] %s %s start: %llu len: %llu pid: %llu\n",
  1558. (unsigned long long) fi->fh,
  1559. (cmd == F_GETLK ? "F_GETLK" :
  1560. (cmd == F_SETLK ? "F_SETLK" :
  1561. (cmd == F_SETLKW ? "F_SETLKW" : "???"))),
  1562. (lock->l_type == F_RDLCK ? "F_RDLCK" :
  1563. (lock->l_type == F_WRLCK ? "F_WRLCK" :
  1564. (lock->l_type == F_UNLCK ? "F_UNLCK" :
  1565. "???"))),
  1566. (unsigned long long) lock->l_start,
  1567. (unsigned long long) lock->l_len,
  1568. (unsigned long long) lock->l_pid);
  1569. return fs->op.lock(path, fi, cmd, lock);
  1570. } else {
  1571. return -ENOSYS;
  1572. }
  1573. }
  1574. int fuse_fs_flock(struct fuse_fs *fs, const char *path,
  1575. struct fuse_file_info *fi, int op)
  1576. {
  1577. fuse_get_context()->private_data = fs->user_data;
  1578. if (fs->op.flock) {
  1579. if (fs->debug) {
  1580. int xop = op & ~LOCK_NB;
  1581. fprintf(stderr, "lock[%llu] %s%s\n",
  1582. (unsigned long long) fi->fh,
  1583. xop == LOCK_SH ? "LOCK_SH" :
  1584. (xop == LOCK_EX ? "LOCK_EX" :
  1585. (xop == LOCK_UN ? "LOCK_UN" : "???")),
  1586. (op & LOCK_NB) ? "|LOCK_NB" : "");
  1587. }
  1588. return fs->op.flock(path, fi, op);
  1589. } else {
  1590. return -ENOSYS;
  1591. }
  1592. }
  1593. int fuse_fs_chown(struct fuse_fs *fs, const char *path, uid_t uid, gid_t gid)
  1594. {
  1595. fuse_get_context()->private_data = fs->user_data;
  1596. if (fs->op.chown) {
  1597. if (fs->debug)
  1598. fprintf(stderr, "chown %s %lu %lu\n", path,
  1599. (unsigned long) uid, (unsigned long) gid);
  1600. return fs->op.chown(path, uid, gid);
  1601. } else {
  1602. return -ENOSYS;
  1603. }
  1604. }
  1605. int
  1606. fuse_fs_fchown(struct fuse_fs *fs_,
  1607. const struct fuse_file_info *ffi_,
  1608. const uid_t uid_,
  1609. const gid_t gid_)
  1610. {
  1611. fuse_get_context()->private_data = fs_->user_data;
  1612. if(fs_->op.fchown)
  1613. return fs_->op.fchown(ffi_,uid_,gid_);
  1614. return -ENOSYS;
  1615. }
  1616. int fuse_fs_truncate(struct fuse_fs *fs, const char *path, off_t size)
  1617. {
  1618. fuse_get_context()->private_data = fs->user_data;
  1619. if (fs->op.truncate) {
  1620. if (fs->debug)
  1621. fprintf(stderr, "truncate %s %llu\n", path,
  1622. (unsigned long long) size);
  1623. return fs->op.truncate(path, size);
  1624. } else {
  1625. return -ENOSYS;
  1626. }
  1627. }
  1628. int fuse_fs_ftruncate(struct fuse_fs *fs, const char *path, off_t size,
  1629. struct fuse_file_info *fi)
  1630. {
  1631. fuse_get_context()->private_data = fs->user_data;
  1632. if (fs->op.ftruncate) {
  1633. if (fs->debug)
  1634. fprintf(stderr, "ftruncate[%llu] %llu\n",
  1635. (unsigned long long) fi->fh,
  1636. (unsigned long long) size);
  1637. return fs->op.ftruncate(path, size, fi);
  1638. } else if (path && fs->op.truncate) {
  1639. if (fs->debug)
  1640. fprintf(stderr, "truncate %s %llu\n", path,
  1641. (unsigned long long) size);
  1642. return fs->op.truncate(path, size);
  1643. } else {
  1644. return -ENOSYS;
  1645. }
  1646. }
  1647. int fuse_fs_utimens(struct fuse_fs *fs, const char *path,
  1648. const struct timespec tv[2])
  1649. {
  1650. fuse_get_context()->private_data = fs->user_data;
  1651. if (fs->op.utimens) {
  1652. if (fs->debug)
  1653. fprintf(stderr, "utimens %s %li.%09lu %li.%09lu\n",
  1654. path, tv[0].tv_sec, tv[0].tv_nsec,
  1655. tv[1].tv_sec, tv[1].tv_nsec);
  1656. return fs->op.utimens(path, tv);
  1657. } else if(fs->op.utime) {
  1658. struct utimbuf buf;
  1659. if (fs->debug)
  1660. fprintf(stderr, "utime %s %li %li\n", path,
  1661. tv[0].tv_sec, tv[1].tv_sec);
  1662. buf.actime = tv[0].tv_sec;
  1663. buf.modtime = tv[1].tv_sec;
  1664. return fs->op.utime(path, &buf);
  1665. } else {
  1666. return -ENOSYS;
  1667. }
  1668. }
  1669. int
  1670. fuse_fs_futimens(struct fuse_fs *fs_,
  1671. const struct fuse_file_info *ffi_,
  1672. const struct timespec tv_[2])
  1673. {
  1674. fuse_get_context()->private_data = fs_->user_data;
  1675. if(fs_->op.futimens)
  1676. return fs_->op.futimens(ffi_,tv_);
  1677. return -ENOSYS;
  1678. }
  1679. int fuse_fs_access(struct fuse_fs *fs, const char *path, int mask)
  1680. {
  1681. fuse_get_context()->private_data = fs->user_data;
  1682. if (fs->op.access) {
  1683. if (fs->debug)
  1684. fprintf(stderr, "access %s 0%o\n", path, mask);
  1685. return fs->op.access(path, mask);
  1686. } else {
  1687. return -ENOSYS;
  1688. }
  1689. }
  1690. int fuse_fs_readlink(struct fuse_fs *fs, const char *path, char *buf,
  1691. size_t len)
  1692. {
  1693. fuse_get_context()->private_data = fs->user_data;
  1694. if (fs->op.readlink) {
  1695. if (fs->debug)
  1696. fprintf(stderr, "readlink %s %lu\n", path,
  1697. (unsigned long) len);
  1698. return fs->op.readlink(path, buf, len);
  1699. } else {
  1700. return -ENOSYS;
  1701. }
  1702. }
  1703. int fuse_fs_mknod(struct fuse_fs *fs, const char *path, mode_t mode,
  1704. dev_t rdev)
  1705. {
  1706. fuse_get_context()->private_data = fs->user_data;
  1707. if (fs->op.mknod) {
  1708. if (fs->debug)
  1709. fprintf(stderr, "mknod %s 0%o 0x%llx umask=0%03o\n",
  1710. path, mode, (unsigned long long) rdev,
  1711. fuse_get_context()->umask);
  1712. return fs->op.mknod(path, mode, rdev);
  1713. } else {
  1714. return -ENOSYS;
  1715. }
  1716. }
  1717. int fuse_fs_mkdir(struct fuse_fs *fs, const char *path, mode_t mode)
  1718. {
  1719. fuse_get_context()->private_data = fs->user_data;
  1720. if (fs->op.mkdir) {
  1721. if (fs->debug)
  1722. fprintf(stderr, "mkdir %s 0%o umask=0%03o\n",
  1723. path, mode, fuse_get_context()->umask);
  1724. return fs->op.mkdir(path, mode);
  1725. } else {
  1726. return -ENOSYS;
  1727. }
  1728. }
  1729. int fuse_fs_setxattr(struct fuse_fs *fs, const char *path, const char *name,
  1730. const char *value, size_t size, int flags)
  1731. {
  1732. fuse_get_context()->private_data = fs->user_data;
  1733. if (fs->op.setxattr) {
  1734. if (fs->debug)
  1735. fprintf(stderr, "setxattr %s %s %lu 0x%x\n",
  1736. path, name, (unsigned long) size, flags);
  1737. return fs->op.setxattr(path, name, value, size, flags);
  1738. } else {
  1739. return -ENOSYS;
  1740. }
  1741. }
  1742. int fuse_fs_getxattr(struct fuse_fs *fs, const char *path, const char *name,
  1743. char *value, size_t size)
  1744. {
  1745. fuse_get_context()->private_data = fs->user_data;
  1746. if (fs->op.getxattr) {
  1747. if (fs->debug)
  1748. fprintf(stderr, "getxattr %s %s %lu\n",
  1749. path, name, (unsigned long) size);
  1750. return fs->op.getxattr(path, name, value, size);
  1751. } else {
  1752. return -ENOSYS;
  1753. }
  1754. }
  1755. int fuse_fs_listxattr(struct fuse_fs *fs, const char *path, char *list,
  1756. size_t size)
  1757. {
  1758. fuse_get_context()->private_data = fs->user_data;
  1759. if (fs->op.listxattr) {
  1760. if (fs->debug)
  1761. fprintf(stderr, "listxattr %s %lu\n",
  1762. path, (unsigned long) size);
  1763. return fs->op.listxattr(path, list, size);
  1764. } else {
  1765. return -ENOSYS;
  1766. }
  1767. }
  1768. int fuse_fs_bmap(struct fuse_fs *fs, const char *path, size_t blocksize,
  1769. uint64_t *idx)
  1770. {
  1771. fuse_get_context()->private_data = fs->user_data;
  1772. if (fs->op.bmap) {
  1773. if (fs->debug)
  1774. fprintf(stderr, "bmap %s blocksize: %lu index: %llu\n",
  1775. path, (unsigned long) blocksize,
  1776. (unsigned long long) *idx);
  1777. return fs->op.bmap(path, blocksize, idx);
  1778. } else {
  1779. return -ENOSYS;
  1780. }
  1781. }
  1782. int fuse_fs_removexattr(struct fuse_fs *fs, const char *path, const char *name)
  1783. {
  1784. fuse_get_context()->private_data = fs->user_data;
  1785. if (fs->op.removexattr) {
  1786. if (fs->debug)
  1787. fprintf(stderr, "removexattr %s %s\n", path, name);
  1788. return fs->op.removexattr(path, name);
  1789. } else {
  1790. return -ENOSYS;
  1791. }
  1792. }
  1793. int fuse_fs_ioctl(struct fuse_fs *fs, const char *path, int cmd, void *arg,
  1794. struct fuse_file_info *fi, unsigned int flags,
  1795. void *data, uint32_t *out_size)
  1796. {
  1797. fuse_get_context()->private_data = fs->user_data;
  1798. if (fs->op.ioctl) {
  1799. if (fs->debug)
  1800. fprintf(stderr, "ioctl[%llu] 0x%x flags: 0x%x\n",
  1801. (unsigned long long) fi->fh, cmd, flags);
  1802. return fs->op.ioctl(path, cmd, arg, fi, flags, data, out_size);
  1803. } else
  1804. return -ENOSYS;
  1805. }
  1806. int fuse_fs_poll(struct fuse_fs *fs, const char *path,
  1807. struct fuse_file_info *fi, struct fuse_pollhandle *ph,
  1808. unsigned *reventsp)
  1809. {
  1810. fuse_get_context()->private_data = fs->user_data;
  1811. if (fs->op.poll) {
  1812. int res;
  1813. if (fs->debug)
  1814. fprintf(stderr, "poll[%llu] ph: %p\n",
  1815. (unsigned long long) fi->fh, ph);
  1816. res = fs->op.poll(path, fi, ph, reventsp);
  1817. if (fs->debug && !res)
  1818. fprintf(stderr, " poll[%llu] revents: 0x%x\n",
  1819. (unsigned long long) fi->fh, *reventsp);
  1820. return res;
  1821. } else
  1822. return -ENOSYS;
  1823. }
  1824. int fuse_fs_fallocate(struct fuse_fs *fs, const char *path, int mode,
  1825. off_t offset, off_t length, struct fuse_file_info *fi)
  1826. {
  1827. fuse_get_context()->private_data = fs->user_data;
  1828. if (fs->op.fallocate) {
  1829. if (fs->debug)
  1830. fprintf(stderr, "fallocate %s mode %x, offset: %llu, length: %llu\n",
  1831. path,
  1832. mode,
  1833. (unsigned long long) offset,
  1834. (unsigned long long) length);
  1835. return fs->op.fallocate(path, mode, offset, length, fi);
  1836. } else
  1837. return -ENOSYS;
  1838. }
  1839. ssize_t
  1840. fuse_fs_copy_file_range(struct fuse_fs *fs_,
  1841. const char *path_in_,
  1842. struct fuse_file_info *ffi_in_,
  1843. off_t off_in_,
  1844. const char *path_out_,
  1845. struct fuse_file_info *ffi_out_,
  1846. off_t off_out_,
  1847. size_t len_,
  1848. int flags_)
  1849. {
  1850. fuse_get_context()->private_data = fs_->user_data;
  1851. if(fs_->op.copy_file_range == NULL)
  1852. return -ENOSYS;
  1853. return fs_->op.copy_file_range(path_in_,
  1854. ffi_in_,
  1855. off_in_,
  1856. path_out_,
  1857. ffi_out_,
  1858. off_out_,
  1859. len_,
  1860. flags_);
  1861. }
  1862. int
  1863. node_open(const struct node *node_)
  1864. {
  1865. return ((node_ != NULL) &&
  1866. (node_->open_count > 0));
  1867. }
  1868. #ifndef CLOCK_MONOTONIC
  1869. #define CLOCK_MONOTONIC CLOCK_REALTIME
  1870. #endif
  1871. static void curr_time(struct timespec *now)
  1872. {
  1873. static clockid_t clockid = CLOCK_MONOTONIC;
  1874. int res = clock_gettime(clockid, now);
  1875. if (res == -1 && errno == EINVAL) {
  1876. clockid = CLOCK_REALTIME;
  1877. res = clock_gettime(clockid, now);
  1878. }
  1879. if (res == -1) {
  1880. perror("fuse: clock_gettime");
  1881. abort();
  1882. }
  1883. }
  1884. static
  1885. void
  1886. update_stat(struct node *node_,
  1887. const struct stat *stnew_)
  1888. {
  1889. struct stat *stold;
  1890. stold = &node_->stat_cache;
  1891. if((node_->stat_cache_valid) &&
  1892. ((stold->st_mtim.tv_sec != stnew_->st_mtim.tv_sec) ||
  1893. (stold->st_mtim.tv_nsec != stnew_->st_mtim.tv_nsec) ||
  1894. (stold->st_size != stnew_->st_size)))
  1895. node_->stat_cache_valid = 0;
  1896. *stold = *stnew_;
  1897. }
/* Stat `path` (via fgetattr when an open handle `fi` is supplied) and
   bind the result to the node identified by (nodeid, name), filling in
   the kernel entry reply `e`.  Returns 0 or a negative errno. */
static int lookup_path(struct fuse *f, fuse_ino_t nodeid,
		       const char *name, const char *path,
		       struct fuse_entry_param *e, struct fuse_file_info *fi)
{
	int res;

	memset(e, 0, sizeof(struct fuse_entry_param));
	if (fi)
		res = fuse_fs_fgetattr(f->fs, path, &e->attr, fi);
	else
		res = fuse_fs_getattr(f->fs, path, &e->attr);
	if (res == 0) {
		struct node *node;

		/* presumably creates/refs the node if needed — defined elsewhere */
		node = find_node(f, nodeid, name);
		if (node == NULL)
			res = -ENOMEM;
		else {
			e->ino = node->nodeid;
			e->generation = node->generation;
			e->entry_timeout = f->conf.entry_timeout;
			e->attr_timeout = f->conf.attr_timeout;
			/* Node stat cache is guarded by f->lock. */
			pthread_mutex_lock(&f->lock);
			update_stat(node, &e->attr);
			pthread_mutex_unlock(&f->lock);
			set_stat(f, e->ino, &e->attr);
			if (f->conf.debug)
				fprintf(stderr,
					" NODEID: %llu\n"
					" GEN: %llu\n",
					(unsigned long long)e->ino,
					(unsigned long long)e->generation);
		}
	}
	return res;
}
  1932. static struct fuse_context_i *fuse_get_context_internal(void)
  1933. {
  1934. struct fuse_context_i *c;
  1935. c = (struct fuse_context_i *) pthread_getspecific(fuse_context_key);
  1936. if (c == NULL) {
  1937. c = (struct fuse_context_i *)
  1938. calloc(1, sizeof(struct fuse_context_i));
  1939. if (c == NULL) {
  1940. /* This is hard to deal with properly, so just
  1941. abort. If memory is so low that the
  1942. context cannot be allocated, there's not
  1943. much hope for the filesystem anyway */
  1944. fprintf(stderr, "fuse: failed to allocate thread specific data\n");
  1945. abort();
  1946. }
  1947. pthread_setspecific(fuse_context_key, c);
  1948. }
  1949. return c;
  1950. }
  1951. static void fuse_freecontext(void *data)
  1952. {
  1953. free(data);
  1954. }
  1955. static int fuse_create_context_key(void)
  1956. {
  1957. int err = 0;
  1958. pthread_mutex_lock(&fuse_context_lock);
  1959. if (!fuse_context_ref) {
  1960. err = pthread_key_create(&fuse_context_key, fuse_freecontext);
  1961. if (err) {
  1962. fprintf(stderr, "fuse: failed to create thread specific key: %s\n",
  1963. strerror(err));
  1964. pthread_mutex_unlock(&fuse_context_lock);
  1965. return -1;
  1966. }
  1967. }
  1968. fuse_context_ref++;
  1969. pthread_mutex_unlock(&fuse_context_lock);
  1970. return 0;
  1971. }
  1972. static void fuse_delete_context_key(void)
  1973. {
  1974. pthread_mutex_lock(&fuse_context_lock);
  1975. fuse_context_ref--;
  1976. if (!fuse_context_ref) {
  1977. free(pthread_getspecific(fuse_context_key));
  1978. pthread_key_delete(fuse_context_key);
  1979. }
  1980. pthread_mutex_unlock(&fuse_context_lock);
  1981. }
  1982. static struct fuse *req_fuse_prepare(fuse_req_t req)
  1983. {
  1984. struct fuse_context_i *c = fuse_get_context_internal();
  1985. const struct fuse_ctx *ctx = fuse_req_ctx(req);
  1986. c->req = req;
  1987. c->ctx.fuse = req_fuse(req);
  1988. c->ctx.uid = ctx->uid;
  1989. c->ctx.gid = ctx->gid;
  1990. c->ctx.pid = ctx->pid;
  1991. c->ctx.umask = ctx->umask;
  1992. return c->ctx.fuse;
  1993. }
  1994. /* fuse_reply_err() uses non-negated errno values */
  1995. static
  1996. inline
  1997. void
  1998. reply_err(fuse_req_t req_,
  1999. int err_)
  2000. {
  2001. fuse_reply_err(req_,-err_);
  2002. }
  2003. static
  2004. inline
  2005. void
  2006. reply_ENOMEM(fuse_req_t req_)
  2007. {
  2008. fuse_reply_err(req_,ENOMEM);
  2009. }
  2010. static void reply_entry(fuse_req_t req, const struct fuse_entry_param *e,
  2011. int err)
  2012. {
  2013. if (!err) {
  2014. struct fuse *f = req_fuse(req);
  2015. if (fuse_reply_entry(req, e) == -ENOENT) {
  2016. /* Skip forget for negative result */
  2017. if (e->ino != 0)
  2018. forget_node(f, e->ino, 1);
  2019. }
  2020. } else
  2021. reply_err(req, err);
  2022. }
  2023. void fuse_fs_init(struct fuse_fs *fs, struct fuse_conn_info *conn)
  2024. {
  2025. fuse_get_context()->private_data = fs->user_data;
  2026. if (!fs->op.write_buf)
  2027. conn->want &= ~FUSE_CAP_SPLICE_READ;
  2028. if (!fs->op.lock)
  2029. conn->want &= ~FUSE_CAP_POSIX_LOCKS;
  2030. if (!fs->op.flock)
  2031. conn->want &= ~FUSE_CAP_FLOCK_LOCKS;
  2032. if (fs->op.init)
  2033. fs->user_data = fs->op.init(conn);
  2034. }
  2035. static void fuse_lib_init(void *data, struct fuse_conn_info *conn)
  2036. {
  2037. struct fuse *f = (struct fuse *) data;
  2038. struct fuse_context_i *c = fuse_get_context_internal();
  2039. memset(c, 0, sizeof(*c));
  2040. c->ctx.fuse = f;
  2041. conn->want |= FUSE_CAP_EXPORT_SUPPORT;
  2042. fuse_fs_init(f->fs, conn);
  2043. }
  2044. void fuse_fs_destroy(struct fuse_fs *fs)
  2045. {
  2046. fuse_get_context()->private_data = fs->user_data;
  2047. if (fs->op.destroy)
  2048. fs->op.destroy(fs->user_data);
  2049. free(fs);
  2050. }
  2051. static void fuse_lib_destroy(void *data)
  2052. {
  2053. struct fuse *f = (struct fuse *) data;
  2054. struct fuse_context_i *c = fuse_get_context_internal();
  2055. memset(c, 0, sizeof(*c));
  2056. c->ctx.fuse = f;
  2057. fuse_fs_destroy(f->fs);
  2058. f->fs = NULL;
  2059. }
/* LOOKUP handler.  Resolves `name` under `parent` and replies with a
   fuse_entry_param.  "." and ".." are handled specially (needed for
   export support): "." resolves to the parent itself (pinned via
   refctr while we work), ".." redirects the lookup to the
   grandparent; in both cases name is cleared so get_path_name()
   yields the directory's own path. */
static void fuse_lib_lookup(fuse_req_t req, fuse_ino_t parent,
			    const char *name)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_entry_param e;
	char *path;
	int err;
	struct node *dot = NULL;

	if (name[0] == '.') {
		int len = strlen(name);

		if (len == 1 || (name[1] == '.' && len == 2)) {
			pthread_mutex_lock(&f->lock);
			if (len == 1) {
				/* "." — look up the parent node itself. */
				if (f->conf.debug)
					fprintf(stderr, "LOOKUP-DOT\n");
				dot = get_node_nocheck(f, parent);
				if (dot == NULL) {
					pthread_mutex_unlock(&f->lock);
					/* err != 0, so `e` is never read here. */
					reply_entry(req, &e, -ESTALE);
					return;
				}
				/* Pin the node so it cannot vanish while unlocked. */
				dot->refctr++;
			} else {
				/* ".." — redirect the lookup to the grandparent. */
				if (f->conf.debug)
					fprintf(stderr, "LOOKUP-DOTDOT\n");
				parent = get_node(f, parent)->parent->nodeid;
			}
			pthread_mutex_unlock(&f->lock);
			name = NULL;
		}
	}
	err = get_path_name(f, parent, name, &path);
	if (!err) {
		struct fuse_intr_data d;
		if (f->conf.debug)
			fprintf(stderr, "LOOKUP %s\n", path);
		fuse_prepare_interrupt(f, req, &d);
		err = lookup_path(f, parent, name, path, &e, NULL);
		if (err == -ENOENT && f->conf.negative_timeout != 0.0) {
			/* Cache the negative result (ino 0) instead of failing. */
			e.ino = 0;
			e.entry_timeout = f->conf.negative_timeout;
			err = 0;
		}
		fuse_finish_interrupt(f, req, &d);
		free_path(f, parent, path);
	}
	if (dot) {
		/* Release the pin taken for the "." case. */
		pthread_mutex_lock(&f->lock);
		unref_node(f, dot);
		pthread_mutex_unlock(&f->lock);
	}
	reply_entry(req, &e, err);
}
  2113. static
  2114. void
  2115. do_forget(struct fuse *f,
  2116. const fuse_ino_t ino,
  2117. const uint64_t nlookup)
  2118. {
  2119. if(f->conf.debug)
  2120. fprintf(stderr,
  2121. "FORGET %llu/%llu\n",
  2122. (unsigned long long)ino,
  2123. (unsigned long long)nlookup);
  2124. forget_node(f, ino, nlookup);
  2125. }
  2126. static
  2127. void
  2128. fuse_lib_forget(fuse_req_t req,
  2129. const fuse_ino_t ino,
  2130. const uint64_t nlookup)
  2131. {
  2132. do_forget(req_fuse(req), ino, nlookup);
  2133. fuse_reply_none(req);
  2134. }
  2135. static void fuse_lib_forget_multi(fuse_req_t req, size_t count,
  2136. struct fuse_forget_data *forgets)
  2137. {
  2138. struct fuse *f = req_fuse(req);
  2139. size_t i;
  2140. for (i = 0; i < count; i++)
  2141. do_forget(f, forgets[i].ino, forgets[i].nlookup);
  2142. fuse_reply_none(req);
  2143. }
/* GETATTR handler.  When the kernel supplies no file info but the
   node is an unlinked-but-still-open ("hidden") file, a local file
   info is synthesized from the stashed hidden_fh so the open handle
   can be stat'd via fgetattr. */
static void fuse_lib_getattr(fuse_req_t req, fuse_ino_t ino,
			     struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct stat buf;
	char *path;
	int err;
	struct node *node;
	struct fuse_file_info ffi = {0};

	if (fi == NULL) {
		pthread_mutex_lock(&f->lock);
		node = get_node(f, ino);
		if (node->is_hidden) {
			/* Unlinked but open: stat through the saved handle. */
			fi = &ffi;
			fi->fh = node->hidden_fh;
		}
		pthread_mutex_unlock(&f->lock);
	}
	memset(&buf, 0, sizeof(buf));
	path = NULL;
	/* A real path is required unless we have both a handle and an
	   fgetattr op; otherwise a NULL path is acceptable. */
	err = (((fi == NULL) || (f->fs->op.fgetattr == NULL)) ?
	       get_path(f, ino, &path) :
	       get_path_nullok(f, ino, &path));
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = ((fi == NULL) ?
		       fuse_fs_getattr(f->fs, path, &buf) :
		       fuse_fs_fgetattr(f->fs, path, &buf, fi));
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	if (!err) {
		/* Refresh the node's stat cache under f->lock. */
		pthread_mutex_lock(&f->lock);
		node = get_node(f, ino);
		update_stat(node, &buf);
		pthread_mutex_unlock(&f->lock);
		set_stat(f, ino, &buf);
		fuse_reply_attr(req, &buf, f->conf.attr_timeout);
	} else
		reply_err(req, err);
}
  2188. int fuse_fs_chmod(struct fuse_fs *fs, const char *path, mode_t mode)
  2189. {
  2190. fuse_get_context()->private_data = fs->user_data;
  2191. if (fs->op.chmod)
  2192. return fs->op.chmod(path, mode);
  2193. else
  2194. return -ENOSYS;
  2195. }
  2196. int
  2197. fuse_fs_fchmod(struct fuse_fs *fs_,
  2198. const struct fuse_file_info *ffi_,
  2199. const mode_t mode_)
  2200. {
  2201. fuse_get_context()->private_data = fs_->user_data;
  2202. if(fs_->op.fchmod)
  2203. return fs_->op.fchmod(ffi_,mode_);
  2204. return -ENOSYS;
  2205. }
/* SETATTR handler.  Applies each requested attribute change in order
   (mode, owner, size, times) and finishes with a getattr so the reply
   reflects the final state.  When the kernel supplied no file info
   but the node is hidden (unlinked-but-open), the stashed hidden_fh
   is used so the f* variants can operate on the open handle. */
static
void
fuse_lib_setattr(fuse_req_t req,
		 fuse_ino_t ino,
		 struct stat *attr,
		 int valid,
		 struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct stat buf;
	char *path;
	int err;
	struct node *node;
	struct fuse_file_info ffi = {0};

	if (fi == NULL) {
		pthread_mutex_lock(&f->lock);
		node = get_node(f, ino);
		if (node->is_hidden) {
			/* Operate on the saved handle of the unlinked file. */
			fi = &ffi;
			fi->fh = node->hidden_fh;
		}
		pthread_mutex_unlock(&f->lock);
	}
	memset(&buf, 0, sizeof(buf));
	path = NULL;
	/* With a handle the path may legitimately be NULL. */
	err = ((fi == NULL) ?
	       get_path(f, ino, &path) :
	       get_path_nullok(f, ino, &path));
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = 0;
		if (!err && (valid & FATTR_MODE))
			err = ((fi == NULL) ?
			       fuse_fs_chmod(f->fs, path, attr->st_mode) :
			       fuse_fs_fchmod(f->fs, fi, attr->st_mode));
		if (!err && (valid & (FATTR_UID | FATTR_GID))) {
			/* -1 means "leave unchanged" for chown. */
			uid_t uid = ((valid & FATTR_UID) ? attr->st_uid : (uid_t) -1);
			gid_t gid = ((valid & FATTR_GID) ? attr->st_gid : (gid_t) -1);
			err = ((fi == NULL) ?
			       fuse_fs_chown(f->fs, path, uid, gid) :
			       fuse_fs_fchown(f->fs, fi, uid, gid));
		}
		if (!err && (valid & FATTR_SIZE))
			err = ((fi == NULL) ?
			       fuse_fs_truncate(f->fs, path, attr->st_size) :
			       fuse_fs_ftruncate(f->fs, path, attr->st_size, fi));
#ifdef HAVE_UTIMENSAT
		/* utimensat path: can express "omit"/"now" per timestamp,
		   so a single FATTR_ATIME or FATTR_MTIME can be honored. */
		if (!err && f->utime_omit_ok && (valid & (FATTR_ATIME | FATTR_MTIME))) {
			struct timespec tv[2];
			tv[0].tv_sec = 0;
			tv[1].tv_sec = 0;
			tv[0].tv_nsec = UTIME_OMIT;
			tv[1].tv_nsec = UTIME_OMIT;
			if (valid & FATTR_ATIME_NOW)
				tv[0].tv_nsec = UTIME_NOW;
			else if (valid & FATTR_ATIME)
				tv[0] = attr->st_atim;
			if (valid & FATTR_MTIME_NOW)
				tv[1].tv_nsec = UTIME_NOW;
			else if (valid & FATTR_MTIME)
				tv[1] = attr->st_mtim;
			err = ((fi == NULL) ?
			       fuse_fs_utimens(f->fs, path, tv) :
			       fuse_fs_futimens(f->fs, fi, tv));
		}
		else
#endif
		/* Fallback: only act when BOTH timestamps are supplied. */
		if (!err && ((valid & (FATTR_ATIME|FATTR_MTIME)) == (FATTR_ATIME|FATTR_MTIME))) {
			struct timespec tv[2];
			tv[0].tv_sec = attr->st_atime;
			tv[0].tv_nsec = ST_ATIM_NSEC(attr);
			tv[1].tv_sec = attr->st_mtime;
			tv[1].tv_nsec = ST_MTIM_NSEC(attr);
			err = ((fi == NULL) ?
			       fuse_fs_utimens(f->fs, path, tv) :
			       fuse_fs_futimens(f->fs, fi, tv));
		}
		/* Re-stat so the reply carries the post-change attributes. */
		if (!err)
			err = ((fi == NULL) ?
			       fuse_fs_getattr(f->fs, path, &buf) :
			       fuse_fs_fgetattr(f->fs, path, &buf, fi));
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	if (!err) {
		pthread_mutex_lock(&f->lock);
		update_stat(get_node(f, ino), &buf);
		pthread_mutex_unlock(&f->lock);
		set_stat(f, ino, &buf);
		fuse_reply_attr(req, &buf, f->conf.attr_timeout);
	}
	else {
		reply_err(req, err);
	}
}
  2310. static void fuse_lib_access(fuse_req_t req, fuse_ino_t ino, int mask)
  2311. {
  2312. struct fuse *f = req_fuse_prepare(req);
  2313. char *path;
  2314. int err;
  2315. err = get_path(f, ino, &path);
  2316. if (!err) {
  2317. struct fuse_intr_data d;
  2318. fuse_prepare_interrupt(f, req, &d);
  2319. err = fuse_fs_access(f->fs, path, mask);
  2320. fuse_finish_interrupt(f, req, &d);
  2321. free_path(f, ino, path);
  2322. }
  2323. reply_err(req, err);
  2324. }
  2325. static void fuse_lib_readlink(fuse_req_t req, fuse_ino_t ino)
  2326. {
  2327. struct fuse *f = req_fuse_prepare(req);
  2328. char linkname[PATH_MAX + 1];
  2329. char *path;
  2330. int err;
  2331. err = get_path(f, ino, &path);
  2332. if (!err) {
  2333. struct fuse_intr_data d;
  2334. fuse_prepare_interrupt(f, req, &d);
  2335. err = fuse_fs_readlink(f->fs, path, linkname, sizeof(linkname));
  2336. fuse_finish_interrupt(f, req, &d);
  2337. free_path(f, ino, path);
  2338. }
  2339. if (!err) {
  2340. linkname[PATH_MAX] = '\0';
  2341. fuse_reply_readlink(req, linkname);
  2342. } else
  2343. reply_err(req, err);
  2344. }
/* MKNOD handler.  For regular files, first attempt an atomic
   create(+release) via the filesystem's create op; if create reports
   -ENOSYS (or the node is not a regular file), fall back to mknod. */
static void fuse_lib_mknod(fuse_req_t req, fuse_ino_t parent, const char *name,
			   mode_t mode, dev_t rdev)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_entry_param e;
	char *path;
	int err;

	err = get_path_name(f, parent, name, &path);
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = -ENOSYS;
		if (S_ISREG(mode)) {
			struct fuse_file_info fi;

			memset(&fi, 0, sizeof(fi));
			fi.flags = O_CREAT | O_EXCL | O_WRONLY;
			err = fuse_fs_create(f->fs, path, mode, &fi);
			if (!err) {
				/* mknod keeps no handle open: release right away. */
				err = lookup_path(f, parent, name, path, &e,
						  &fi);
				fuse_fs_release(f->fs, path, &fi);
			}
		}
		if (err == -ENOSYS) {
			err = fuse_fs_mknod(f->fs, path, mode, rdev);
			if (!err)
				err = lookup_path(f, parent, name, path, &e,
						  NULL);
		}
		fuse_finish_interrupt(f, req, &d);
		free_path(f, parent, path);
	}
	reply_entry(req, &e, err);
}
  2379. static void fuse_lib_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name,
  2380. mode_t mode)
  2381. {
  2382. struct fuse *f = req_fuse_prepare(req);
  2383. struct fuse_entry_param e;
  2384. char *path;
  2385. int err;
  2386. err = get_path_name(f, parent, name, &path);
  2387. if (!err) {
  2388. struct fuse_intr_data d;
  2389. fuse_prepare_interrupt(f, req, &d);
  2390. err = fuse_fs_mkdir(f->fs, path, mode);
  2391. if (!err)
  2392. err = lookup_path(f, parent, name, path, &e, NULL);
  2393. fuse_finish_interrupt(f, req, &d);
  2394. free_path(f, parent, path);
  2395. }
  2396. reply_entry(req, &e, err);
  2397. }
/* UNLINK handler.  If the victim node is still open, ask the user
   filesystem to "hide" it first (prepare_hide stores a handle in
   hidden_fh) so POSIX delete-while-open semantics hold; the hidden
   handle is freed later by fuse_do_release() when the last opener
   goes away.  The tree entry is write-locked for the duration. */
static
void
fuse_lib_unlink(fuse_req_t req,
		fuse_ino_t parent,
		const char *name)
{
	int err;
	char *path;
	struct fuse *f;
	struct node *wnode;
	struct fuse_intr_data d;

	f = req_fuse_prepare(req);
	err = get_path_wrlock(f, parent, name, &path, &wnode);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		pthread_mutex_lock(&f->lock);
		if (node_open(wnode)) {
			/* Still open somewhere: stash a handle before unlinking. */
			err = fuse_fs_prepare_hide(f->fs, path, &wnode->hidden_fh);
			if (!err)
				wnode->is_hidden = 1;
		}
		pthread_mutex_unlock(&f->lock);
		/* Note: the unlink proceeds even if prepare_hide failed. */
		err = fuse_fs_unlink(f->fs, path);
		if (!err)
			remove_node(f, parent, name);
		fuse_finish_interrupt(f, req, &d);
		free_path_wrlock(f, parent, wnode, path);
	}
	reply_err(req, err);
}
  2430. static void fuse_lib_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name)
  2431. {
  2432. struct fuse *f = req_fuse_prepare(req);
  2433. struct node *wnode;
  2434. char *path;
  2435. int err;
  2436. err = get_path_wrlock(f, parent, name, &path, &wnode);
  2437. if (!err) {
  2438. struct fuse_intr_data d;
  2439. fuse_prepare_interrupt(f, req, &d);
  2440. err = fuse_fs_rmdir(f->fs, path);
  2441. fuse_finish_interrupt(f, req, &d);
  2442. if (!err)
  2443. remove_node(f, parent, name);
  2444. free_path_wrlock(f, parent, wnode, path);
  2445. }
  2446. reply_err(req, err);
  2447. }
  2448. static void fuse_lib_symlink(fuse_req_t req, const char *linkname,
  2449. fuse_ino_t parent, const char *name)
  2450. {
  2451. struct fuse *f = req_fuse_prepare(req);
  2452. struct fuse_entry_param e;
  2453. char *path;
  2454. int err;
  2455. err = get_path_name(f, parent, name, &path);
  2456. if (!err) {
  2457. struct fuse_intr_data d;
  2458. fuse_prepare_interrupt(f, req, &d);
  2459. err = fuse_fs_symlink(f->fs, linkname, path);
  2460. if (!err)
  2461. err = lookup_path(f, parent, name, path, &e, NULL);
  2462. fuse_finish_interrupt(f, req, &d);
  2463. free_path(f, parent, path);
  2464. }
  2465. reply_entry(req, &e, err);
  2466. }
/* RENAME handler.  Both paths are resolved and locked together by
   get_path2().  If the rename would clobber a still-open target
   (wnode2), the target is "hidden" first (see fuse_lib_unlink) so its
   open handles keep working.  On success the in-memory tree is
   updated via rename_node(). */
static
void
fuse_lib_rename(fuse_req_t req,
		fuse_ino_t olddir,
		const char *oldname,
		fuse_ino_t newdir,
		const char *newname)
{
	int err;
	struct fuse *f;
	char *oldpath;
	char *newpath;
	struct node *wnode1;
	struct node *wnode2;
	struct fuse_intr_data d;

	f = req_fuse_prepare(req);
	err = get_path2(f, olddir, oldname, newdir, newname,
			&oldpath, &newpath, &wnode1, &wnode2);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		pthread_mutex_lock(&f->lock);
		if (node_open(wnode2)) {
			/* Target still open: stash a handle before it is replaced. */
			err = fuse_fs_prepare_hide(f->fs, newpath, &wnode2->hidden_fh);
			if (!err)
				wnode2->is_hidden = 1;
		}
		pthread_mutex_unlock(&f->lock);
		/* Note: the rename proceeds even if prepare_hide failed. */
		err = fuse_fs_rename(f->fs, oldpath, newpath);
		if (!err)
			err = rename_node(f, olddir, oldname, newdir, newname);
		fuse_finish_interrupt(f, req, &d);
		free_path2(f, olddir, newdir, wnode1, wnode2, oldpath, newpath);
	}
	reply_err(req, err);
}
  2504. static void fuse_lib_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent,
  2505. const char *newname)
  2506. {
  2507. struct fuse *f = req_fuse_prepare(req);
  2508. struct fuse_entry_param e;
  2509. char *oldpath;
  2510. char *newpath;
  2511. int err;
  2512. err = get_path2(f, ino, NULL, newparent, newname,
  2513. &oldpath, &newpath, NULL, NULL);
  2514. if (!err) {
  2515. struct fuse_intr_data d;
  2516. fuse_prepare_interrupt(f, req, &d);
  2517. err = fuse_fs_link(f->fs, oldpath, newpath);
  2518. if (!err)
  2519. err = lookup_path(f, newparent, newname, newpath,
  2520. &e, NULL);
  2521. fuse_finish_interrupt(f, req, &d);
  2522. free_path2(f, ino, newparent, NULL, NULL, oldpath, newpath);
  2523. }
  2524. reply_entry(req, &e, err);
  2525. }
/* Common release path for open files: invoke the filesystem's release
   op (substituting "-" when no path is available and NULL paths are
   not allowed), decrement the node's open count, and — if this was
   the last opener of a hidden (unlinked) file — free the stashed
   hidden handle outside the lock. */
static void fuse_do_release(struct fuse *f, fuse_ino_t ino, const char *path,
			    struct fuse_file_info *fi)
{
	struct node *node;
	uint64_t fh;
	int was_hidden;
	const char *compatpath;

	fh = 0;
	if (path != NULL || f->nullpath_ok || f->conf.nopath)
		compatpath = path;
	else
		compatpath = "-";
	fuse_fs_release(f->fs, compatpath, fi);
	pthread_mutex_lock(&f->lock);
	node = get_node(f, ino);
	assert(node->open_count > 0);
	node->open_count--;
	was_hidden = 0;
	if (node->is_hidden && (node->open_count == 0)) {
		/* Last opener of an unlinked file: claim the hidden handle. */
		was_hidden = 1;
		node->is_hidden = 0;
		fh = node->hidden_fh;
	}
	pthread_mutex_unlock(&f->lock);
	if (was_hidden)
		fuse_fs_free_hide(f->fs, fh);
}
/* CREATE handler.  Creates+opens the file, looks it up for the entry
   reply, and bumps the node's open count.  If the kernel aborted the
   request (-ENOENT from fuse_reply_create) the just-opened handle and
   the lookup reference are both rolled back. */
static
void
fuse_lib_create(fuse_req_t req,
		fuse_ino_t parent,
		const char *name,
		mode_t mode,
		struct fuse_file_info *fi)
{
	int err;
	char *path;
	struct fuse *f;
	struct fuse_intr_data d;
	struct fuse_entry_param e;

	f = req_fuse_prepare(req);
	err = get_path_name(f, parent, name, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_create(f->fs, path, mode, fi);
		if (!err) {
			err = lookup_path(f, parent, name, path, &e, fi);
			if (err) {
				/* Lookup failed: close what create opened. */
				fuse_fs_release(f->fs, path, fi);
			}
			else if (!S_ISREG(e.attr.st_mode)) {
				/* create must yield a regular file. */
				err = -EIO;
				fuse_fs_release(f->fs, path, fi);
				forget_node(f, e.ino, 1);
			}
		}
		fuse_finish_interrupt(f, req, &d);
	}
	if (!err) {
		pthread_mutex_lock(&f->lock);
		get_node(f, e.ino)->open_count++;
		pthread_mutex_unlock(&f->lock);
		if (fuse_reply_create(req, &e, fi) == -ENOENT) {
			/* The open syscall was interrupted, so it
			   must be cancelled */
			fuse_do_release(f, e.ino, path, fi);
			forget_node(f, e.ino, 1);
		}
	}
	else {
		reply_err(req, err);
	}
	free_path(f, parent, path);
}
  2606. static double diff_timespec(const struct timespec *t1,
  2607. const struct timespec *t2)
  2608. {
  2609. return (t1->tv_sec - t2->tv_sec) +
  2610. ((double) t1->tv_nsec - (double) t2->tv_nsec) / 1000000000.0;
  2611. }
/* auto_cache support for open: if the node's cached attributes are
   still valid after a fresh fgetattr (mtime/size unchanged —
   update_stat clears the flag otherwise), tell the kernel to keep its
   page cache; otherwise the cache is implicitly dropped.  The cache
   is marked valid for subsequent opens either way.
   NOTE(review): f->lock is released around the fgetattr call, so
   another thread may touch the node's flags in that window. */
static
void
open_auto_cache(struct fuse *f,
		fuse_ino_t ino,
		const char *path,
		struct fuse_file_info *fi)
{
	struct node *node;

	pthread_mutex_lock(&f->lock);
	node = get_node(f, ino);
	if (node->stat_cache_valid) {
		int err;
		struct stat stbuf;

		/* Drop the lock for the (potentially slow) stat call. */
		pthread_mutex_unlock(&f->lock);
		err = fuse_fs_fgetattr(f->fs, path, &stbuf, fi);
		pthread_mutex_lock(&f->lock);
		if (!err)
			update_stat(node, &stbuf);
		else
			node->stat_cache_valid = 0;
	}
	if (node->stat_cache_valid)
		fi->keep_cache = 1;
	node->stat_cache_valid = 1;
	pthread_mutex_unlock(&f->lock);
}
/* OPEN handler.  Opens the file, optionally runs the auto_cache
   check, bumps the node's open count, and replies.  If the kernel
   aborted the request (-ENOENT from fuse_reply_open) the open is
   rolled back via fuse_do_release(). */
static
void
fuse_lib_open(fuse_req_t req,
	      fuse_ino_t ino,
	      struct fuse_file_info *fi)
{
	int err;
	char *path;
	struct fuse *f;
	struct fuse_intr_data d;

	f = req_fuse_prepare(req);
	err = get_path(f, ino, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_open(f->fs, path, fi);
		if (!err) {
			if (fi && fi->auto_cache)
				open_auto_cache(f, ino, path, fi);
		}
		fuse_finish_interrupt(f, req, &d);
	}
	if (!err) {
		pthread_mutex_lock(&f->lock);
		get_node(f, ino)->open_count++;
		pthread_mutex_unlock(&f->lock);
		/* The open syscall was interrupted, so it must be cancelled */
		if (fuse_reply_open(req, fi) == -ENOENT)
			fuse_do_release(f, ino, path, fi);
	}
	else {
		reply_err(req, err);
	}
	free_path(f, ino, path);
}
  2677. static void fuse_lib_read(fuse_req_t req, fuse_ino_t ino, size_t size,
  2678. off_t off, struct fuse_file_info *fi)
  2679. {
  2680. struct fuse *f = req_fuse_prepare(req);
  2681. struct fuse_bufvec *buf = NULL;
  2682. char *path;
  2683. int res;
  2684. res = get_path_nullok(f, ino, &path);
  2685. if (res == 0) {
  2686. struct fuse_intr_data d;
  2687. fuse_prepare_interrupt(f, req, &d);
  2688. res = fuse_fs_read_buf(f->fs, path, &buf, size, off, fi);
  2689. fuse_finish_interrupt(f, req, &d);
  2690. free_path(f, ino, path);
  2691. }
  2692. if (res == 0)
  2693. fuse_reply_data(req, buf, FUSE_BUF_SPLICE_MOVE);
  2694. else
  2695. reply_err(req, res);
  2696. fuse_free_buf(buf);
  2697. }
  2698. static void fuse_lib_write_buf(fuse_req_t req, fuse_ino_t ino,
  2699. struct fuse_bufvec *buf, off_t off,
  2700. struct fuse_file_info *fi)
  2701. {
  2702. struct fuse *f = req_fuse_prepare(req);
  2703. char *path;
  2704. int res;
  2705. res = get_path_nullok(f, ino, &path);
  2706. if (res == 0) {
  2707. struct fuse_intr_data d;
  2708. fuse_prepare_interrupt(f, req, &d);
  2709. res = fuse_fs_write_buf(f->fs, path, buf, off, fi);
  2710. fuse_finish_interrupt(f, req, &d);
  2711. free_path(f, ino, path);
  2712. }
  2713. if (res >= 0)
  2714. fuse_reply_write(req, res);
  2715. else
  2716. reply_err(req, res);
  2717. }
  2718. static void fuse_lib_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
  2719. struct fuse_file_info *fi)
  2720. {
  2721. struct fuse *f = req_fuse_prepare(req);
  2722. char *path;
  2723. int err;
  2724. err = get_path_nullok(f, ino, &path);
  2725. if (!err) {
  2726. struct fuse_intr_data d;
  2727. fuse_prepare_interrupt(f, req, &d);
  2728. err = fuse_fs_fsync(f->fs, path, datasync, fi);
  2729. fuse_finish_interrupt(f, req, &d);
  2730. free_path(f, ino, path);
  2731. }
  2732. reply_err(req, err);
  2733. }
  2734. static struct fuse_dh *get_dirhandle(const struct fuse_file_info *llfi,
  2735. struct fuse_file_info *fi)
  2736. {
  2737. struct fuse_dh *dh = (struct fuse_dh *) (uintptr_t) llfi->fh;
  2738. memset(fi, 0, sizeof(struct fuse_file_info));
  2739. fi->fh = dh->fh;
  2740. return dh;
  2741. }
/* OPENDIR handler.  Allocates a directory handle (entry buffer +
   lock) and stores it in llfi->fh for later readdir/releasedir calls;
   the user filesystem's own handle lives in dh->fh.  On error or an
   interrupted reply the handle is torn down again. */
static
void
fuse_lib_opendir(fuse_req_t req,
		 fuse_ino_t ino,
		 struct fuse_file_info *llfi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_dh *dh;
	struct fuse_file_info fi;
	char *path;
	int err;

	dh = (struct fuse_dh*)calloc(1, sizeof(struct fuse_dh));
	if (dh == NULL)
		return reply_ENOMEM(req);
	fuse_dirents_init(&dh->d);
	fuse_mutex_init(&dh->lock);
	llfi->fh = (uintptr_t)dh;
	memset(&fi, 0, sizeof(fi));
	fi.flags = llfi->flags;
	err = get_path(f, ino, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_opendir(f->fs, path, &fi);
		fuse_finish_interrupt(f, req, &d);
		/* Propagate the fs handle and cache hints to the kernel. */
		dh->fh = fi.fh;
		llfi->keep_cache = fi.keep_cache;
		llfi->cache_readdir = fi.cache_readdir;
	}
	if (!err) {
		if (fuse_reply_open(req, llfi) == -ENOENT) {
			/* The opendir syscall was interrupted, so it
			   must be cancelled */
			fuse_fs_releasedir(f->fs, path, &fi);
			pthread_mutex_destroy(&dh->lock);
			free(dh);
		}
	} else {
		reply_err(req, err);
		pthread_mutex_destroy(&dh->lock);
		free(dh);
	}
	/* NOTE(review): called even when get_path() failed — relies on
	   get_path setting *path in that case; confirm. */
	free_path(f, ino, path);
}
  2786. static
  2787. int
  2788. readdir_fill(struct fuse *f_,
  2789. fuse_req_t req_,
  2790. fuse_dirents_t *d_,
  2791. struct fuse_file_info *fi_)
  2792. {
  2793. int rv;
  2794. struct fuse_intr_data intr_data;
  2795. fuse_prepare_interrupt(f_,req_,&intr_data);
  2796. rv = fuse_fs_readdir(f_->fs,fi_,d_);
  2797. fuse_finish_interrupt(f_,req_,&intr_data);
  2798. return rv;
  2799. }
  2800. static
  2801. int
  2802. readdir_plus_fill(struct fuse *f_,
  2803. fuse_req_t req_,
  2804. fuse_dirents_t *d_,
  2805. struct fuse_file_info *fi_)
  2806. {
  2807. int rv;
  2808. struct fuse_intr_data intr_data;
  2809. fuse_prepare_interrupt(f_,req_,&intr_data);
  2810. rv = fuse_fs_readdir_plus(f_->fs,fi_,d_);
  2811. fuse_finish_interrupt(f_,req_,&intr_data);
  2812. return rv;
  2813. }
  2814. static
  2815. uint64_t
  2816. convert_plus2normal(fuse_dirents_t *d_,
  2817. uint64_t off_)
  2818. {
  2819. uint64_t ino;
  2820. fuse_dirent_t *d;
  2821. fuse_direntplus_t *dp;
  2822. dp = (fuse_direntplus_t*)&d_->buf[off_];
  2823. ino = dp->dirent.ino;
  2824. fuse_dirents_convert_plus2normal(d_);
  2825. d = fuse_dirents_find(d_,ino);
  2826. return d->off;
  2827. }
/* READDIR handler.  The whole directory is read into dh->d on the
   first call (off_ == 0); subsequent calls serve windows of that
   buffer clamped to [off_, data_len).  dh->lock serializes access to
   the shared buffer. */
static
void
fuse_lib_readdir(fuse_req_t req_,
		 fuse_ino_t ino_,
		 size_t size_,
		 off_t off_,
		 struct fuse_file_info *llffi_)
{
	int rv;
	struct fuse *f;
	fuse_dirents_t *d;
	struct fuse_dh *dh;
	struct fuse_file_info fi;

	f = req_fuse_prepare(req_);
	dh = get_dirhandle(llffi_, &fi);
	d = &dh->d;
	pthread_mutex_lock(&dh->lock);
	rv = 0;
	if (off_ == 0)
		rv = readdir_fill(f, req_, d, &fi);
	if (rv) {
		reply_err(req_, rv);
		goto out;
	}
	/* Clamp the reply window to the buffered data. */
	if (off_ >= d->data_len)
		size_ = 0;
	else if ((off_ + size_) > d->data_len)
		size_ = (d->data_len - off_);
	/* if((size_ > 0) && (d->type == PLUS)) */
	/* off_ = convert_plus2normal(d,off_); */
	fuse_reply_buf(req_,
		       &d->buf[off_],
		       size_);
out:
	pthread_mutex_unlock(&dh->lock);
}
/* READDIRPLUS handler.  Same windowing scheme as fuse_lib_readdir,
   but the buffer is filled with "plus" entries (attrs included) via
   readdir_plus_fill(). */
static
void
fuse_lib_readdir_plus(fuse_req_t req_,
		      fuse_ino_t ino_,
		      size_t size_,
		      off_t off_,
		      struct fuse_file_info *llffi_)
{
	int rv;
	struct fuse *f;
	fuse_dirents_t *d;
	struct fuse_dh *dh;
	struct fuse_file_info fi;

	f = req_fuse_prepare(req_);
	dh = get_dirhandle(llffi_, &fi);
	d = &dh->d;
	pthread_mutex_lock(&dh->lock);
	rv = 0;
	if (off_ == 0)
		rv = readdir_plus_fill(f, req_, d, &fi);
	if (rv) {
		reply_err(req_, rv);
		goto out;
	}
	/* Clamp the reply window to the buffered data. */
	if (off_ >= d->data_len)
		size_ = 0;
	else if ((off_ + size_) > d->data_len)
		size_ = (d->data_len - off_);
	fuse_reply_buf(req_,
		       &d->buf[off_],
		       size_);
out:
	pthread_mutex_unlock(&dh->lock);
}
/* RELEASEDIR handler.  Calls the filesystem's releasedir (with "-"
   when no path is available and NULL paths are not allowed), then
   destroys the directory handle.  The lock/unlock pair drains any
   readdir still holding dh->lock before the handle is freed. */
static
void
fuse_lib_releasedir(fuse_req_t req_,
		    fuse_ino_t ino_,
		    struct fuse_file_info *llfi_)
{
	struct fuse *f;
	struct fuse_dh *dh;
	struct fuse_intr_data d;
	struct fuse_file_info fi;
	char *path;
	const char *compatpath;

	f = req_fuse_prepare(req_);
	dh = get_dirhandle(llfi_, &fi);
	/* NOTE(review): return value ignored — presumably *path is set
	   (possibly NULL) even on failure; confirm in get_path_nullok. */
	get_path_nullok(f, ino_, &path);
	if (path != NULL || f->nullpath_ok || f->conf.nopath)
		compatpath = path;
	else
		compatpath = "-";
	fuse_prepare_interrupt(f, req_, &d);
	fuse_fs_releasedir(f->fs, compatpath, &fi);
	fuse_finish_interrupt(f, req_, &d);
	free_path(f, ino_, path);
	/* Done to keep race condition between last readdir reply and the unlock */
	pthread_mutex_lock(&dh->lock);
	pthread_mutex_unlock(&dh->lock);
	pthread_mutex_destroy(&dh->lock);
	fuse_dirents_free(&dh->d);
	free(dh);
	reply_err(req_, 0);
}
  2931. static void fuse_lib_fsyncdir(fuse_req_t req, fuse_ino_t ino, int datasync,
  2932. struct fuse_file_info *llfi)
  2933. {
  2934. struct fuse *f = req_fuse_prepare(req);
  2935. struct fuse_file_info fi;
  2936. char *path;
  2937. int err;
  2938. get_dirhandle(llfi, &fi);
  2939. err = get_path_nullok(f, ino, &path);
  2940. if (!err) {
  2941. struct fuse_intr_data d;
  2942. fuse_prepare_interrupt(f, req, &d);
  2943. err = fuse_fs_fsyncdir(f->fs, path, datasync, &fi);
  2944. fuse_finish_interrupt(f, req, &d);
  2945. free_path(f, ino, path);
  2946. }
  2947. reply_err(req, err);
  2948. }
  2949. static void fuse_lib_statfs(fuse_req_t req, fuse_ino_t ino)
  2950. {
  2951. struct fuse *f = req_fuse_prepare(req);
  2952. struct statvfs buf;
  2953. char *path = NULL;
  2954. int err = 0;
  2955. memset(&buf, 0, sizeof(buf));
  2956. if (ino)
  2957. err = get_path(f, ino, &path);
  2958. if (!err) {
  2959. struct fuse_intr_data d;
  2960. fuse_prepare_interrupt(f, req, &d);
  2961. err = fuse_fs_statfs(f->fs, path ? path : "/", &buf);
  2962. fuse_finish_interrupt(f, req, &d);
  2963. free_path(f, ino, path);
  2964. }
  2965. if (!err)
  2966. fuse_reply_statfs(req, &buf);
  2967. else
  2968. reply_err(req, err);
  2969. }
  2970. static void fuse_lib_setxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
  2971. const char *value, size_t size, int flags)
  2972. {
  2973. struct fuse *f = req_fuse_prepare(req);
  2974. char *path;
  2975. int err;
  2976. err = get_path(f, ino, &path);
  2977. if (!err) {
  2978. struct fuse_intr_data d;
  2979. fuse_prepare_interrupt(f, req, &d);
  2980. err = fuse_fs_setxattr(f->fs, path, name, value, size, flags);
  2981. fuse_finish_interrupt(f, req, &d);
  2982. free_path(f, ino, path);
  2983. }
  2984. reply_err(req, err);
  2985. }
  2986. static int common_getxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  2987. const char *name, char *value, size_t size)
  2988. {
  2989. int err;
  2990. char *path;
  2991. err = get_path(f, ino, &path);
  2992. if (!err) {
  2993. struct fuse_intr_data d;
  2994. fuse_prepare_interrupt(f, req, &d);
  2995. err = fuse_fs_getxattr(f->fs, path, name, value, size);
  2996. fuse_finish_interrupt(f, req, &d);
  2997. free_path(f, ino, path);
  2998. }
  2999. return err;
  3000. }
  3001. static void fuse_lib_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
  3002. size_t size)
  3003. {
  3004. int res;
  3005. struct fuse *f = req_fuse_prepare(req);
  3006. if(size)
  3007. {
  3008. char *value = (char*)malloc(size);
  3009. if(value == NULL)
  3010. return reply_ENOMEM(req);
  3011. res = common_getxattr(f, req, ino, name, value, size);
  3012. if(res > 0)
  3013. fuse_reply_buf(req, value, res);
  3014. else
  3015. reply_err(req, res);
  3016. free(value);
  3017. }
  3018. else
  3019. {
  3020. res = common_getxattr(f, req, ino, name, NULL, 0);
  3021. if(res >= 0)
  3022. fuse_reply_xattr(req, res);
  3023. else
  3024. reply_err(req, res);
  3025. }
  3026. }
  3027. static int common_listxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
  3028. char *list, size_t size)
  3029. {
  3030. char *path;
  3031. int err;
  3032. err = get_path(f, ino, &path);
  3033. if (!err) {
  3034. struct fuse_intr_data d;
  3035. fuse_prepare_interrupt(f, req, &d);
  3036. err = fuse_fs_listxattr(f->fs, path, list, size);
  3037. fuse_finish_interrupt(f, req, &d);
  3038. free_path(f, ino, path);
  3039. }
  3040. return err;
  3041. }
  3042. static void fuse_lib_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
  3043. {
  3044. struct fuse *f = req_fuse_prepare(req);
  3045. int res;
  3046. if(size)
  3047. {
  3048. char *list = (char*)malloc(size);
  3049. if(list == NULL)
  3050. return reply_ENOMEM(req);
  3051. res = common_listxattr(f, req, ino, list, size);
  3052. if (res > 0)
  3053. fuse_reply_buf(req, list, res);
  3054. else
  3055. reply_err(req, res);
  3056. free(list);
  3057. }
  3058. else
  3059. {
  3060. res = common_listxattr(f, req, ino, NULL, 0);
  3061. if (res >= 0)
  3062. fuse_reply_xattr(req, res);
  3063. else
  3064. reply_err(req, res);
  3065. }
  3066. }
  3067. static void fuse_lib_removexattr(fuse_req_t req, fuse_ino_t ino,
  3068. const char *name)
  3069. {
  3070. struct fuse *f = req_fuse_prepare(req);
  3071. char *path;
  3072. int err;
  3073. err = get_path(f, ino, &path);
  3074. if (!err) {
  3075. struct fuse_intr_data d;
  3076. fuse_prepare_interrupt(f, req, &d);
  3077. err = fuse_fs_removexattr(f->fs, path, name);
  3078. fuse_finish_interrupt(f, req, &d);
  3079. free_path(f, ino, path);
  3080. }
  3081. reply_err(req, err);
  3082. }
/* COPY_FILE_RANGE handler: resolve both paths, forward to the
 * filesystem, and reply with the number of bytes copied (as a write
 * reply) or an error.  Both path references are released on every
 * exit path. */
static
void
fuse_lib_copy_file_range(fuse_req_t req_,
                         fuse_ino_t nodeid_in_,
                         off_t off_in_,
                         struct fuse_file_info *ffi_in_,
                         fuse_ino_t nodeid_out_,
                         off_t off_out_,
                         struct fuse_file_info *ffi_out_,
                         size_t len_,
                         int flags_)
{
  int err;
  ssize_t rv;
  char *path_in;
  char *path_out;
  struct fuse *f;
  struct fuse_intr_data d;

  f = req_fuse_prepare(req_);

  err = get_path_nullok(f,nodeid_in_,&path_in);
  if(err)
    return reply_err(req_,err);

  err = get_path_nullok(f,nodeid_out_,&path_out);
  if(err)
    {
      /* The source path was already resolved; drop it before bailing. */
      free_path(f,nodeid_in_,path_in);
      return reply_err(req_,err);
    }

  fuse_prepare_interrupt(f,req_,&d);
  rv = fuse_fs_copy_file_range(f->fs,
                               path_in,
                               ffi_in_,
                               off_in_,
                               path_out,
                               ffi_out_,
                               off_out_,
                               len_,
                               flags_);
  fuse_finish_interrupt(f,req_,&d);

  if(rv >= 0)
    fuse_reply_write(req_,rv);
  else
    reply_err(req_,rv);

  free_path(f,nodeid_in_,path_in);
  free_path(f,nodeid_out_,path_out);
}
  3129. static struct lock *locks_conflict(struct node *node, const struct lock *lock)
  3130. {
  3131. struct lock *l;
  3132. for (l = node->locks; l; l = l->next)
  3133. if (l->owner != lock->owner &&
  3134. lock->start <= l->end && l->start <= lock->end &&
  3135. (l->type == F_WRLCK || lock->type == F_WRLCK))
  3136. break;
  3137. return l;
  3138. }
  3139. static void delete_lock(struct lock **lockp)
  3140. {
  3141. struct lock *l = *lockp;
  3142. *lockp = l->next;
  3143. free(l);
  3144. }
  3145. static void insert_lock(struct lock **pos, struct lock *lock)
  3146. {
  3147. lock->next = *pos;
  3148. *pos = lock;
  3149. }
/* Merge @lock into @node's lock list (kept in start order per owner),
 * extending, trimming, splitting or deleting overlapping regions as
 * needed — classic POSIX record-lock bookkeeping.
 *
 * Up to two fresh list elements may be required: one for the inserted
 * region and one when an existing lock must be split in two.  They are
 * pre-allocated so the list is never left half-updated on OOM; returns
 * -ENOLCK if that pre-allocation fails, 0 otherwise.  A full-range
 * F_UNLCK can only remove entries and needs no allocation. */
static int locks_insert(struct node *node, struct lock *lock)
{
	struct lock **lp;
	struct lock *newl1 = NULL;
	struct lock *newl2 = NULL;

	if (lock->type != F_UNLCK || lock->start != 0 ||
	    lock->end != OFFSET_MAX) {
		newl1 = malloc(sizeof(struct lock));
		newl2 = malloc(sizeof(struct lock));

		if (!newl1 || !newl2) {
			free(newl1);
			free(newl2);
			return -ENOLCK;
		}
	}

	for (lp = &node->locks; *lp;) {
		struct lock *l = *lp;
		if (l->owner != lock->owner)
			goto skip;

		if (lock->type == l->type) {
			/* Same type: coalesce overlapping/adjacent ranges. */
			if (l->end < lock->start - 1)
				goto skip;
			if (lock->end < l->start - 1)
				break;
			if (l->start <= lock->start && lock->end <= l->end)
				goto out;	/* already fully covered */
			if (l->start < lock->start)
				lock->start = l->start;
			if (lock->end < l->end)
				lock->end = l->end;
			goto delete;		/* absorbed into @lock */
		} else {
			/* Different type: carve the new range out of l. */
			if (l->end < lock->start)
				goto skip;
			if (lock->end < l->start)
				break;
			if (lock->start <= l->start && l->end <= lock->end)
				goto delete;	/* l is fully replaced */
			if (l->end <= lock->end) {
				l->end = lock->start - 1;	/* trim l's tail */
				goto skip;
			}
			if (lock->start <= l->start) {
				l->start = lock->end + 1;	/* trim l's head */
				break;
			}
			/* @lock lies strictly inside l: split l in two. */
			*newl2 = *l;
			newl2->start = lock->end + 1;
			l->end = lock->start - 1;
			insert_lock(&l->next, newl2);
			newl2 = NULL;
		}
	skip:
		lp = &l->next;
		continue;
	delete:
		delete_lock(lp);
	}
	if (lock->type != F_UNLCK) {
		*newl1 = *lock;
		insert_lock(lp, newl1);
		newl1 = NULL;
	}
out:
	/* Free whichever pre-allocations went unused. */
	free(newl1);
	free(newl2);
	return 0;
}
  3218. static void flock_to_lock(struct flock *flock, struct lock *lock)
  3219. {
  3220. memset(lock, 0, sizeof(struct lock));
  3221. lock->type = flock->l_type;
  3222. lock->start = flock->l_start;
  3223. lock->end =
  3224. flock->l_len ? flock->l_start + flock->l_len - 1 : OFFSET_MAX;
  3225. lock->pid = flock->l_pid;
  3226. }
  3227. static void lock_to_flock(struct lock *lock, struct flock *flock)
  3228. {
  3229. flock->l_type = lock->type;
  3230. flock->l_start = lock->start;
  3231. flock->l_len =
  3232. (lock->end == OFFSET_MAX) ? 0 : lock->end - lock->start + 1;
  3233. flock->l_pid = lock->pid;
  3234. }
/* Shared by FLUSH and RELEASE(flush): call the filesystem's flush and
 * also release this owner's POSIX locks via an F_UNLCK covering the
 * whole file.  If the filesystem implements lock() (its result is not
 * -ENOSYS) the local lock table is updated, and an -ENOSYS from
 * flush() is masked: FLUSH is still required for the lock cleanup. */
static int fuse_flush_common(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
			     const char *path, struct fuse_file_info *fi)
{
	struct fuse_intr_data d;
	struct flock lock;
	struct lock l;
	int err;
	int errlock;

	fuse_prepare_interrupt(f, req, &d);
	/* l_start/l_len left zero => unlock the entire file. */
	memset(&lock, 0, sizeof(lock));
	lock.l_type = F_UNLCK;
	lock.l_whence = SEEK_SET;
	err = fuse_fs_flush(f->fs, path, fi);
	errlock = fuse_fs_lock(f->fs, path, fi, F_SETLK, &lock);
	fuse_finish_interrupt(f, req, &d);

	if (errlock != -ENOSYS) {
		flock_to_lock(&lock, &l);
		l.owner = fi->lock_owner;
		pthread_mutex_lock(&f->lock);
		locks_insert(get_node(f, ino), &l);
		pthread_mutex_unlock(&f->lock);

		/* if op.lock() is defined FLUSH is needed regardless
		   of op.flush() */
		if (err == -ENOSYS)
			err = 0;
	}
	return err;
}
/* RELEASE handler.  When the kernel requests flush-on-close
 * (fi->flush), run the common flush first; -ENOSYS ("unimplemented")
 * is not treated as an error.  The release itself cannot report
 * failure, so only the flush result is returned to the kernel. */
static void fuse_lib_release(fuse_req_t req, fuse_ino_t ino,
			     struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	char *path;
	int err = 0;

	get_path_nullok(f, ino, &path);
	if (fi->flush) {
		err = fuse_flush_common(f, req, ino, path, fi);
		if (err == -ENOSYS)
			err = 0;
	}

	fuse_prepare_interrupt(f, req, &d);
	fuse_do_release(f, ino, path, fi);
	fuse_finish_interrupt(f, req, &d);
	free_path(f, ino, path);

	reply_err(req, err);
}
  3282. static void fuse_lib_flush(fuse_req_t req, fuse_ino_t ino,
  3283. struct fuse_file_info *fi)
  3284. {
  3285. struct fuse *f = req_fuse_prepare(req);
  3286. char *path;
  3287. int err;
  3288. get_path_nullok(f, ino, &path);
  3289. err = fuse_flush_common(f, req, ino, path, fi);
  3290. free_path(f, ino, path);
  3291. reply_err(req, err);
  3292. }
  3293. static int fuse_lock_common(fuse_req_t req, fuse_ino_t ino,
  3294. struct fuse_file_info *fi, struct flock *lock,
  3295. int cmd)
  3296. {
  3297. struct fuse *f = req_fuse_prepare(req);
  3298. char *path;
  3299. int err;
  3300. err = get_path_nullok(f, ino, &path);
  3301. if (!err) {
  3302. struct fuse_intr_data d;
  3303. fuse_prepare_interrupt(f, req, &d);
  3304. err = fuse_fs_lock(f->fs, path, fi, cmd, lock);
  3305. fuse_finish_interrupt(f, req, &d);
  3306. free_path(f, ino, path);
  3307. }
  3308. return err;
  3309. }
/* GETLK handler: first consult the local lock table for a conflicting
 * lock held by another owner; only when none is found is the query
 * forwarded to the filesystem.  On a hit, *lock is rewritten to
 * describe the conflicting lock before replying. */
static void fuse_lib_getlk(fuse_req_t req, fuse_ino_t ino,
			   struct fuse_file_info *fi, struct flock *lock)
{
	int err;
	struct lock l;
	struct lock *conflict;
	struct fuse *f = req_fuse(req);

	flock_to_lock(lock, &l);
	l.owner = fi->lock_owner;

	pthread_mutex_lock(&f->lock);
	conflict = locks_conflict(get_node(f, ino), &l);
	if (conflict)
		lock_to_flock(conflict, lock);
	pthread_mutex_unlock(&f->lock);

	if (!conflict)
		err = fuse_lock_common(req, ino, fi, lock, F_GETLK);
	else
		err = 0;

	if (!err)
		fuse_reply_lock(req, lock);
	else
		reply_err(req, err);
}
  3333. static void fuse_lib_setlk(fuse_req_t req, fuse_ino_t ino,
  3334. struct fuse_file_info *fi, struct flock *lock,
  3335. int sleep)
  3336. {
  3337. int err = fuse_lock_common(req, ino, fi, lock,
  3338. sleep ? F_SETLKW : F_SETLK);
  3339. if (!err) {
  3340. struct fuse *f = req_fuse(req);
  3341. struct lock l;
  3342. flock_to_lock(lock, &l);
  3343. l.owner = fi->lock_owner;
  3344. pthread_mutex_lock(&f->lock);
  3345. locks_insert(get_node(f, ino), &l);
  3346. pthread_mutex_unlock(&f->lock);
  3347. }
  3348. reply_err(req, err);
  3349. }
  3350. static void fuse_lib_flock(fuse_req_t req, fuse_ino_t ino,
  3351. struct fuse_file_info *fi, int op)
  3352. {
  3353. struct fuse *f = req_fuse_prepare(req);
  3354. char *path;
  3355. int err;
  3356. err = get_path_nullok(f, ino, &path);
  3357. if (err == 0) {
  3358. struct fuse_intr_data d;
  3359. fuse_prepare_interrupt(f, req, &d);
  3360. err = fuse_fs_flock(f->fs, path, fi, op);
  3361. fuse_finish_interrupt(f, req, &d);
  3362. free_path(f, ino, path);
  3363. }
  3364. reply_err(req, err);
  3365. }
  3366. static void fuse_lib_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
  3367. uint64_t idx)
  3368. {
  3369. struct fuse *f = req_fuse_prepare(req);
  3370. struct fuse_intr_data d;
  3371. char *path;
  3372. int err;
  3373. err = get_path(f, ino, &path);
  3374. if (!err) {
  3375. fuse_prepare_interrupt(f, req, &d);
  3376. err = fuse_fs_bmap(f->fs, path, blocksize, &idx);
  3377. fuse_finish_interrupt(f, req, &d);
  3378. free_path(f, ino, path);
  3379. }
  3380. if (!err)
  3381. fuse_reply_bmap(req, idx);
  3382. else
  3383. reply_err(req, err);
  3384. }
/* IOCTL handler (restricted mode only; FUSE_IOCTL_UNRESTRICTED is
 * rejected with -EPERM).  For directory ioctls the real handle comes
 * from the directory handle.  A single scratch buffer serves for both
 * directions: the assert documents that when both sizes are nonzero
 * they must match, so the input can be copied into the output buffer
 * and passed down once. */
static void fuse_lib_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd, void *arg,
			   struct fuse_file_info *llfi, unsigned int flags,
			   const void *in_buf, uint32_t in_bufsz,
			   uint32_t out_bufsz_)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_file_info fi;
	char *path, *out_buf = NULL;
	int err;
	uint32_t out_bufsz = out_bufsz_;

	err = -EPERM;
	if (flags & FUSE_IOCTL_UNRESTRICTED)
		goto err;

	if (flags & FUSE_IOCTL_DIR)
		get_dirhandle(llfi, &fi);
	else
		fi = *llfi;

	if (out_bufsz) {
		err = -ENOMEM;
		out_buf = malloc(out_bufsz);
		if (!out_buf)
			goto err;
	}

	assert(!in_bufsz || !out_bufsz || in_bufsz == out_bufsz);
	if (out_buf)
		memcpy(out_buf, in_buf, in_bufsz);

	err = get_path_nullok(f, ino, &path);
	if (err)
		goto err;

	fuse_prepare_interrupt(f, req, &d);
	/* GNU ?: — use out_buf when present, else the (const) input. */
	err = fuse_fs_ioctl(f->fs, path, cmd, arg, &fi, flags,
			    out_buf ?: (void *)in_buf, &out_bufsz);
	fuse_finish_interrupt(f, req, &d);
	free_path(f, ino, path);

	fuse_reply_ioctl(req, err, out_buf, out_bufsz);
	goto out;
err:
	reply_err(req, err);
out:
	free(out_buf);
}
  3427. static void fuse_lib_poll(fuse_req_t req, fuse_ino_t ino,
  3428. struct fuse_file_info *fi, struct fuse_pollhandle *ph)
  3429. {
  3430. struct fuse *f = req_fuse_prepare(req);
  3431. struct fuse_intr_data d;
  3432. char *path;
  3433. int err;
  3434. unsigned revents = 0;
  3435. err = get_path_nullok(f, ino, &path);
  3436. if (!err) {
  3437. fuse_prepare_interrupt(f, req, &d);
  3438. err = fuse_fs_poll(f->fs, path, fi, ph, &revents);
  3439. fuse_finish_interrupt(f, req, &d);
  3440. free_path(f, ino, path);
  3441. }
  3442. if (!err)
  3443. fuse_reply_poll(req, revents);
  3444. else
  3445. reply_err(req, err);
  3446. }
  3447. static void fuse_lib_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
  3448. off_t offset, off_t length, struct fuse_file_info *fi)
  3449. {
  3450. struct fuse *f = req_fuse_prepare(req);
  3451. struct fuse_intr_data d;
  3452. char *path;
  3453. int err;
  3454. err = get_path_nullok(f, ino, &path);
  3455. if (!err) {
  3456. fuse_prepare_interrupt(f, req, &d);
  3457. err = fuse_fs_fallocate(f->fs, path, mode, offset, length, fi);
  3458. fuse_finish_interrupt(f, req, &d);
  3459. free_path(f, ino, path);
  3460. }
  3461. reply_err(req, err);
  3462. }
  3463. static int clean_delay(struct fuse *f)
  3464. {
  3465. /*
  3466. * This is calculating the delay between clean runs. To
  3467. * reduce the number of cleans we are doing them 10 times
  3468. * within the remember window.
  3469. */
  3470. int min_sleep = 60;
  3471. int max_sleep = 3600;
  3472. int sleep_time = f->conf.remember / 10;
  3473. if (sleep_time > max_sleep)
  3474. return max_sleep;
  3475. if (sleep_time < min_sleep)
  3476. return min_sleep;
  3477. return sleep_time;
  3478. }
/* Walk the LRU list of remembered (kernel-forgotten) nodes and drop
 * those whose forget_time is older than the remember window.  The
 * list is ordered oldest-first, so the walk stops at the first node
 * that is still fresh.  Returns the delay in seconds until the next
 * cleaning pass (see clean_delay()). */
int fuse_clean_cache(struct fuse *f)
{
	struct node_lru *lnode;
	struct list_head *curr, *next;
	struct node *node;
	struct timespec now;

	pthread_mutex_lock(&f->lock);

	curr_time(&now);

	for (curr = f->lru_table.next; curr != &f->lru_table; curr = next) {
		double age;

		/* Save the successor before the entry may be freed. */
		next = curr->next;
		lnode = list_entry(curr, struct node_lru, lru);
		node = &lnode->node;

		age = diff_timespec(&now, &lnode->forget_time);
		if (age <= f->conf.remember)
			break;

		assert(node->nlookup == 1);

		/* Don't forget active directories */
		if (node->refctr > 1)
			continue;

		node->nlookup = 0;
		unhash_name(f, node);
		unref_node(f, node);
	}
	pthread_mutex_unlock(&f->lock);
	return clean_delay(f);
}
/* Low-level operation table: maps every fuse_lowlevel callback onto
 * the path-based wrappers above.  getlk/setlk are NULLed out in
 * fuse_new_common() when the filesystem does not implement lock(). */
static struct fuse_lowlevel_ops fuse_path_ops =
{
	.init = fuse_lib_init,
	.destroy = fuse_lib_destroy,
	.lookup = fuse_lib_lookup,
	.forget = fuse_lib_forget,
	.forget_multi = fuse_lib_forget_multi,
	.getattr = fuse_lib_getattr,
	.setattr = fuse_lib_setattr,
	.access = fuse_lib_access,
	.readlink = fuse_lib_readlink,
	.mknod = fuse_lib_mknod,
	.mkdir = fuse_lib_mkdir,
	.unlink = fuse_lib_unlink,
	.rmdir = fuse_lib_rmdir,
	.symlink = fuse_lib_symlink,
	.rename = fuse_lib_rename,
	.link = fuse_lib_link,
	.create = fuse_lib_create,
	.open = fuse_lib_open,
	.read = fuse_lib_read,
	.write_buf = fuse_lib_write_buf,
	.flush = fuse_lib_flush,
	.release = fuse_lib_release,
	.fsync = fuse_lib_fsync,
	.opendir = fuse_lib_opendir,
	.readdir = fuse_lib_readdir,
	.readdir_plus = fuse_lib_readdir_plus,
	.releasedir = fuse_lib_releasedir,
	.fsyncdir = fuse_lib_fsyncdir,
	.statfs = fuse_lib_statfs,
	.setxattr = fuse_lib_setxattr,
	.getxattr = fuse_lib_getxattr,
	.listxattr = fuse_lib_listxattr,
	.removexattr = fuse_lib_removexattr,
	.getlk = fuse_lib_getlk,
	.setlk = fuse_lib_setlk,
	.flock = fuse_lib_flock,
	.bmap = fuse_lib_bmap,
	.ioctl = fuse_lib_ioctl,
	.poll = fuse_lib_poll,
	.fallocate = fuse_lib_fallocate,
	.copy_file_range = fuse_lib_copy_file_range,
};
/* Nonzero once the session has been told to exit. */
int fuse_exited(struct fuse *f)
{
	return fuse_session_exited(f->se);
}

/* Expose the underlying low-level session. */
struct fuse_session*
fuse_get_session(struct fuse *f)
{
	return f->se;
}

/* Request termination of the session's event loop. */
void
fuse_exit(struct fuse *f)
{
	fuse_session_exit(f->se);
}

/* Per-request context (uid/gid/pid, private data) of the operation
 * currently being handled by this thread. */
struct
fuse_context*
fuse_get_context(void)
{
	return &fuse_get_context_internal()->ctx;
}

/* Nonzero when the currently handled request has been interrupted. */
int fuse_interrupted(void)
{
	return fuse_req_interrupted(fuse_get_context_internal()->req);
}

/* Kept only for API compatibility; the setter has no effect. */
void fuse_set_getcontext_func(struct fuse_context *(*func)(void))
{
	(void) func;
	/* no-op */
}
/* Keys for options needing special handling in fuse_lib_opt_proc(). */
enum {
	KEY_HELP,
};

/* Map an option template onto a struct fuse_config member. */
#define FUSE_LIB_OPT(t, p, v) { t, offsetof(struct fuse_config, p), v }

static const struct fuse_opt fuse_lib_opts[] =
{
	FUSE_OPT_KEY("-h", KEY_HELP),
	FUSE_OPT_KEY("--help", KEY_HELP),
	/* debug/-d set conf.debug here and are also kept for the
	   lower layers to see. */
	FUSE_OPT_KEY("debug", FUSE_OPT_KEY_KEEP),
	FUSE_OPT_KEY("-d", FUSE_OPT_KEY_KEEP),
	FUSE_LIB_OPT("debug", debug, 1),
	FUSE_LIB_OPT("-d", debug, 1),
	FUSE_LIB_OPT("use_ino", use_ino, 1),
	/* The bare "umask="/"uid="/"gid=" templates set the enable
	   flag; the "%o"/"%d" variants parse the value itself. */
	FUSE_LIB_OPT("umask=", set_mode, 1),
	FUSE_LIB_OPT("umask=%o", umask, 0),
	FUSE_LIB_OPT("uid=", set_uid, 1),
	FUSE_LIB_OPT("uid=%d", uid, 0),
	FUSE_LIB_OPT("gid=", set_gid, 1),
	FUSE_LIB_OPT("gid=%d", gid, 0),
	FUSE_LIB_OPT("entry_timeout=%lf", entry_timeout, 0),
	FUSE_LIB_OPT("attr_timeout=%lf", attr_timeout, 0),
	FUSE_LIB_OPT("negative_timeout=%lf", negative_timeout, 0),
	/* noforget == remember forever (-1 sentinel). */
	FUSE_LIB_OPT("noforget", remember, -1),
	FUSE_LIB_OPT("remember=%u", remember, 0),
	FUSE_LIB_OPT("nopath", nopath, 1),
	FUSE_LIB_OPT("intr", intr, 1),
	FUSE_LIB_OPT("intr_signal=%d", intr_signal, 0),
	FUSE_LIB_OPT("threads=%d", threads, 0),
	FUSE_OPT_END
};
/* Print the -o option summary for the high-level library to stderr;
 * matches the fuse_lib_opts table above. */
static void fuse_lib_help(void)
{
	fprintf(stderr,
" -o use_ino let filesystem set inode numbers\n"
" -o umask=M set file permissions (octal)\n"
" -o uid=N set file owner\n"
" -o gid=N set file group\n"
" -o entry_timeout=T cache timeout for names (1.0s)\n"
" -o negative_timeout=T cache timeout for deleted names (0.0s)\n"
" -o attr_timeout=T cache timeout for attributes (1.0s)\n"
" -o noforget never forget cached inodes\n"
" -o remember=T remember cached inodes for T seconds (0s)\n"
" -o nopath don't supply path if not necessary\n"
" -o intr allow requests to be interrupted\n"
" -o intr_signal=NUM signal to send on interrupt (%i)\n"
" -o threads=NUM number of worker threads. 0 = autodetect.\n"
" Negative values autodetect then divide by\n"
" absolute value. default = 0\n"
"\n", FUSE_DEFAULT_INTR_SIGNAL);
}
  3629. static int fuse_lib_opt_proc(void *data, const char *arg, int key,
  3630. struct fuse_args *outargs)
  3631. {
  3632. (void) arg; (void) outargs;
  3633. if (key == KEY_HELP) {
  3634. struct fuse_config *conf = (struct fuse_config *) data;
  3635. fuse_lib_help();
  3636. conf->help = 1;
  3637. }
  3638. return 1;
  3639. }
  3640. static int fuse_init_intr_signal(int signum, int *installed)
  3641. {
  3642. struct sigaction old_sa;
  3643. if (sigaction(signum, NULL, &old_sa) == -1) {
  3644. perror("fuse: cannot get old signal handler");
  3645. return -1;
  3646. }
  3647. if (old_sa.sa_handler == SIG_DFL) {
  3648. struct sigaction sa;
  3649. memset(&sa, 0, sizeof(struct sigaction));
  3650. sa.sa_handler = fuse_intr_sighandler;
  3651. sigemptyset(&sa.sa_mask);
  3652. if (sigaction(signum, &sa, NULL) == -1) {
  3653. perror("fuse: cannot set interrupt signal handler");
  3654. return -1;
  3655. }
  3656. *installed = 1;
  3657. }
  3658. return 0;
  3659. }
  3660. static void fuse_restore_intr_signal(int signum)
  3661. {
  3662. struct sigaction sa;
  3663. memset(&sa, 0, sizeof(struct sigaction));
  3664. sa.sa_handler = SIG_DFL;
  3665. sigaction(signum, &sa, NULL);
  3666. }
  3667. struct fuse_fs *fuse_fs_new(const struct fuse_operations *op, size_t op_size,
  3668. void *user_data)
  3669. {
  3670. struct fuse_fs *fs;
  3671. if (sizeof(struct fuse_operations) < op_size) {
  3672. fprintf(stderr, "fuse: warning: library too old, some operations may not not work\n");
  3673. op_size = sizeof(struct fuse_operations);
  3674. }
  3675. fs = (struct fuse_fs *) calloc(1, sizeof(struct fuse_fs));
  3676. if (!fs) {
  3677. fprintf(stderr, "fuse: failed to allocate fuse_fs object\n");
  3678. return NULL;
  3679. }
  3680. fs->user_data = user_data;
  3681. if (op)
  3682. memcpy(&fs->op, op, op_size);
  3683. return fs;
  3684. }
  3685. static int node_table_init(struct node_table *t)
  3686. {
  3687. t->size = NODE_TABLE_MIN_SIZE;
  3688. t->array = (struct node **) calloc(1, sizeof(struct node *) * t->size);
  3689. if (t->array == NULL) {
  3690. fprintf(stderr, "fuse: memory allocation failed\n");
  3691. return -1;
  3692. }
  3693. t->use = 0;
  3694. t->split = 0;
  3695. return 0;
  3696. }
  3697. static void *fuse_prune_nodes(void *fuse)
  3698. {
  3699. struct fuse *f = fuse;
  3700. int sleep_time;
  3701. while(1) {
  3702. sleep_time = fuse_clean_cache(f);
  3703. sleep(sleep_time);
  3704. }
  3705. return NULL;
  3706. }
  3707. int fuse_start_cleanup_thread(struct fuse *f)
  3708. {
  3709. if (lru_enabled(f))
  3710. return fuse_start_thread(&f->prune_thread, fuse_prune_nodes, f);
  3711. return 0;
  3712. }
/* Cancel and reap the prune thread started by
 * fuse_start_cleanup_thread().  pthread_cancel is issued while
 * holding f->lock — NOTE(review): presumably so the thread cannot be
 * cancelled while it owns f->lock inside fuse_clean_cache(); confirm
 * that sleep() is its only cancellation point. */
void fuse_stop_cleanup_thread(struct fuse *f)
{
	if (lru_enabled(f)) {
		pthread_mutex_lock(&f->lock);
		pthread_cancel(f->prune_thread);
		pthread_mutex_unlock(&f->lock);
		pthread_join(f->prune_thread, NULL);
	}
}
/* Create a high-level fuse instance bound to an already-open
 * /dev/fuse descriptor: build the filesystem object, parse library
 * options, create the low-level session and node tables, and install
 * the root node.  On any failure the partially built state is unwound
 * through the goto chain and NULL is returned. */
struct fuse *
fuse_new_common(int devfuse_fd, struct fuse_args *args,
		const struct fuse_operations *op,
		size_t op_size, void *user_data)
{
	struct fuse *f;
	struct node *root;
	struct fuse_fs *fs;
	struct fuse_lowlevel_ops llop = fuse_path_ops;

	if (fuse_create_context_key() == -1)
		goto out;

	f = (struct fuse *) calloc(1, sizeof(struct fuse));
	if (f == NULL) {
		fprintf(stderr, "fuse: failed to allocate fuse object\n");
		goto out_delete_context_key;
	}

	fs = fuse_fs_new(op, op_size, user_data);
	if (!fs)
		goto out_free;
	f->fs = fs;
	f->nullpath_ok = fs->op.flag_nullpath_ok;
	f->conf.nopath = fs->op.flag_nopath;
	f->utime_omit_ok = fs->op.flag_utime_omit_ok;

	/* Oh f**k, this is ugly! */
	/* Filesystems without lock() must not advertise getlk/setlk. */
	if (!fs->op.lock) {
		llop.getlk = NULL;
		llop.setlk = NULL;
	}

	/* Defaults; may be overridden by the option parse below. */
	f->conf.entry_timeout = 1.0;
	f->conf.attr_timeout = 1.0;
	f->conf.negative_timeout = 0.0;
	f->conf.intr_signal = FUSE_DEFAULT_INTR_SIGNAL;

	f->pagesize = getpagesize();
	init_list_head(&f->partial_slabs);
	init_list_head(&f->full_slabs);
	init_list_head(&f->lru_table);

	if (fuse_opt_parse(args, &f->conf, fuse_lib_opts,
			   fuse_lib_opt_proc) == -1)
		goto out_free_fs;

	f->se = fuse_lowlevel_new_common(args, &llop, sizeof(llop), f);
	if (f->se == NULL) {
		goto out_free_fs;
	}

	f->se->devfuse_fd = devfuse_fd;

	if (f->conf.debug) {
		fprintf(stderr, "nullpath_ok: %i\n", f->nullpath_ok);
		fprintf(stderr, "nopath: %i\n", f->conf.nopath);
		fprintf(stderr, "utime_omit_ok: %i\n", f->utime_omit_ok);
	}

	/* Trace topmost layer by default */
	srand(time(NULL));
	f->fs->debug = f->conf.debug;
	f->ctr = 0;
	/* Random generation number distinguishes node ids across runs. */
	f->generation = rand64();
	if (node_table_init(&f->name_table) == -1)
		goto out_free_session;

	if (node_table_init(&f->id_table) == -1)
		goto out_free_name_table;

	fuse_mutex_init(&f->lock);

	root = alloc_node(f);
	if (root == NULL) {
		fprintf(stderr, "fuse: memory allocation failed\n");
		goto out_free_id_table;
	}
	if (lru_enabled(f)) {
		struct node_lru *lnode = node_lru(root);
		init_list_head(&lnode->lru);
	}

	strcpy(root->inline_name, "/");
	root->name = root->inline_name;

	if (f->conf.intr &&
	    fuse_init_intr_signal(f->conf.intr_signal,
				  &f->intr_installed) == -1)
		goto out_free_root;

	root->parent = NULL;
	root->nodeid = FUSE_ROOT_ID;
	inc_nlookup(root);
	hash_id(f, root);

	return f;

out_free_root:
	free(root);
out_free_id_table:
	free(f->id_table.array);
out_free_name_table:
	free(f->name_table.array);
out_free_session:
	fuse_session_destroy(f->se);
out_free_fs:
	/* Horrible compatibility hack to stop the destructor from being
	   called on the filesystem without init being called first */
	fs->op.destroy = NULL;
	fuse_fs_destroy(f->fs);
out_free:
	free(f);
out_delete_context_key:
	fuse_delete_context_key();
out:
	return NULL;
}
/* Public constructor: thin wrapper around fuse_new_common(). */
struct fuse *fuse_new(int devfuse_fd, struct fuse_args *args,
		      const struct fuse_operations *op, size_t op_size,
		      void *user_data)
{
	return fuse_new_common(devfuse_fd, args, op, op_size, user_data);
}
/* Tear down a fuse instance: restore the interrupt signal handler,
 * let the filesystem release handles of hidden (unlinked-but-open)
 * nodes, free every node in the id table, then destroy the session
 * and the per-thread context key. */
void fuse_destroy(struct fuse *f)
{
	size_t i;

	if (f->conf.intr && f->intr_installed)
		fuse_restore_intr_signal(f->conf.intr_signal);

	if (f->fs) {
		struct fuse_context_i *c = fuse_get_context_internal();

		/* Provide a minimal context for the callbacks below. */
		memset(c, 0, sizeof(*c));
		c->ctx.fuse = f;

		for (i = 0; i < f->id_table.size; i++) {
			struct node *node;

			for (node = f->id_table.array[i]; node != NULL; node = node->id_next)
			{
				if (node->is_hidden)
					fuse_fs_free_hide(f->fs,node->hidden_fh);
			}
		}
	}

	for (i = 0; i < f->id_table.size; i++) {
		struct node *node;
		struct node *next;

		for (node = f->id_table.array[i]; node != NULL; node = next) {
			next = node->id_next;
			free_node(f, node);
			f->id_table.use--;
		}
	}

	/* All nodes gone => the name-slab lists must be empty. */
	assert(list_empty(&f->partial_slabs));
	assert(list_empty(&f->full_slabs));

	free(f->id_table.array);
	free(f->name_table.array);
	pthread_mutex_destroy(&f->lock);
	fuse_session_destroy(f->se);
	free(f);
	fuse_delete_context_key();
}
/* Number of worker threads requested via -o threads=N. */
int
fuse_config_num_threads(const struct fuse *fuse_)
{
	return fuse_->conf.threads;
}

/* Positive-entry (name) cache timeout, in seconds. */
void
fuse_config_set_entry_timeout(struct fuse *fuse_,
			      const double entry_timeout_)
{
	fuse_->conf.entry_timeout = entry_timeout_;
}

double
fuse_config_get_entry_timeout(const struct fuse *fuse_)
{
	return fuse_->conf.entry_timeout;
}

/* Negative-entry (deleted name) cache timeout, in seconds. */
void
fuse_config_set_negative_entry_timeout(struct fuse *fuse_,
				       const double entry_timeout_)
{
	fuse_->conf.negative_timeout = entry_timeout_;
}

double
fuse_config_get_negative_entry_timeout(const struct fuse *fuse_)
{
	return fuse_->conf.negative_timeout;
}

/* Attribute cache timeout, in seconds. */
void
fuse_config_set_attr_timeout(struct fuse *fuse_,
			     const double attr_timeout_)
{
	fuse_->conf.attr_timeout = attr_timeout_;
}

double
fuse_config_get_attr_timeout(const struct fuse *fuse_)
{
	return fuse_->conf.attr_timeout;
}