You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

4160 lines
84 KiB

10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
2 years ago
10 months ago
2 years ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
10 months ago
2 years ago
2 years ago
10 months ago
10 months ago
10 months ago
10 months ago
  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU LGPLv2.
  5. See the file COPYING.LIB
  6. */
  7. /* For pthread_rwlock_t */
  8. #ifndef _GNU_SOURCE
  9. #define _GNU_SOURCE
  10. #endif
  11. #include "crc32b.h"
  12. #include "fuse_node.h"
  13. #include "khash.h"
  14. #include "kvec.h"
  15. #include "node.h"
  16. #include "config.h"
  17. #include "fuse_dirents.h"
  18. #include "fuse_i.h"
  19. #include "fuse_kernel.h"
  20. #include "fuse_lowlevel.h"
  21. #include "fuse_misc.h"
  22. #include "fuse_opt.h"
  23. #include "fuse_pollhandle.h"
  24. #include "fuse_msgbuf.hpp"
  25. #include <assert.h>
  26. #include <dlfcn.h>
  27. #include <errno.h>
  28. #include <fcntl.h>
  29. #include <inttypes.h>
  30. #include <limits.h>
  31. #include <poll.h>
  32. #include <signal.h>
  33. #include <stdbool.h>
  34. #include <stddef.h>
  35. #include <stdint.h>
  36. #include <stdio.h>
  37. #include <stdlib.h>
  38. #include <string.h>
  39. #include <sys/file.h>
  40. #include <sys/mman.h>
  41. #include <sys/param.h>
  42. #include <sys/time.h>
  43. #include <sys/uio.h>
  44. #include <syslog.h>
  45. #include <time.h>
  46. #include <unistd.h>
  47. #ifdef HAVE_MALLOC_TRIM
  48. #include <malloc.h>
  49. #endif
/* Inode value reported to the kernel when the real inode is unknown. */
#define FUSE_UNKNOWN_INO UINT64_MAX
/* Largest representable file offset (2^63 - 1). */
#define OFFSET_MAX 0x7fffffffffffffffLL
/* The node hash tables are never shrunk below this many buckets. */
#define NODE_TABLE_MIN_SIZE 8192
/* Pointer to the payload that immediately follows a fixed-size header. */
#define PARAM(inarg) ((void*)(((char*)(inarg)) + sizeof(*(inarg))))
/* Non-zero enables metrics logging. */
static int g_LOG_METRICS = 0;
/* Mount-time configuration parsed from options. */
struct fuse_config
{
  unsigned int uid;    /* uid override, applied when set_uid != 0 */
  unsigned int gid;    /* gid override, applied when set_gid != 0 */
  unsigned int umask;  /* mode mask, applied when set_mode != 0 */
  int remember;        /* >0 enables remembering forgotten nodes */
  int debug;
  int nogc;            /* presumably disables node garbage collection — confirm at use site */
  int set_mode;
  int set_uid;
  int set_gid;
  int help;
};
/* Holds the user-supplied filesystem operation callbacks. */
struct fuse_fs
{
  struct fuse_operations op;
};
/* One waiter queued for path/treelock acquisition. The *2 members are
   used only by two-path operations (e.g. rename). */
struct lock_queue_element
{
  struct lock_queue_element *next;
  pthread_cond_t cond;     /* signalled when the wait may finish */
  uint64_t nodeid1;
  const char *name1;
  char **path1;
  node_t **wnode1;
  uint64_t nodeid2;
  const char *name2;
  char **path2;
  node_t **wnode2;
  int err;                 /* result of the (re)tried path lookup */
  bool done : 1;           /* set when err is valid and waiter may wake */
};
/* Linear-hashing bucket table used for both the id and name tables. */
struct node_table
{
  node_t **array;  /* bucket heads (chained via id_next/name_next) */
  size_t use;      /* number of nodes stored */
  size_t size;     /* number of buckets */
  size_t split;    /* buckets below this index have been rehashed */
};
/* Classic intrusive-list container_of: recover the enclosing struct
   from a pointer to one of its members. */
#define container_of(ptr,type,member) ({ \
  const typeof( ((type *)0)->member ) *__mptr = (ptr); \
  (type *)( (char *)__mptr - offsetof(type,member) );})
#define list_entry(ptr,type,member) \
  container_of(ptr,type,member)
/* Circular doubly-linked list node. */
struct list_head
{
  struct list_head *next;
  struct list_head *prev;
};
typedef struct remembered_node_t remembered_node_t;
/* Entry in the "remembered" (forgotten-but-cached) node list. */
struct remembered_node_t
{
  node_t *node;
  time_t time;  /* when the node was remembered (see forget_node) */
};
typedef struct nodeid_gen_t nodeid_gen_t;
/* Monotonic nodeid source plus the generation reported to the kernel. */
struct nodeid_gen_t
{
  uint64_t nodeid;
  uint64_t generation;
};
/* Top-level state for one mounted filesystem. */
struct fuse
{
  struct fuse_session *se;
  struct node_table name_table;  /* (parent,name) -> node */
  struct node_table id_table;    /* nodeid -> node */
  nodeid_gen_t nodeid_gen;
  unsigned int hidectr;          /* counter for hidden-file names */
  pthread_mutex_t lock;          /* guards tables, nodes and lockq */
  struct fuse_config conf;
  struct fuse_fs *fs;
  struct lock_queue_element *lockq;  /* waiters for path/treelocks */
  pthread_t maintenance_thread;
  kvec_t(remembered_node_t) remembered_nodes;
};
/* One POSIX record lock held by an owner on a byte range. */
struct lock
{
  int type;
  off_t start;
  off_t end;
  pid_t pid;
  uint64_t owner;
  struct lock *next;
};
/* treelock values: positive = reader count, TREELOCK_WRITE = exclusive,
   values below TREELOCK_WAIT_OFFSET mark readers with queued waiters. */
#define TREELOCK_WRITE -1
#define TREELOCK_WAIT_OFFSET INT_MIN
/* Per-open directory handle: buffered dirents plus the fs handle. */
struct fuse_dh
{
  pthread_mutex_t lock;
  uint64_t fh;
  fuse_dirents_t d;
};
/* Thread-local request context (stored under fuse_context_key). */
struct fuse_context_i
{
  struct fuse_context ctx;
  fuse_req_t req;
};
static pthread_key_t fuse_context_key;
static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER;
static int fuse_context_ref;  /* number of fuse instances sharing the key */
/*
  Why was the nodeid:generation logic simplified?
  nodeid is uint64_t: max value of 18446744073709551616
  If nodes were created at a rate of 1048576 per second it would take
  over 500 thousand years to roll over. I'm fine with risking that.
*/
  161. static
  162. uint64_t
  163. generate_nodeid(nodeid_gen_t *ng_)
  164. {
  165. ng_->nodeid++;
  166. return ng_->nodeid;
  167. }
  168. static
  169. char*
  170. filename_strdup(struct fuse *f_,
  171. const char *fn_)
  172. {
  173. return strdup(fn_);
  174. }
  175. static
  176. void
  177. filename_free(struct fuse *f_,
  178. char *fn_)
  179. {
  180. free(fn_);
  181. }
  182. static
  183. void*
  184. fuse_hdr_arg(const struct fuse_in_header *hdr_)
  185. {
  186. return (void*)&hdr_[1];
  187. }
  188. static
  189. void
  190. list_add(struct list_head *new_,
  191. struct list_head *prev_,
  192. struct list_head *next_)
  193. {
  194. next_->prev = new_;
  195. new_->next = next_;
  196. new_->prev = prev_;
  197. prev_->next = new_;
  198. }
/* Insert new_ immediately after head_ (stack order). */
static
inline
void
list_add_head(struct list_head *new_,
              struct list_head *head_)
{
  list_add(new_,head_,head_->next);
}
/* Insert new_ immediately before head_ (queue order). */
static
inline
void
list_add_tail(struct list_head *new_,
              struct list_head *head_)
{
  list_add(new_,head_->prev,head_);
}
  215. static
  216. inline
  217. void
  218. list_del(struct list_head *entry)
  219. {
  220. struct list_head *prev = entry->prev;
  221. struct list_head *next = entry->next;
  222. next->prev = prev;
  223. prev->next = next;
  224. }
  225. static
  226. size_t
  227. id_hash(struct fuse *f,
  228. uint64_t ino)
  229. {
  230. uint64_t hash = ((uint32_t)ino * 2654435761U) % f->id_table.size;
  231. uint64_t oldhash = hash % (f->id_table.size / 2);
  232. if(oldhash >= f->id_table.split)
  233. return oldhash;
  234. else
  235. return hash;
  236. }
  237. static
  238. node_t*
  239. get_node_nocheck(struct fuse *f,
  240. uint64_t nodeid)
  241. {
  242. size_t hash = id_hash(f,nodeid);
  243. node_t *node;
  244. for(node = f->id_table.array[hash]; node != NULL; node = node->id_next)
  245. if(node->nodeid == nodeid)
  246. return node;
  247. return NULL;
  248. }
/* Look up a node by id; a missing node is an internal invariant
   violation and aborts the process. */
static
node_t*
get_node(struct fuse *f,
         const uint64_t nodeid)
{
  node_t *node = get_node_nocheck(f,nodeid);

  if(!node)
    {
      fprintf(stderr,"fuse internal error: node %llu not found\n",
              (unsigned long long)nodeid);
      abort();
    }

  return node;
}
  263. static
  264. void
  265. remove_remembered_node(struct fuse *f_,
  266. node_t *node_)
  267. {
  268. for(size_t i = 0; i < kv_size(f_->remembered_nodes); i++)
  269. {
  270. if(kv_A(f_->remembered_nodes,i).node != node_)
  271. continue;
  272. kv_delete(f_->remembered_nodes,i);
  273. break;
  274. }
  275. }
  276. static
  277. uint32_t
  278. stat_crc32b(const struct stat *st_)
  279. {
  280. uint32_t crc;
  281. crc = crc32b_start();
  282. crc = crc32b_continue(&st_->st_ino,sizeof(st_->st_ino),crc);
  283. crc = crc32b_continue(&st_->st_size,sizeof(st_->st_size),crc);
  284. crc = crc32b_continue(&st_->st_mtim,sizeof(st_->st_mtim),crc);
  285. crc = crc32b_finish(crc);
  286. return crc;
  287. }
  288. #ifndef CLOCK_MONOTONIC
  289. # define CLOCK_MONOTONIC CLOCK_REALTIME
  290. #endif
  291. static
  292. time_t
  293. current_time()
  294. {
  295. int rv;
  296. struct timespec now;
  297. static clockid_t clockid = CLOCK_MONOTONIC;
  298. rv = clock_gettime(clockid,&now);
  299. if((rv == -1) && (errno == EINVAL))
  300. {
  301. clockid = CLOCK_REALTIME;
  302. rv = clock_gettime(clockid,&now);
  303. }
  304. if(rv == -1)
  305. now.tv_sec = time(NULL);
  306. return now.tv_sec;
  307. }
/* Release a node: free its name, let the filesystem close any hidden
   (unlinked-while-open) handle, then free the node itself. */
static
void
free_node(struct fuse *f_,
          node_t *node_)
{
  filename_free(f_,node_->name);
  /* hidden_fh is non-zero when the file was hidden instead of
     unlinked; the fs must release it. */
  if(node_->hidden_fh)
    f_->fs->op.free_hide(node_->hidden_fh);
  node_free(node_);
}
  318. static
  319. void
  320. node_table_reduce(struct node_table *t)
  321. {
  322. size_t newsize = t->size / 2;
  323. void *newarray;
  324. if(newsize < NODE_TABLE_MIN_SIZE)
  325. return;
  326. newarray = realloc(t->array,sizeof(node_t*) * newsize);
  327. if(newarray != NULL)
  328. t->array = (node_t**)newarray;
  329. t->size = newsize;
  330. t->split = t->size / 2;
  331. }
/* Undo up to 8 steps of the id table's linear-hash split: move each
   upper-half chain back onto its lower-half partner bucket. Called
   when occupancy drops; shrinks the table once fully merged. */
static
void
remerge_id(struct fuse *f)
{
  struct node_table *t = &f->id_table;
  int iter;

  if(t->split == 0)
    node_table_reduce(t);

  for(iter = 8; t->split > 0 && iter; iter--)
    {
      node_t **upper;

      t->split--;
      upper = &t->array[t->split + t->size / 2];
      if(*upper)
        {
          node_t **nodep;

          /* append the upper bucket's chain to the lower bucket */
          for(nodep = &t->array[t->split]; *nodep;
              nodep = &(*nodep)->id_next);
          *nodep = *upper;
          *upper = NULL;
          break;
        }
    }
}
/* Remove a node from the id table; when usage falls below a quarter
   of capacity, merge split buckets back (and eventually shrink). */
static
void
unhash_id(struct fuse *f,
          node_t *node)
{
  node_t **nodep = &f->id_table.array[id_hash(f,node->nodeid)];

  for(; *nodep != NULL; nodep = &(*nodep)->id_next)
    if(*nodep == node)
      {
        *nodep = node->id_next;
        f->id_table.use--;
        if(f->id_table.use < f->id_table.size / 4)
          remerge_id(f);
        return;
      }
}
  372. static
  373. int
  374. node_table_resize(struct node_table *t)
  375. {
  376. size_t newsize = t->size * 2;
  377. void *newarray;
  378. newarray = realloc(t->array,sizeof(node_t*) * newsize);
  379. if(newarray == NULL)
  380. return -1;
  381. t->array = (node_t**)newarray;
  382. memset(t->array + t->size,0,t->size * sizeof(node_t*));
  383. t->size = newsize;
  384. t->split = 0;
  385. return 0;
  386. }
/* Advance the id table's linear-hash split by one bucket: rehash the
   chain at `split` into the enlarged keyspace. When the split reaches
   halfway, the table doubles and the split restarts. */
static
void
rehash_id(struct fuse *f)
{
  struct node_table *t = &f->id_table;
  node_t **nodep;
  node_t **next;
  size_t hash;

  if(t->split == t->size / 2)
    return;

  hash = t->split;
  t->split++;
  for(nodep = &t->array[hash]; *nodep != NULL; nodep = next)
    {
      node_t *node = *nodep;
      size_t newhash = id_hash(f,node->nodeid);

      if(newhash != hash)
        {
          /* move node to its new bucket; nodep stays put */
          next = nodep;
          *nodep = node->id_next;
          node->id_next = t->array[newhash];
          t->array[newhash] = node;
        }
      else
        {
          next = &node->id_next;
        }
    }

  if(t->split == t->size / 2)
    node_table_resize(t);
}
  418. static
  419. void
  420. hash_id(struct fuse *f,
  421. node_t *node)
  422. {
  423. size_t hash;
  424. hash = id_hash(f,node->nodeid);
  425. node->id_next = f->id_table.array[hash];
  426. f->id_table.array[hash] = node;
  427. f->id_table.use++;
  428. if(f->id_table.use >= f->id_table.size / 2)
  429. rehash_id(f);
  430. }
  431. static
  432. size_t
  433. name_hash(struct fuse *f,
  434. uint64_t parent,
  435. const char *name)
  436. {
  437. uint64_t hash = parent;
  438. uint64_t oldhash;
  439. for(; *name; name++)
  440. hash = hash * 31 + (unsigned char)*name;
  441. hash %= f->name_table.size;
  442. oldhash = hash % (f->name_table.size / 2);
  443. if(oldhash >= f->name_table.split)
  444. return oldhash;
  445. else
  446. return hash;
  447. }
static
void
unref_node(struct fuse *f,
           node_t *node);
/* Name-table counterpart of remerge_id(): undo up to 8 split steps,
   folding upper-half chains back into their lower-half buckets. */
static
void
remerge_name(struct fuse *f)
{
  int iter;
  struct node_table *t = &f->name_table;

  if(t->split == 0)
    node_table_reduce(t);

  for(iter = 8; t->split > 0 && iter; iter--)
    {
      node_t **upper;

      t->split--;
      upper = &t->array[t->split + t->size / 2];
      if(*upper)
        {
          node_t **nodep;

          /* append the upper bucket's chain to the lower bucket */
          for(nodep = &t->array[t->split]; *nodep; nodep = &(*nodep)->name_next);
          *nodep = *upper;
          *upper = NULL;
          break;
        }
    }
}
/* Remove a node's (parent,name) entry: drop it from the bucket chain,
   release the parent reference taken by hash_name(), and free the
   name. A named node missing from its bucket is an internal
   inconsistency and aborts. */
static
void
unhash_name(struct fuse *f,
            node_t *node)
{
  if(node->name)
    {
      size_t hash = name_hash(f,node->parent->nodeid,node->name);
      node_t **nodep = &f->name_table.array[hash];

      for(; *nodep != NULL; nodep = &(*nodep)->name_next)
        if(*nodep == node)
          {
            *nodep = node->name_next;
            node->name_next = NULL;
            unref_node(f,node->parent);
            filename_free(f,node->name);
            node->name = NULL;
            node->parent = NULL;
            f->name_table.use--;
            if(f->name_table.use < f->name_table.size / 4)
              remerge_name(f);
            return;
          }

      fprintf(stderr,
              "fuse internal error: unable to unhash node: %llu\n",
              (unsigned long long)node->nodeid);
      abort();
    }
}
/* Name-table counterpart of rehash_id(): advance the linear-hash
   split by one bucket, doubling the table when the split completes. */
static
void
rehash_name(struct fuse *f)
{
  struct node_table *t = &f->name_table;
  node_t **nodep;
  node_t **next;
  size_t hash;

  if(t->split == t->size / 2)
    return;

  hash = t->split;
  t->split++;
  for(nodep = &t->array[hash]; *nodep != NULL; nodep = next)
    {
      node_t *node = *nodep;
      size_t newhash = name_hash(f,node->parent->nodeid,node->name);

      if(newhash != hash)
        {
          /* relocate node; nodep stays so the moved-in tail is seen */
          next = nodep;
          *nodep = node->name_next;
          node->name_next = t->array[newhash];
          t->array[newhash] = node;
        }
      else
        {
          next = &node->name_next;
        }
    }

  if(t->split == t->size / 2)
    node_table_resize(t);
}
/* Give `node` the name `name` under `parentid` and insert it into the
   name table. Takes a reference on the parent. Returns 0, or -1 when
   the name cannot be duplicated (OOM). */
static
int
hash_name(struct fuse *f,
          node_t *node,
          uint64_t parentid,
          const char *name)
{
  size_t hash = name_hash(f,parentid,name);
  node_t *parent = get_node(f,parentid);

  node->name = filename_strdup(f,name);
  if(node->name == NULL)
    return -1;

  parent->refctr++;
  node->parent = parent;
  node->name_next = f->name_table.array[hash];
  f->name_table.array[hash] = node;
  f->name_table.use++;

  if(f->name_table.use >= f->name_table.size / 2)
    rehash_name(f);

  return 0;
}
/* True when the remember option is enabled (forgotten nodes are kept
   around for later reuse). */
static
inline
int
remember_nodes(struct fuse *f_)
{
  return (f_->conf.remember > 0);
}
/* Fully tear a node down: remove its name, its remember-cache entry
   (if any), and its id entry, then free it. The node must not hold a
   treelock. Caller holds f->lock. */
static
void
delete_node(struct fuse *f,
            node_t *node)
{
  assert(node->treelock == 0);
  unhash_name(f,node);
  if(remember_nodes(f))
    remove_remembered_node(f,node);
  unhash_id(f,node);
  node_free(node);
}
  575. static
  576. void
  577. unref_node(struct fuse *f,
  578. node_t *node)
  579. {
  580. assert(node->refctr > 0);
  581. node->refctr--;
  582. if(!node->refctr)
  583. delete_node(f,node);
  584. }
  585. static
  586. uint64_t
  587. rand64(void)
  588. {
  589. uint64_t rv;
  590. rv = rand();
  591. rv <<= 32;
  592. rv |= rand();
  593. return rv;
  594. }
  595. static
  596. node_t*
  597. lookup_node(struct fuse *f,
  598. uint64_t parent,
  599. const char *name)
  600. {
  601. size_t hash;
  602. node_t *node;
  603. hash = name_hash(f,parent,name);
  604. for(node = f->name_table.array[hash]; node != NULL; node = node->name_next)
  605. if(node->parent->nodeid == parent && strcmp(node->name,name) == 0)
  606. return node;
  607. return NULL;
  608. }
  609. static
  610. void
  611. inc_nlookup(node_t *node)
  612. {
  613. if(!node->nlookup)
  614. node->refctr++;
  615. node->nlookup++;
  616. }
/* Find the node for (parent,name), creating it on first sight. With
   name == NULL, resolves `parent` itself. A freshly remembered node
   that gets looked up again is removed from the remember cache.
   Returns NULL on allocation failure. */
static
node_t*
find_node(struct fuse *f,
          uint64_t parent,
          const char *name)
{
  node_t *node;

  pthread_mutex_lock(&f->lock);
  if(!name)
    node = get_node(f,parent);
  else
    node = lookup_node(f,parent,name);

  if(node == NULL)
    {
      node = node_alloc();
      if(node == NULL)
        goto out_err;

      node->nodeid = generate_nodeid(&f->nodeid_gen);
      /* remembered nodes carry one extra lookup count */
      if(f->conf.remember)
        inc_nlookup(node);

      if(hash_name(f,node,parent,name) == -1)
        {
          free_node(f,node);
          node = NULL;
          goto out_err;
        }
      hash_id(f,node);
    }
  else if((node->nlookup == 1) && remember_nodes(f))
    {
      /* back in active use: drop it from the remember cache */
      remove_remembered_node(f,node);
    }
  inc_nlookup(node);
 out_err:
  pthread_mutex_unlock(&f->lock);
  return node;
}
/*
  Prepend "/name" to a path being built backwards from the end of
  *buf; `s` points at the current start of the partial path. When
  there is no room the buffer is grown (doubling, saturating at
  0xffffffff) and the partial path relocated to the end of the new
  buffer. Returns the new start pointer, or NULL on OOM.
*/
static
char*
add_name(char **buf,
         unsigned *bufsize,
         char *s,
         const char *name)
{
  size_t len = strlen(name);

  if(s - len <= *buf)
    {
      unsigned pathlen = *bufsize - (s - *buf);
      unsigned newbufsize = *bufsize;
      char *newbuf;

      while(newbufsize < pathlen + len + 1)
        {
          if(newbufsize >= 0x80000000)
            newbufsize = 0xffffffff;
          else
            newbufsize *= 2;
        }

      newbuf = (char*)realloc(*buf,newbufsize);
      if(newbuf == NULL)
        return NULL;

      *buf = newbuf;
      s = newbuf + newbufsize - pathlen;
      memmove(s,newbuf + *bufsize - pathlen,pathlen);
      *bufsize = newbufsize;
    }

  s -= len;
  /* strncpy is deliberate: copy exactly len bytes into the middle of
     the buffer; the path's terminator already sits at the end. */
  strncpy(s,name,len);
  s--;
  *s = '/';

  return s;
}
/*
  Release treelocks taken by try_get_path(): the write lock on wnode
  (if any), and one read lock on each node from nodeid up to — but not
  including — `end` or the root. A reader count that lands exactly on
  TREELOCK_WAIT_OFFSET means waiters offset it; reset to 0 so they can
  retry. Caller holds f->lock.
*/
static
void
unlock_path(struct fuse *f,
            uint64_t nodeid,
            node_t *wnode,
            node_t *end)
{
  node_t *node;

  if(wnode)
    {
      assert(wnode->treelock == TREELOCK_WRITE);
      wnode->treelock = 0;
    }

  for(node = get_node(f,nodeid); node != end && node->nodeid != FUSE_ROOT_ID; node = node->parent)
    {
      assert(node->treelock != 0);
      assert(node->treelock != TREELOCK_WAIT_OFFSET);
      assert(node->treelock != TREELOCK_WRITE);

      node->treelock--;
      if(node->treelock == TREELOCK_WAIT_OFFSET)
        node->treelock = 0;
    }
}
/*
  Build the full path for nodeid (optionally with trailing `name`) by
  walking parent pointers to the root, and — when need_lock — take one
  read treelock per ancestor. With wnodep set, also take the exclusive
  (TREELOCK_WRITE) lock on the target node itself.

  Returns 0 with *path (malloc'd) on success; -EAGAIN when a lock is
  contended (caller should queue and retry); -ESTALE when the chain to
  the root is broken; -ENOMEM on allocation failure. Caller holds
  f->lock.
*/
static
int
try_get_path(struct fuse *f,
             uint64_t nodeid,
             const char *name,
             char **path,
             node_t **wnodep,
             bool need_lock)
{
  unsigned bufsize = 256;
  char *buf;
  char *s;
  node_t *node;
  node_t *wnode = NULL;
  int err;

  *path = NULL;

  err = -ENOMEM;
  buf = (char*)malloc(bufsize);
  if(buf == NULL)
    goto out_err;

  /* path is assembled backwards from the end of the buffer */
  s = buf + bufsize - 1;
  *s = '\0';

  if(name != NULL)
    {
      s = add_name(&buf,&bufsize,s,name);
      err = -ENOMEM;
      if(s == NULL)
        goto out_free;
    }

  if(wnodep)
    {
      assert(need_lock);
      wnode = lookup_node(f,nodeid,name);
      if(wnode)
        {
          if(wnode->treelock != 0)
            {
              /* mark readers so the final unlock wakes us */
              if(wnode->treelock > 0)
                wnode->treelock += TREELOCK_WAIT_OFFSET;
              err = -EAGAIN;
              goto out_free;
            }
          wnode->treelock = TREELOCK_WRITE;
        }
    }

  for(node = get_node(f,nodeid); node->nodeid != FUSE_ROOT_ID; node = node->parent)
    {
      err = -ESTALE;
      if(node->name == NULL || node->parent == NULL)
        goto out_unlock;

      err = -ENOMEM;
      s = add_name(&buf,&bufsize,s,node->name);
      if(s == NULL)
        goto out_unlock;

      if(need_lock)
        {
          err = -EAGAIN;
          /* negative treelock == write-locked by someone else */
          if(node->treelock < 0)
            goto out_unlock;

          node->treelock++;
        }
    }

  if(s[0])
    memmove(buf,s,bufsize - (s - buf));
  else
    strcpy(buf,"/");

  *path = buf;
  if(wnodep)
    *wnodep = wnode;

  return 0;

 out_unlock:
  if(need_lock)
    unlock_path(f,nodeid,wnode,node);
 out_free:
  free(buf);
 out_err:
  return err;
}
/* Acquire paths and locks for two nodes at once (rename/link). On
   failure of the second acquisition the first is fully rolled back so
   the pair is taken atomically or not at all. Caller holds f->lock. */
static
int
try_get_path2(struct fuse *f,
              uint64_t nodeid1,
              const char *name1,
              uint64_t nodeid2,
              const char *name2,
              char **path1,
              char **path2,
              node_t **wnode1,
              node_t **wnode2)
{
  int err;

  err = try_get_path(f,nodeid1,name1,path1,wnode1,true);
  if(!err)
    {
      err = try_get_path(f,nodeid2,name2,path2,wnode2,true);
      if(err)
        {
          node_t *wn1 = wnode1 ? *wnode1 : NULL;

          unlock_path(f,nodeid1,wn1,NULL);
          free(*path1);
        }
    }

  return err;
}
/* Retry a queued waiter's path acquisition; signal it on success or
   hard failure, leave it queued on -EAGAIN. A waiter with no path1 is
   only waiting for the node's treelock to drop to zero. Caller holds
   f->lock. */
static
void
queue_element_wakeup(struct fuse *f,
                     struct lock_queue_element *qe)
{
  int err;

  if(!qe->path1)
    {
      /* Just waiting for it to be unlocked */
      if(get_node(f,qe->nodeid1)->treelock == 0)
        pthread_cond_signal(&qe->cond);

      return;
    }

  if(qe->done)
    return;

  if(!qe->path2)
    {
      err = try_get_path(f,
                         qe->nodeid1,
                         qe->name1,
                         qe->path1,
                         qe->wnode1,
                         true);
    }
  else
    {
      err = try_get_path2(f,
                          qe->nodeid1,
                          qe->name1,
                          qe->nodeid2,
                          qe->name2,
                          qe->path1,
                          qe->path2,
                          qe->wnode1,
                          qe->wnode2);
    }

  if(err == -EAGAIN)
    return;

  qe->err = err;
  qe->done = true;

  pthread_cond_signal(&qe->cond);
}
  857. static
  858. void
  859. wake_up_queued(struct fuse *f)
  860. {
  861. struct lock_queue_element *qe;
  862. for(qe = f->lockq; qe != NULL; qe = qe->next)
  863. queue_element_wakeup(f,qe);
  864. }
  865. static
  866. void
  867. queue_path(struct fuse *f,
  868. struct lock_queue_element *qe)
  869. {
  870. struct lock_queue_element **qp;
  871. qe->done = false;
  872. pthread_cond_init(&qe->cond,NULL);
  873. qe->next = NULL;
  874. for(qp = &f->lockq; *qp != NULL; qp = &(*qp)->next);
  875. *qp = qe;
  876. }
  877. static
  878. void
  879. dequeue_path(struct fuse *f,
  880. struct lock_queue_element *qe)
  881. {
  882. struct lock_queue_element **qp;
  883. pthread_cond_destroy(&qe->cond);
  884. for(qp = &f->lockq; *qp != qe; qp = &(*qp)->next);
  885. *qp = qe->next;
  886. }
  887. static
  888. int
  889. wait_path(struct fuse *f,
  890. struct lock_queue_element *qe)
  891. {
  892. queue_path(f,qe);
  893. do
  894. {
  895. pthread_cond_wait(&qe->cond,&f->lock);
  896. } while(!qe->done);
  897. dequeue_path(f,qe);
  898. return qe->err;
  899. }
  900. static
  901. int
  902. get_path_common(struct fuse *f,
  903. uint64_t nodeid,
  904. const char *name,
  905. char **path,
  906. node_t **wnode)
  907. {
  908. int err;
  909. pthread_mutex_lock(&f->lock);
  910. err = try_get_path(f,nodeid,name,path,wnode,true);
  911. if(err == -EAGAIN)
  912. {
  913. struct lock_queue_element qe = {0};
  914. qe.nodeid1 = nodeid;
  915. qe.name1 = name;
  916. qe.path1 = path;
  917. qe.wnode1 = wnode;
  918. err = wait_path(f,&qe);
  919. }
  920. pthread_mutex_unlock(&f->lock);
  921. return err;
  922. }
/* Path for a node, read-locked; no trailing name, no write lock. */
static
int
get_path(struct fuse *f,
         uint64_t nodeid,
         char **path)
{
  return get_path_common(f,nodeid,NULL,path,NULL);
}
/* Path for (nodeid,name), read-locked; no write lock taken. */
static
int
get_path_name(struct fuse *f,
              uint64_t nodeid,
              const char *name,
              char **path)
{
  return get_path_common(f,nodeid,name,path,NULL);
}
/* Path for (nodeid,name) with the target node write-locked; *wnode
   receives the locked node (or NULL if it does not exist). */
static
int
get_path_wrlock(struct fuse *f,
                uint64_t nodeid,
                const char *name,
                char **path,
                node_t **wnode)
{
  return get_path_common(f,nodeid,name,path,wnode);
}
  950. static
  951. int
  952. get_path2(struct fuse *f,
  953. uint64_t nodeid1,
  954. const char *name1,
  955. uint64_t nodeid2,
  956. const char *name2,
  957. char **path1,
  958. char **path2,
  959. node_t **wnode1,
  960. node_t **wnode2)
  961. {
  962. int err;
  963. pthread_mutex_lock(&f->lock);
  964. err = try_get_path2(f,nodeid1,name1,nodeid2,name2,
  965. path1,path2,wnode1,wnode2);
  966. if(err == -EAGAIN)
  967. {
  968. struct lock_queue_element qe = {0};
  969. qe.nodeid1 = nodeid1;
  970. qe.name1 = name1;
  971. qe.path1 = path1;
  972. qe.wnode1 = wnode1;
  973. qe.nodeid2 = nodeid2;
  974. qe.name2 = name2;
  975. qe.path2 = path2;
  976. qe.wnode2 = wnode2;
  977. err = wait_path(f,&qe);
  978. }
  979. pthread_mutex_unlock(&f->lock);
  980. return err;
  981. }
/* Release the treelocks taken for `path` (including the write lock on
   wnode), let queued waiters retry, and free the path string. */
static
void
free_path_wrlock(struct fuse *f,
                 uint64_t nodeid,
                 node_t *wnode,
                 char *path)
{
  pthread_mutex_lock(&f->lock);
  unlock_path(f,nodeid,wnode,NULL);
  if(f->lockq)
    wake_up_queued(f);
  pthread_mutex_unlock(&f->lock);
  free(path);
}
  996. static
  997. void
  998. free_path(struct fuse *f,
  999. uint64_t nodeid,
  1000. char *path)
  1001. {
  1002. if(path)
  1003. free_path_wrlock(f,nodeid,NULL,path);
  1004. }
/* Release the locks for both paths of a two-path operation, wake
   waiters, and free both path strings. */
static
void
free_path2(struct fuse *f,
           uint64_t nodeid1,
           uint64_t nodeid2,
           node_t *wnode1,
           node_t *wnode2,
           char *path1,
           char *path2)
{
  pthread_mutex_lock(&f->lock);
  unlock_path(f,nodeid1,wnode1,NULL);
  unlock_path(f,nodeid2,wnode2,NULL);
  wake_up_queued(f);
  pthread_mutex_unlock(&f->lock);
  free(path1);
  free(path2);
}
/* Handle a kernel FORGET: subtract nlookup references, waiting out
   any treelock still held (see comment below), and either free the
   node or move it to the remember cache. The root node is never
   forgotten. */
static
void
forget_node(struct fuse *f,
            const uint64_t nodeid,
            const uint64_t nlookup)
{
  node_t *node;

  if(nodeid == FUSE_ROOT_ID)
    return;

  pthread_mutex_lock(&f->lock);
  node = get_node(f,nodeid);

  /*
   * Node may still be locked due to interrupt idiocy in open,
   * create and opendir
   */
  while(node->nlookup == nlookup && node->treelock)
    {
      struct lock_queue_element qe = {0};

      qe.nodeid1 = nodeid;

      queue_path(f,&qe);

      do
        {
          pthread_cond_wait(&qe.cond,&f->lock);
        }
      while((node->nlookup == nlookup) && node->treelock);

      dequeue_path(f,&qe);
    }

  assert(node->nlookup >= nlookup);
  node->nlookup -= nlookup;

  if(node->nlookup == 0)
    {
      unref_node(f,node);
    }
  else if((node->nlookup == 1) && remember_nodes(f))
    {
      /* last kernel reference dropped: park in the remember cache */
      remembered_node_t fn;

      fn.node = node;
      fn.time = current_time();

      kv_push(remembered_node_t,f->remembered_nodes,fn);
    }

  pthread_mutex_unlock(&f->lock);
}
/* Remove a node's name after unlink/rmdir. When remembering nodes,
   also drop the extra lookup reference the remember cache holds. */
static
void
unlink_node(struct fuse *f,
            node_t *node)
{
  if(remember_nodes(f))
    {
      assert(node->nlookup > 1);
      node->nlookup--;
    }
  unhash_name(f,node);
}
/* Unlink the node named (dir,name), if it exists. */
static
void
remove_node(struct fuse *f,
            uint64_t dir,
            const char *name)
{
  node_t *node;

  pthread_mutex_lock(&f->lock);
  node = lookup_node(f,dir,name);
  if(node != NULL)
    unlink_node(f,node);
  pthread_mutex_unlock(&f->lock);
}
/* Move (olddir,oldname) to (newdir,newname) in the name table,
   unlinking any node already at the target. NOTE(review): if
   hash_name() fails the node is left unhashed (nameless) — later
   path resolution for it will return -ESTALE. */
static
int
rename_node(struct fuse *f,
            uint64_t olddir,
            const char *oldname,
            uint64_t newdir,
            const char *newname)
{
  node_t *node;
  node_t *newnode;
  int err = 0;

  pthread_mutex_lock(&f->lock);
  node = lookup_node(f,olddir,oldname);
  newnode = lookup_node(f,newdir,newname);
  if(node == NULL)
    goto out;

  if(newnode != NULL)
    unlink_node(f,newnode);

  unhash_name(f,node);
  if(hash_name(f,node,newdir,newname) == -1)
    {
      err = -ENOMEM;
      goto out;
    }

 out:
  pthread_mutex_unlock(&f->lock);
  return err;
}
/* Apply the configured mode/uid/gid overrides to a stat result before
   it is returned to the kernel. */
static
void
set_stat(struct fuse *f,
         uint64_t nodeid,
         struct stat *stbuf)
{
  if(f->conf.set_mode)
    stbuf->st_mode = (stbuf->st_mode & S_IFMT) | (0777 & ~f->conf.umask);
  if(f->conf.set_uid)
    stbuf->st_uid = f->conf.uid;
  if(f->conf.set_gid)
    stbuf->st_gid = f->conf.gid;
}
/* The struct fuse is stored as the request's userdata. */
static
struct fuse*
req_fuse(fuse_req_t req)
{
  return (struct fuse*)fuse_req_userdata(req);
}
/* True when the node exists and has at least one open handle. */
static
int
node_open(const node_t *node_)
{
  return ((node_ != NULL) && (node_->open_count > 0));
}
/* Invalidate the node's stat cache when the inode/size/mtime checksum
   changes; always record the new checksum. */
static
void
update_stat(node_t *node_,
            const struct stat *stnew_)
{
  uint32_t crc32b;

  crc32b = stat_crc32b(stnew_);

  if(node_->is_stat_cache_valid && (crc32b != node_->stat_crc32b))
    node_->is_stat_cache_valid = 0;

  node_->stat_crc32b = crc32b;
}
/* Fill in e->ino/generation for (nodeid,name), creating the node if
   needed, refresh its stat cache and apply configured stat overrides.
   Returns -ENOMEM when the node cannot be allocated. The root id is
   forced to generation 0 (see comment below). */
static
int
set_path_info(struct fuse *f,
              uint64_t nodeid,
              const char *name,
              struct fuse_entry_param *e)
{
  node_t *node;

  node = find_node(f,nodeid,name);
  if(node == NULL)
    return -ENOMEM;

  e->ino = node->nodeid;
  e->generation = ((e->ino == FUSE_ROOT_ID) ? 0 : f->nodeid_gen.generation);

  pthread_mutex_lock(&f->lock);
  update_stat(node,&e->attr);
  pthread_mutex_unlock(&f->lock);

  set_stat(f,e->ino,&e->attr);

  return 0;
}
/*
  lookup requests only come in for FUSE_ROOT_ID when a "parent of
  child of root node" request is made. This can happen when using
  EXPORT_SUPPORT=true and a file handle is used to keep a reference to
  a node which has been forgotten. Mostly a NFS concern but not
  excluslively. Root node always has a nodeid of 1 and generation of
  0. To ensure this set_path_info() explicitly ensures the root id has
  a generation of 0.
*/
/* Stat `path` (via getattr, or fgetattr when a file handle is given)
   and populate the entry param for (nodeid,name). */
static
int
lookup_path(struct fuse *f,
            uint64_t nodeid,
            const char *name,
            const char *path,
            struct fuse_entry_param *e,
            fuse_file_info_t *fi)
{
  int rv;

  memset(e,0,sizeof(struct fuse_entry_param));

  rv = ((fi == NULL) ?
        f->fs->op.getattr(path,&e->attr,&e->timeout) :
        f->fs->op.fgetattr(fi,&e->attr,&e->timeout));

  if(rv)
    return rv;

  return set_path_info(f,nodeid,name,e);
}
/* Return this thread's fuse context, lazily allocating it. Allocation
   failure aborts (see comment below). */
static
struct fuse_context_i*
fuse_get_context_internal(void)
{
  struct fuse_context_i *c;

  c = (struct fuse_context_i *)pthread_getspecific(fuse_context_key);
  if(c == NULL)
    {
      c = (struct fuse_context_i*)calloc(1,sizeof(struct fuse_context_i));
      if(c == NULL)
        {
          /* This is hard to deal with properly,so just
             abort. If memory is so low that the
             context cannot be allocated,there's not
             much hope for the filesystem anyway */
          fprintf(stderr,"fuse: failed to allocate thread specific data\n");
          abort();
        }
      pthread_setspecific(fuse_context_key,c);
    }

  return c;
}
  1222. static
  1223. void
  1224. fuse_freecontext(void *data)
  1225. {
  1226. free(data);
  1227. }
/* Create (or reference) the thread-specific key that stores each
   thread's fuse context. Reference counted so that multiple fuse
   instances share one key; only the first caller actually creates it.
   Returns 0 on success, -1 if pthread_key_create() failed. */
static
int
fuse_create_context_key(void)
{
  int err = 0;
  pthread_mutex_lock(&fuse_context_lock);
  if(!fuse_context_ref)
    {
      err = pthread_key_create(&fuse_context_key,fuse_freecontext);
      if(err)
        {
          fprintf(stderr,"fuse: failed to create thread specific key: %s\n",
                  strerror(err));
          /* Unlock before bailing; the refcount stays at zero. */
          pthread_mutex_unlock(&fuse_context_lock);
          return -1;
        }
    }
  /* Bumped whether the key was just created or already existed. */
  fuse_context_ref++;
  pthread_mutex_unlock(&fuse_context_lock);
  return 0;
}
  1249. static
  1250. void
  1251. fuse_delete_context_key(void)
  1252. {
  1253. pthread_mutex_lock(&fuse_context_lock);
  1254. fuse_context_ref--;
  1255. if(!fuse_context_ref)
  1256. {
  1257. free(pthread_getspecific(fuse_context_key));
  1258. pthread_key_delete(fuse_context_key);
  1259. }
  1260. pthread_mutex_unlock(&fuse_context_lock);
  1261. }
  1262. static
  1263. struct fuse*
  1264. req_fuse_prepare(fuse_req_t req)
  1265. {
  1266. struct fuse_context_i *c = fuse_get_context_internal();
  1267. const struct fuse_ctx *ctx = fuse_req_ctx(req);
  1268. c->req = req;
  1269. c->ctx.fuse = req_fuse(req);
  1270. c->ctx.uid = ctx->uid;
  1271. c->ctx.gid = ctx->gid;
  1272. c->ctx.pid = ctx->pid;
  1273. c->ctx.umask = ctx->umask;
  1274. return c->ctx.fuse;
  1275. }
  1276. static
  1277. void
  1278. reply_entry(fuse_req_t req,
  1279. const struct fuse_entry_param *e,
  1280. int err)
  1281. {
  1282. if(!err)
  1283. {
  1284. struct fuse *f = req_fuse(req);
  1285. if(fuse_reply_entry(req,e) == -ENOENT)
  1286. {
  1287. /* Skip forget for negative result */
  1288. if(e->ino != 0)
  1289. forget_node(f,e->ino,1);
  1290. }
  1291. }
  1292. else
  1293. {
  1294. fuse_reply_err(req,err);
  1295. }
  1296. }
  1297. static
  1298. void
  1299. fuse_lib_init(void *data,
  1300. struct fuse_conn_info *conn)
  1301. {
  1302. struct fuse *f = (struct fuse *)data;
  1303. struct fuse_context_i *c = fuse_get_context_internal();
  1304. memset(c,0,sizeof(*c));
  1305. c->ctx.fuse = f;
  1306. f->fs->op.init(conn);
  1307. }
  1308. static
  1309. void
  1310. fuse_lib_destroy(void *data)
  1311. {
  1312. struct fuse *f = (struct fuse *)data;
  1313. struct fuse_context_i *c = fuse_get_context_internal();
  1314. memset(c,0,sizeof(*c));
  1315. c->ctx.fuse = f;
  1316. f->fs->op.destroy();
  1317. free(f->fs);
  1318. f->fs = NULL;
  1319. }
/* FUSE_LOOKUP handler: resolve 'name' within directory 'nodeid'.
   "." and ".." need special handling: they can be looked up on a node
   kept alive only by a file handle after a forget (EXPORT_SUPPORT /
   NFS, see comment above lookup_path). -ENOENT is reported as a
   negative entry (ino == 0) so the kernel may cache the absence. */
static
void
fuse_lib_lookup(fuse_req_t req,
                struct fuse_in_header *hdr_)
{
  int err;
  uint64_t nodeid;
  char *path;
  const char *name;
  struct fuse *f;
  node_t *dot = NULL;
  struct fuse_entry_param e = {0};

  name = (const char*)fuse_hdr_arg(hdr_);
  nodeid = hdr_->nodeid;
  f = req_fuse_prepare(req);

  if(name[0] == '.')
    {
      if(name[1] == '\0')
        {
          /* "." - look up the directory itself. Take a temporary
             refcount so the node cannot go away while we resolve it. */
          name = NULL;
          pthread_mutex_lock(&f->lock);
          dot = get_node_nocheck(f,nodeid);
          if(dot == NULL)
            {
              pthread_mutex_unlock(&f->lock);
              reply_entry(req,&e,-ESTALE);
              return;
            }
          dot->refctr++;
          pthread_mutex_unlock(&f->lock);
        }
      else if((name[1] == '.') && (name[2] == '\0'))
        {
          /* ".." - look up the parent; the root (nodeid 1) has none. */
          if(nodeid == 1)
            {
              reply_entry(req,&e,-ENOENT);
              return;
            }
          name = NULL;
          pthread_mutex_lock(&f->lock);
          nodeid = get_node(f,nodeid)->parent->nodeid;
          pthread_mutex_unlock(&f->lock);
        }
    }

  err = get_path_name(f,nodeid,name,&path);
  if(!err)
    {
      err = lookup_path(f,nodeid,name,path,&e,NULL);
      if(err == -ENOENT)
        {
          /* Negative entry: ino 0 tells the kernel "does not exist". */
          e.ino = 0;
          err = 0;
        }
      free_path(f,nodeid,path);
    }

  if(dot)
    {
      /* Release the temporary reference taken for ".". */
      pthread_mutex_lock(&f->lock);
      unref_node(f,dot);
      pthread_mutex_unlock(&f->lock);
    }

  reply_entry(req,&e,err);
}
  1383. static
  1384. void
  1385. fuse_lib_forget(fuse_req_t req,
  1386. struct fuse_in_header *hdr_)
  1387. {
  1388. struct fuse *f;
  1389. struct fuse_forget_in *arg;
  1390. f = req_fuse(req);
  1391. arg = (fuse_forget_in*)fuse_hdr_arg(hdr_);
  1392. forget_node(f,hdr_->nodeid,arg->nlookup);
  1393. fuse_reply_none(req);
  1394. }
  1395. static
  1396. void
  1397. fuse_lib_forget_multi(fuse_req_t req,
  1398. struct fuse_in_header *hdr_)
  1399. {
  1400. struct fuse *f;
  1401. struct fuse_batch_forget_in *arg;
  1402. struct fuse_forget_one *entry;
  1403. f = req_fuse(req);
  1404. arg = (fuse_batch_forget_in*)fuse_hdr_arg(hdr_);
  1405. entry = (fuse_forget_one*)PARAM(arg);
  1406. for(uint32_t i = 0; i < arg->count; i++)
  1407. {
  1408. forget_node(f,
  1409. entry[i].nodeid,
  1410. entry[i].nlookup);
  1411. }
  1412. fuse_reply_none(req);
  1413. }
/* FUSE_GETATTR handler. Prefers an open file handle when the kernel
   supplies one (FUSE_GETATTR_FH), or the node's hidden handle (file
   unlinked while open); otherwise stats by path. On success the
   node's cached stat data is refreshed before replying. */
static
void
fuse_lib_getattr(fuse_req_t req,
                 struct fuse_in_header *hdr_)
{
  int err;
  char *path;
  struct fuse *f;
  struct stat buf;
  node_t *node;
  fuse_timeouts_t timeout;
  fuse_file_info_t ffi = {0};
  const struct fuse_getattr_in *arg;

  arg = (fuse_getattr_in*)fuse_hdr_arg(hdr_);
  f = req_fuse_prepare(req);

  if(arg->getattr_flags & FUSE_GETATTR_FH)
    {
      ffi.fh = arg->fh;
    }
  else
    {
      /* A node unlinked while open keeps a hidden handle; use it so
         getattr still works although the name is gone. */
      pthread_mutex_lock(&f->lock);
      node = get_node(f,hdr_->nodeid);
      if(node->hidden_fh)
        ffi.fh = node->hidden_fh;
      pthread_mutex_unlock(&f->lock);
    }

  memset(&buf,0,sizeof(buf));

  err = 0;
  path = NULL;
  /* Only resolve a path when no handle is available. */
  if(ffi.fh == 0)
    err = get_path(f,hdr_->nodeid,&path);

  if(!err)
    {
      err = ((ffi.fh == 0) ?
             f->fs->op.getattr(path,&buf,&timeout) :
             f->fs->op.fgetattr(&ffi,&buf,&timeout));
      free_path(f,hdr_->nodeid,path);
    }

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      node = get_node(f,hdr_->nodeid);
      update_stat(node,&buf);
      pthread_mutex_unlock(&f->lock);
      set_stat(f,hdr_->nodeid,&buf);
      fuse_reply_attr(req,&buf,timeout.attr);
    }
  else
    {
      fuse_reply_err(req,err);
    }
}
/* FUSE_SETATTR handler. Applies, in order, whichever of mode / owner
   / size / times the FATTR_* bits request, each routed to the
   path-based or handle-based op depending on whether a handle is
   available (explicit FATTR_FH or the node's hidden handle). Ends
   with a getattr so the reply carries the resulting attributes. */
static
void
fuse_lib_setattr(fuse_req_t req,
                 struct fuse_in_header *hdr_)
{
  struct fuse *f = req_fuse_prepare(req);
  struct stat stbuf = {0};
  char *path;
  int err;
  node_t *node;
  fuse_timeouts_t timeout;
  fuse_file_info_t *fi;
  fuse_file_info_t ffi = {0};
  struct fuse_setattr_in *arg;

  arg = (fuse_setattr_in*)fuse_hdr_arg(hdr_);

  fi = NULL;
  if(arg->valid & FATTR_FH)
    {
      fi = &ffi;
      fi->fh = arg->fh;
    }
  else
    {
      /* Fall back to the hidden handle of an unlinked-but-open node. */
      pthread_mutex_lock(&f->lock);
      node = get_node(f,hdr_->nodeid);
      if(node->hidden_fh)
        {
          fi = &ffi;
          fi->fh = node->hidden_fh;
        }
      pthread_mutex_unlock(&f->lock);
    }

  err = 0;
  path = NULL;
  if(fi == NULL)
    err = get_path(f,hdr_->nodeid,&path);

  if(!err)
    {
      err = 0;
      if(!err && (arg->valid & FATTR_MODE))
        err = ((fi == NULL) ?
               f->fs->op.chmod(path,arg->mode) :
               f->fs->op.fchmod(fi,arg->mode));
      if(!err && (arg->valid & (FATTR_UID | FATTR_GID)))
        {
          /* -1 means "leave unchanged", matching chown(2) semantics. */
          uid_t uid = ((arg->valid & FATTR_UID) ? arg->uid : (uid_t)-1);
          gid_t gid = ((arg->valid & FATTR_GID) ? arg->gid : (gid_t)-1);
          err = ((fi == NULL) ?
                 f->fs->op.chown(path,uid,gid) :
                 f->fs->op.fchown(fi,uid,gid));
        }
      if(!err && (arg->valid & FATTR_SIZE))
        err = ((fi == NULL) ?
               f->fs->op.truncate(path,arg->size) :
               f->fs->op.ftruncate(fi,arg->size));
#ifdef HAVE_UTIMENSAT
      /* With utimensat available, atime/mtime may be set individually
         using UTIME_OMIT / UTIME_NOW per timespec slot. */
      if(!err && (arg->valid & (FATTR_ATIME | FATTR_MTIME)))
        {
          struct timespec tv[2];
          tv[0].tv_sec = 0;
          tv[1].tv_sec = 0;
          tv[0].tv_nsec = UTIME_OMIT;
          tv[1].tv_nsec = UTIME_OMIT;
          if(arg->valid & FATTR_ATIME_NOW)
            tv[0].tv_nsec = UTIME_NOW;
          else if(arg->valid & FATTR_ATIME)
            tv[0] = (struct timespec){ static_cast<time_t>(arg->atime), arg->atimensec };
          if(arg->valid & FATTR_MTIME_NOW)
            tv[1].tv_nsec = UTIME_NOW;
          else if(arg->valid & FATTR_MTIME)
            tv[1] = (struct timespec){ static_cast<time_t>(arg->mtime), arg->mtimensec };
          err = ((fi == NULL) ?
                 f->fs->op.utimens(path,tv) :
                 f->fs->op.futimens(fi,tv));
        }
      else
#endif
        /* Without utimensat, both times must be supplied together. */
        if(!err && ((arg->valid & (FATTR_ATIME|FATTR_MTIME)) == (FATTR_ATIME|FATTR_MTIME)))
          {
            struct timespec tv[2];
            tv[0].tv_sec = arg->atime;
            tv[0].tv_nsec = arg->atimensec;
            tv[1].tv_sec = arg->mtime;
            tv[1].tv_nsec = arg->mtimensec;
            err = ((fi == NULL) ?
                   f->fs->op.utimens(path,tv) :
                   f->fs->op.futimens(fi,tv));
          }
      /* Re-stat so the reply reflects what actually got applied. */
      if(!err)
        err = ((fi == NULL) ?
               f->fs->op.getattr(path,&stbuf,&timeout) :
               f->fs->op.fgetattr(fi,&stbuf,&timeout));
      free_path(f,hdr_->nodeid,path);
    }

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      update_stat(get_node(f,hdr_->nodeid),&stbuf);
      pthread_mutex_unlock(&f->lock);
      set_stat(f,hdr_->nodeid,&stbuf);
      fuse_reply_attr(req,&stbuf,timeout.attr);
    }
  else
    {
      fuse_reply_err(req,err);
    }
}
  1574. static
  1575. void
  1576. fuse_lib_access(fuse_req_t req,
  1577. struct fuse_in_header *hdr_)
  1578. {
  1579. int err;
  1580. char *path;
  1581. struct fuse *f;
  1582. struct fuse_access_in *arg;
  1583. arg = (fuse_access_in*)fuse_hdr_arg(hdr_);
  1584. f = req_fuse_prepare(req);
  1585. err = get_path(f,hdr_->nodeid,&path);
  1586. if(!err)
  1587. {
  1588. err = f->fs->op.access(path,arg->mask);
  1589. free_path(f,hdr_->nodeid,path);
  1590. }
  1591. fuse_reply_err(req,err);
  1592. }
  1593. static
  1594. void
  1595. fuse_lib_readlink(fuse_req_t req,
  1596. struct fuse_in_header *hdr_)
  1597. {
  1598. int err;
  1599. char *path;
  1600. struct fuse *f;
  1601. char linkname[PATH_MAX + 1];
  1602. f = req_fuse_prepare(req);
  1603. err = get_path(f,hdr_->nodeid,&path);
  1604. if(!err)
  1605. {
  1606. err = f->fs->op.readlink(path,linkname,sizeof(linkname));
  1607. free_path(f,hdr_->nodeid,path);
  1608. }
  1609. if(!err)
  1610. {
  1611. linkname[PATH_MAX] = '\0';
  1612. fuse_reply_readlink(req,linkname);
  1613. }
  1614. else
  1615. {
  1616. fuse_reply_err(req,err);
  1617. }
  1618. }
/* FUSE_MKNOD handler. For regular files, first tries the create()
   op (then immediately releases the handle); if the filesystem does
   not implement create (-ENOSYS) - or the target is not a regular
   file - falls back to a plain mknod(). */
static
void
fuse_lib_mknod(fuse_req_t req,
               struct fuse_in_header *hdr_)
{
  int err;
  char *path;
  struct fuse *f;
  const char* name;
  struct fuse_entry_param e;
  struct fuse_mknod_in *arg;

  arg = (fuse_mknod_in*)fuse_hdr_arg(hdr_);
  name = (const char*)PARAM(arg);
  /* Protocol 7.12 added umask to the message; older kernels place
     the name at the compat offset instead. */
  if(req->f->conn.proto_minor >= 12)
    req->ctx.umask = arg->umask;
  else
    name = (char*)arg + FUSE_COMPAT_MKNOD_IN_SIZE;

  f = req_fuse_prepare(req);

  err = get_path_name(f,hdr_->nodeid,name,&path);
  if(!err)
    {
      err = -ENOSYS;
      if(S_ISREG(arg->mode))
        {
          fuse_file_info_t fi;

          memset(&fi,0,sizeof(fi));
          fi.flags = O_CREAT | O_EXCL | O_WRONLY;
          err = f->fs->op.create(path,arg->mode,&fi);
          if(!err)
            {
              err = lookup_path(f,hdr_->nodeid,name,path,&e,&fi);
              f->fs->op.release(&fi);
            }
        }
      /* No create op available (or not a regular file): plain mknod. */
      if(err == -ENOSYS)
        {
          err = f->fs->op.mknod(path,arg->mode,arg->rdev);
          if(!err)
            err = lookup_path(f,hdr_->nodeid,name,path,&e,NULL);
        }
      free_path(f,hdr_->nodeid,path);
    }

  reply_entry(req,&e,err);
}
  1663. static
  1664. void
  1665. fuse_lib_mkdir(fuse_req_t req,
  1666. struct fuse_in_header *hdr_)
  1667. {
  1668. int err;
  1669. char *path;
  1670. struct fuse *f;
  1671. const char *name;
  1672. struct fuse_entry_param e;
  1673. struct fuse_mkdir_in *arg;
  1674. arg = (fuse_mkdir_in*)fuse_hdr_arg(hdr_);
  1675. name = (const char*)PARAM(arg);
  1676. if(req->f->conn.proto_minor >= 12)
  1677. req->ctx.umask = arg->umask;
  1678. f = req_fuse_prepare(req);
  1679. err = get_path_name(f,hdr_->nodeid,name,&path);
  1680. if(!err)
  1681. {
  1682. err = f->fs->op.mkdir(path,arg->mode);
  1683. if(!err)
  1684. err = lookup_path(f,hdr_->nodeid,name,path,&e,NULL);
  1685. free_path(f,hdr_->nodeid,path);
  1686. }
  1687. reply_entry(req,&e,err);
  1688. }
/* FUSE_UNLINK handler. If the target is currently open the
   filesystem is first given a chance to "hide" it (prepare_hide) so
   existing open handles keep working after the name disappears. */
static
void
fuse_lib_unlink(fuse_req_t req,
                struct fuse_in_header *hdr_)
{
  int err;
  char *path;
  struct fuse *f;
  const char *name;
  node_t *wnode;

  name = (const char*)PARAM(hdr_);
  f = req_fuse_prepare(req);

  err = get_path_wrlock(f,hdr_->nodeid,name,&path,&wnode);
  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      if(node_open(wnode))
        err = f->fs->op.prepare_hide(path,&wnode->hidden_fh);
      pthread_mutex_unlock(&f->lock);
      /* NOTE(review): a prepare_hide failure is discarded - err is
         unconditionally overwritten by unlink() below. Looks like
         hide is best-effort by design; confirm. */
      err = f->fs->op.unlink(path);
      if(!err)
        remove_node(f,hdr_->nodeid,name);
      free_path_wrlock(f,hdr_->nodeid,wnode,path);
    }

  fuse_reply_err(req,err);
}
  1715. static
  1716. void
  1717. fuse_lib_rmdir(fuse_req_t req,
  1718. struct fuse_in_header *hdr_)
  1719. {
  1720. int err;
  1721. char *path;
  1722. struct fuse *f;
  1723. const char *name;
  1724. node_t *wnode;
  1725. name = (const char*)PARAM(hdr_);
  1726. f = req_fuse_prepare(req);
  1727. err = get_path_wrlock(f,hdr_->nodeid,name,&path,&wnode);
  1728. if(!err)
  1729. {
  1730. err = f->fs->op.rmdir(path);
  1731. if(!err)
  1732. remove_node(f,hdr_->nodeid,name);
  1733. free_path_wrlock(f,hdr_->nodeid,wnode,path);
  1734. }
  1735. fuse_reply_err(req,err);
  1736. }
  1737. static
  1738. void
  1739. fuse_lib_symlink(fuse_req_t req_,
  1740. struct fuse_in_header *hdr_)
  1741. {
  1742. int rv;
  1743. char *path;
  1744. struct fuse *f;
  1745. const char *name;
  1746. const char *linkname;
  1747. struct fuse_entry_param e = {0};
  1748. name = (const char*)fuse_hdr_arg(hdr_);
  1749. linkname = (name + strlen(name) + 1);
  1750. f = req_fuse_prepare(req_);
  1751. rv = get_path_name(f,hdr_->nodeid,name,&path);
  1752. if(rv == 0)
  1753. {
  1754. rv = f->fs->op.symlink(linkname,path,&e.attr,&e.timeout);
  1755. if(rv == 0)
  1756. rv = set_path_info(f,hdr_->nodeid,name,&e);
  1757. free_path(f,hdr_->nodeid,path);
  1758. }
  1759. reply_entry(req_,&e,rv);
  1760. }
/* FUSE_RENAME handler. If the destination exists and is open, the
   filesystem may "hide" it (prepare_hide) so open handles survive
   being renamed over. On success the in-memory node tree is updated
   to match the new name/parent. */
static
void
fuse_lib_rename(fuse_req_t req,
                struct fuse_in_header *hdr_)
{
  int err;
  struct fuse *f;
  char *oldpath;
  char *newpath;
  const char *oldname;
  const char *newname;
  node_t *wnode1;
  node_t *wnode2;
  struct fuse_rename_in *arg;

  /* Payload: old name, NUL, new name; new parent is in arg->newdir. */
  arg = (fuse_rename_in*)fuse_hdr_arg(hdr_);
  oldname = (const char*)PARAM(arg);
  newname = (oldname + strlen(oldname) + 1);

  f = req_fuse_prepare(req);

  err = get_path2(f,hdr_->nodeid,oldname,arg->newdir,newname,
                  &oldpath,&newpath,&wnode1,&wnode2);
  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      if(node_open(wnode2))
        err = f->fs->op.prepare_hide(newpath,&wnode2->hidden_fh);
      pthread_mutex_unlock(&f->lock);
      /* NOTE(review): prepare_hide's result is discarded - err is
         overwritten by rename() below; appears to be best-effort. */
      err = f->fs->op.rename(oldpath,newpath);
      if(!err)
        err = rename_node(f,hdr_->nodeid,oldname,arg->newdir,newname);
      free_path2(f,hdr_->nodeid,arg->newdir,wnode1,wnode2,oldpath,newpath);
    }

  fuse_reply_err(req,err);
}
  1794. static
  1795. void
  1796. fuse_lib_link(fuse_req_t req,
  1797. struct fuse_in_header *hdr_)
  1798. {
  1799. int rv;
  1800. char *oldpath;
  1801. char *newpath;
  1802. struct fuse *f;
  1803. const char *newname;
  1804. struct fuse_link_in *arg;
  1805. struct fuse_entry_param e = {0};
  1806. arg = (fuse_link_in*)fuse_hdr_arg(hdr_);
  1807. newname = (const char*)PARAM(arg);
  1808. f = req_fuse_prepare(req);
  1809. rv = get_path2(f,
  1810. arg->oldnodeid,NULL,
  1811. hdr_->nodeid,newname,
  1812. &oldpath,&newpath,NULL,NULL);
  1813. if(!rv)
  1814. {
  1815. rv = f->fs->op.link(oldpath,newpath,&e.attr,&e.timeout);
  1816. if(rv == 0)
  1817. rv = set_path_info(f,hdr_->nodeid,newname,&e);
  1818. free_path2(f,arg->oldnodeid,hdr_->nodeid,NULL,NULL,oldpath,newpath);
  1819. }
  1820. reply_entry(req,&e,rv);
  1821. }
/* Common release path: invoke the fs release op, decrement the
   node's open count, and - if this was the last open of a hidden
   (unlinked-while-open) node - free the hidden handle after
   dropping the lock. */
static
void
fuse_do_release(struct fuse *f,
                uint64_t ino,
                fuse_file_info_t *fi)
{
  uint64_t fh;
  node_t *node;

  fh = 0;

  f->fs->op.release(fi);

  pthread_mutex_lock(&f->lock);
  {
    node = get_node(f,ino);
    assert(node->open_count > 0);
    node->open_count--;
    /* Last close of a hidden node: take ownership of the hidden
       handle so it can be freed outside the lock. */
    if(node->hidden_fh && (node->open_count == 0))
      {
        fh = node->hidden_fh;
        node->hidden_fh = 0;
      }
  }
  pthread_mutex_unlock(&f->lock);

  if(fh)
    f->fs->op.free_hide(fh);
}
/* FUSE_CREATE handler: atomically create and open a regular file.
   On success the node's open count is bumped; if the kernel already
   gave up on the request (-ENOENT from fuse_reply_create) the open
   and the lookup reference are both rolled back. */
static
void
fuse_lib_create(fuse_req_t req,
                struct fuse_in_header *hdr_)
{
  int err;
  char *path;
  struct fuse *f;
  const char *name;
  fuse_file_info_t ffi = {0};
  struct fuse_entry_param e;
  struct fuse_create_in *arg;

  arg = (fuse_create_in*)fuse_hdr_arg(hdr_);
  name = (const char*)PARAM(arg);
  ffi.flags = arg->flags;
  /* Protocol 7.12 added umask; older kernels used the open_in layout,
     placing the name right after it. */
  if(req->f->conn.proto_minor >= 12)
    req->ctx.umask = arg->umask;
  else
    name = (char*)arg + sizeof(struct fuse_open_in);

  f = req_fuse_prepare(req);

  err = get_path_name(f,hdr_->nodeid,name,&path);
  if(!err)
    {
      err = f->fs->op.create(path,arg->mode,&ffi);
      if(!err)
        {
          err = lookup_path(f,hdr_->nodeid,name,path,&e,&ffi);
          if(err)
            {
              /* Lookup failed: close the handle we just opened. */
              f->fs->op.release(&ffi);
            }
          else if(!S_ISREG(e.attr.st_mode))
            {
              /* create() must yield a regular file; anything else is
                 treated as an I/O error and fully rolled back. */
              err = -EIO;
              f->fs->op.release(&ffi);
              forget_node(f,e.ino,1);
            }
        }
    }

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      get_node(f,e.ino)->open_count++;
      pthread_mutex_unlock(&f->lock);
      if(fuse_reply_create(req,&e,&ffi) == -ENOENT)
        {
          /* The open syscall was interrupted, so it must be cancelled. */
          fuse_do_release(f,e.ino,&ffi);
          forget_node(f,e.ino,1);
        }
    }
  else
    {
      fuse_reply_err(req,err);
    }

  /* NOTE(review): reached even when get_path_name() failed - relies
     on get_path_name initializing 'path' on error; confirm. */
  free_path(f,hdr_->nodeid,path);
}
/* auto_cache probe run at open time. If the node's cached stat was
   valid, re-stat through the new handle and feed the result to
   update_stat(); presumably update_stat() clears the valid flag when
   attributes changed - TODO confirm. If the cache is (still) valid
   afterwards, tell the kernel it may keep its page cache
   (fi->keep_cache). The flag is set valid again either way.
   'path' is currently unused. */
static
void
open_auto_cache(struct fuse *f,
                uint64_t ino,
                const char *path,
                fuse_file_info_t *fi)
{
  node_t *node;
  fuse_timeouts_t timeout;

  pthread_mutex_lock(&f->lock);

  node = get_node(f,ino);
  if(node->is_stat_cache_valid)
    {
      int err;
      struct stat stbuf;

      /* Drop the lock around the fs call, retake it for the update. */
      pthread_mutex_unlock(&f->lock);
      err = f->fs->op.fgetattr(fi,&stbuf,&timeout);
      pthread_mutex_lock(&f->lock);

      if(!err)
        update_stat(node,&stbuf);
      else
        node->is_stat_cache_valid = 0;
    }

  if(node->is_stat_cache_valid)
    fi->keep_cache = 1;

  node->is_stat_cache_valid = 1;

  pthread_mutex_unlock(&f->lock);
}
/* FUSE_OPEN handler. On success bumps the node's open count; if the
   kernel already aborted the request (-ENOENT from fuse_reply_open)
   the open is released again. auto_cache triggers the keep-cache
   probe. */
static
void
fuse_lib_open(fuse_req_t req,
              struct fuse_in_header *hdr_)
{
  int err;
  char *path;
  struct fuse *f;
  fuse_file_info_t ffi = {0};
  struct fuse_open_in *arg;

  arg = (fuse_open_in*)fuse_hdr_arg(hdr_);
  ffi.flags = arg->flags;

  f = req_fuse_prepare(req);

  err = get_path(f,hdr_->nodeid,&path);
  if(!err)
    {
      err = f->fs->op.open(path,&ffi);
      if(!err)
        {
          if(ffi.auto_cache)
            open_auto_cache(f,hdr_->nodeid,path,&ffi);
        }
    }

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      get_node(f,hdr_->nodeid)->open_count++;
      pthread_mutex_unlock(&f->lock);
      /* The open syscall was interrupted, so it must be cancelled. */
      if(fuse_reply_open(req,&ffi) == -ENOENT)
        fuse_do_release(f,hdr_->nodeid,&ffi);
    }
  else
    {
      fuse_reply_err(req,err);
    }

  /* NOTE(review): reached even when get_path() failed - relies on
     get_path initializing 'path' on error; confirm. */
  free_path(f,hdr_->nodeid,path);
}
  1971. static
  1972. void
  1973. fuse_lib_read(fuse_req_t req,
  1974. struct fuse_in_header *hdr_)
  1975. {
  1976. int res;
  1977. struct fuse *f;
  1978. fuse_file_info_t ffi = {0};
  1979. struct fuse_read_in *arg;
  1980. fuse_msgbuf_t *msgbuf;
  1981. arg = (fuse_read_in*)fuse_hdr_arg(hdr_);
  1982. ffi.fh = arg->fh;
  1983. if(req->f->conn.proto_minor >= 9)
  1984. {
  1985. ffi.flags = arg->flags;
  1986. ffi.lock_owner = arg->lock_owner;
  1987. }
  1988. f = req_fuse_prepare(req);
  1989. msgbuf = msgbuf_alloc_page_aligned();
  1990. res = f->fs->op.read(&ffi,msgbuf->mem,arg->size,arg->offset);
  1991. if(res >= 0)
  1992. fuse_reply_data(req,msgbuf->mem,res);
  1993. else
  1994. fuse_reply_err(req,res);
  1995. msgbuf_free(msgbuf);
  1996. }
/* FUSE_WRITE handler. Protocol < 9 carries the payload at the compat
   offset and has no flags/lock_owner fields. ffi.writepage mirrors
   bit 0 of write_flags. */
static
void
fuse_lib_write(fuse_req_t req,
               struct fuse_in_header *hdr_)
{
  int res;
  char *data;
  struct fuse *f;
  fuse_file_info_t ffi = {0};
  struct fuse_write_in *arg;

  arg = (fuse_write_in*)fuse_hdr_arg(hdr_);
  ffi.fh = arg->fh;
  ffi.writepage = !!(arg->write_flags & 1);
  if(req->f->conn.proto_minor < 9)
    {
      data = ((char*)arg) + FUSE_COMPAT_WRITE_IN_SIZE;
    }
  else
    {
      ffi.flags = arg->flags;
      ffi.lock_owner = arg->lock_owner;
      data = (char*)PARAM(arg);
    }

  f = req_fuse_prepare(req);
  res = f->fs->op.write(&ffi,data,arg->size,arg->offset);
  /* NOTE(review): free_path() with a NULL path - presumably a no-op
     kept for symmetry with the other handlers; confirm. */
  free_path(f,hdr_->nodeid,NULL);

  if(res >= 0)
    fuse_reply_write(req,res);
  else
    fuse_reply_err(req,res);
}
  2028. static
  2029. void
  2030. fuse_lib_fsync(fuse_req_t req,
  2031. struct fuse_in_header *hdr_)
  2032. {
  2033. int err;
  2034. struct fuse *f;
  2035. struct fuse_fsync_in *arg;
  2036. fuse_file_info_t ffi = {0};
  2037. arg = (fuse_fsync_in*)fuse_hdr_arg(hdr_);
  2038. ffi.fh = arg->fh;
  2039. f = req_fuse_prepare(req);
  2040. err = f->fs->op.fsync(&ffi,
  2041. !!(arg->fsync_flags & 1));
  2042. fuse_reply_err(req,err);
  2043. }
  2044. static
  2045. struct fuse_dh*
  2046. get_dirhandle(const fuse_file_info_t *llfi,
  2047. fuse_file_info_t *fi)
  2048. {
  2049. struct fuse_dh *dh = (struct fuse_dh *)(uintptr_t)llfi->fh;
  2050. memset(fi,0,sizeof(fuse_file_info_t));
  2051. fi->fh = dh->fh;
  2052. return dh;
  2053. }
/* FUSE_OPENDIR handler. Allocates a fuse_dh directory handle whose
   address is handed to the kernel in llffi.fh; the filesystem's own
   handle lives in dh->fh. The handle is torn down on error and when
   the kernel aborted the request before the reply arrived. */
static
void
fuse_lib_opendir(fuse_req_t req,
                 struct fuse_in_header *hdr_)
{
  int err;
  char *path;
  struct fuse_dh *dh;
  fuse_file_info_t llffi = {0};
  fuse_file_info_t ffi = {0};
  struct fuse *f;
  struct fuse_open_in *arg;

  arg = (fuse_open_in*)fuse_hdr_arg(hdr_);
  llffi.flags = arg->flags;

  f = req_fuse_prepare(req);

  dh = (struct fuse_dh *)calloc(1,sizeof(struct fuse_dh));
  if(dh == NULL)
    {
      fuse_reply_err(req,ENOMEM);
      return;
    }

  fuse_dirents_init(&dh->d);
  fuse_mutex_init(&dh->lock);

  /* The kernel-visible handle is the dh pointer itself. */
  llffi.fh = (uintptr_t)dh;
  ffi.flags = llffi.flags;

  err = get_path(f,hdr_->nodeid,&path);
  if(!err)
    {
      err = f->fs->op.opendir(path,&ffi);
      dh->fh = ffi.fh;
      llffi.keep_cache = ffi.keep_cache;
      llffi.cache_readdir = ffi.cache_readdir;
    }

  if(!err)
    {
      if(fuse_reply_open(req,&llffi) == -ENOENT)
        {
          /* The opendir syscall was interrupted, so it must be
             cancelled. */
          f->fs->op.releasedir(&ffi);
          pthread_mutex_destroy(&dh->lock);
          free(dh);
        }
    }
  else
    {
      fuse_reply_err(req,err);
      pthread_mutex_destroy(&dh->lock);
      free(dh);
    }

  /* NOTE(review): reached even when get_path() failed - relies on
     get_path initializing 'path' on error; confirm. */
  free_path(f,hdr_->nodeid,path);
}
/* Clamp a readdir request against the cached dirent buffer: 0 when
   off_ is past the end, the remaining byte count when size_ would
   overrun, otherwise size_ unchanged.
   NOTE(review): off_ (signed off_t) is compared with the unsigned
   kv_size(); a negative offset converts to a huge unsigned value and
   so yields 0 - safe, but implicit. */
static
size_t
readdir_buf_size(fuse_dirents_t *d_,
                 size_t size_,
                 off_t off_)
{
  if(off_ >= kv_size(d_->offs))
    return 0;
  if((kv_A(d_->offs,off_) + size_) > kv_size(d_->data))
    return (kv_size(d_->data) - kv_A(d_->offs,off_));
  return size_;
}
  2118. static
  2119. char*
  2120. readdir_buf(fuse_dirents_t *d_,
  2121. off_t off_)
  2122. {
  2123. size_t i;
  2124. i = kv_A(d_->offs,off_);
  2125. return &kv_A(d_->data,i);
  2126. }
/* FUSE_READDIR handler. The full listing is materialized into dh->d
   on the first read (offset 0) or when the cached buffer is empty;
   subsequent reads just slice the cached buffer by offset. dh->lock
   serializes concurrent readers sharing the handle. */
static
void
fuse_lib_readdir(fuse_req_t req_,
                 struct fuse_in_header *hdr_)
{
  int rv;
  size_t size;
  struct fuse *f;
  fuse_dirents_t *d;
  struct fuse_dh *dh;
  fuse_file_info_t ffi = {0};
  fuse_file_info_t llffi = {0};
  struct fuse_read_in *arg;

  arg = (fuse_read_in*)fuse_hdr_arg(hdr_);
  size = arg->size;
  llffi.fh = arg->fh;

  f = req_fuse_prepare(req_);

  dh = get_dirhandle(&llffi,&ffi);
  d = &dh->d;

  pthread_mutex_lock(&dh->lock);

  rv = 0;
  /* (Re)fill the cache at the start of a listing or if it's empty. */
  if((arg->offset == 0) || (kv_size(d->data) == 0))
    rv = f->fs->op.readdir(&ffi,d);

  if(rv)
    {
      fuse_reply_err(req_,rv);
      goto out;
    }

  size = readdir_buf_size(d,size,arg->offset);

  fuse_reply_buf(req_,
                 readdir_buf(d,arg->offset),
                 size);

 out:
  pthread_mutex_unlock(&dh->lock);
}
/* FUSE_READDIRPLUS handler: identical flow to fuse_lib_readdir but
   the filesystem fills the buffer with entry+attr records via its
   readdir_plus op. */
static
void
fuse_lib_readdir_plus(fuse_req_t req_,
                      struct fuse_in_header *hdr_)
{
  int rv;
  size_t size;
  struct fuse *f;
  fuse_dirents_t *d;
  struct fuse_dh *dh;
  fuse_file_info_t ffi = {0};
  fuse_file_info_t llffi = {0};
  struct fuse_read_in *arg;

  arg = (fuse_read_in*)fuse_hdr_arg(hdr_);
  size = arg->size;
  llffi.fh = arg->fh;

  f = req_fuse_prepare(req_);

  dh = get_dirhandle(&llffi,&ffi);
  d = &dh->d;

  pthread_mutex_lock(&dh->lock);

  rv = 0;
  /* (Re)fill the cache at the start of a listing or if it's empty. */
  if((arg->offset == 0) || (kv_size(d->data) == 0))
    rv = f->fs->op.readdir_plus(&ffi,d);

  if(rv)
    {
      fuse_reply_err(req_,rv);
      goto out;
    }

  size = readdir_buf_size(d,size,arg->offset);

  fuse_reply_buf(req_,
                 readdir_buf(d,arg->offset),
                 size);

 out:
  pthread_mutex_unlock(&dh->lock);
}
/* FUSE_RELEASEDIR handler: release the filesystem's directory handle
   and destroy the fuse_dh wrapper. */
static
void
fuse_lib_releasedir(fuse_req_t req_,
                    struct fuse_in_header *hdr_)
{
  struct fuse *f;
  struct fuse_dh *dh;
  fuse_file_info_t ffi;
  fuse_file_info_t llffi = {0};
  struct fuse_release_in *arg;

  arg = (fuse_release_in*)fuse_hdr_arg(hdr_);
  llffi.fh = arg->fh;
  llffi.flags = arg->flags;

  f = req_fuse_prepare(req_);

  dh = get_dirhandle(&llffi,&ffi);

  f->fs->op.releasedir(&ffi);

  /* Done to keep race condition between last readdir reply and the
     unlock: taking and dropping the lock ensures any in-flight
     readdir has finished before the handle is destroyed. */
  pthread_mutex_lock(&dh->lock);
  pthread_mutex_unlock(&dh->lock);
  pthread_mutex_destroy(&dh->lock);

  fuse_dirents_free(&dh->d);
  free(dh);

  fuse_reply_err(req_,0);
}
  2221. static
  2222. void
  2223. fuse_lib_fsyncdir(fuse_req_t req,
  2224. struct fuse_in_header *hdr_)
  2225. {
  2226. int err;
  2227. struct fuse *f;
  2228. fuse_file_info_t ffi;
  2229. fuse_file_info_t llffi = {0};
  2230. struct fuse_fsync_in *arg;
  2231. arg = (fuse_fsync_in*)fuse_hdr_arg(hdr_);
  2232. llffi.fh = arg->fh;
  2233. f = req_fuse_prepare(req);
  2234. get_dirhandle(&llffi,&ffi);
  2235. err = f->fs->op.fsyncdir(&ffi,
  2236. !!(arg->fsync_flags & FUSE_FSYNC_FDATASYNC));
  2237. fuse_reply_err(req,err);
  2238. }
  2239. static
  2240. void
  2241. fuse_lib_statfs(fuse_req_t req,
  2242. struct fuse_in_header *hdr_)
  2243. {
  2244. int err = 0;
  2245. char *path = NULL;
  2246. struct fuse *f;
  2247. struct statvfs buf = {0};
  2248. f = req_fuse_prepare(req);
  2249. if(hdr_->nodeid)
  2250. err = get_path(f,hdr_->nodeid,&path);
  2251. if(!err)
  2252. {
  2253. err = f->fs->op.statfs(path ? path : "/",&buf);
  2254. free_path(f,hdr_->nodeid,path);
  2255. }
  2256. if(!err)
  2257. fuse_reply_statfs(req,&buf);
  2258. else
  2259. fuse_reply_err(req,err);
  2260. }
/* FUSE_SETXATTR handler. When the extended setxattr message was
   negotiated (FUSE_SETXATTR_EXT capable and wanted) the attribute
   name follows the full struct; otherwise it sits at the legacy
   compat offset. The value follows the name's terminating NUL. */
static
void
fuse_lib_setxattr(fuse_req_t req,
                  struct fuse_in_header *hdr_)
{
  int err;
  char *path;
  const char *name;
  const char *value;
  struct fuse *f;
  struct fuse_setxattr_in *arg;

  arg = (fuse_setxattr_in*)fuse_hdr_arg(hdr_);
  if((req->f->conn.capable & FUSE_SETXATTR_EXT) && (req->f->conn.want & FUSE_SETXATTR_EXT))
    name = (const char*)PARAM(arg);
  else
    name = (((char*)arg) + FUSE_COMPAT_SETXATTR_IN_SIZE);
  value = (name + strlen(name) + 1);

  f = req_fuse_prepare(req);

  err = get_path(f,hdr_->nodeid,&path);
  if(!err)
    {
      err = f->fs->op.setxattr(path,name,value,arg->size,arg->flags);
      free_path(f,hdr_->nodeid,path);
    }

  fuse_reply_err(req,err);
}
  2287. static
  2288. int
  2289. common_getxattr(struct fuse *f,
  2290. fuse_req_t req,
  2291. uint64_t ino,
  2292. const char *name,
  2293. char *value,
  2294. size_t size)
  2295. {
  2296. int err;
  2297. char *path;
  2298. err = get_path(f,ino,&path);
  2299. if(!err)
  2300. {
  2301. err = f->fs->op.getxattr(path,name,value,size);
  2302. free_path(f,ino,path);
  2303. }
  2304. return err;
  2305. }
  2306. static
  2307. void
  2308. fuse_lib_getxattr(fuse_req_t req,
  2309. struct fuse_in_header *hdr_)
  2310. {
  2311. int res;
  2312. struct fuse *f;
  2313. const char* name;
  2314. struct fuse_getxattr_in *arg;
  2315. arg = (fuse_getxattr_in*)fuse_hdr_arg(hdr_);
  2316. name = (const char*)PARAM(arg);
  2317. f = req_fuse_prepare(req);
  2318. if(arg->size)
  2319. {
  2320. char *value = (char*)malloc(arg->size);
  2321. if(value == NULL)
  2322. {
  2323. fuse_reply_err(req,ENOMEM);
  2324. return;
  2325. }
  2326. res = common_getxattr(f,req,hdr_->nodeid,name,value,arg->size);
  2327. if(res > 0)
  2328. fuse_reply_buf(req,value,res);
  2329. else
  2330. fuse_reply_err(req,res);
  2331. free(value);
  2332. }
  2333. else
  2334. {
  2335. res = common_getxattr(f,req,hdr_->nodeid,name,NULL,0);
  2336. if(res >= 0)
  2337. fuse_reply_xattr(req,res);
  2338. else
  2339. fuse_reply_err(req,res);
  2340. }
  2341. }
  2342. static
  2343. int
  2344. common_listxattr(struct fuse *f,
  2345. fuse_req_t req,
  2346. uint64_t ino,
  2347. char *list,
  2348. size_t size)
  2349. {
  2350. char *path;
  2351. int err;
  2352. err = get_path(f,ino,&path);
  2353. if(!err)
  2354. {
  2355. err = f->fs->op.listxattr(path,list,size);
  2356. free_path(f,ino,path);
  2357. }
  2358. return err;
  2359. }
  2360. static
  2361. void
  2362. fuse_lib_listxattr(fuse_req_t req,
  2363. struct fuse_in_header *hdr_)
  2364. {
  2365. int res;
  2366. struct fuse *f;
  2367. struct fuse_getxattr_in *arg;
  2368. arg = (fuse_getxattr_in*)fuse_hdr_arg(hdr_);
  2369. f = req_fuse_prepare(req);
  2370. if(arg->size)
  2371. {
  2372. char *list = (char*)malloc(arg->size);
  2373. if(list == NULL)
  2374. {
  2375. fuse_reply_err(req,ENOMEM);
  2376. return;
  2377. }
  2378. res = common_listxattr(f,req,hdr_->nodeid,list,arg->size);
  2379. if(res > 0)
  2380. fuse_reply_buf(req,list,res);
  2381. else
  2382. fuse_reply_err(req,res);
  2383. free(list);
  2384. }
  2385. else
  2386. {
  2387. res = common_listxattr(f,req,hdr_->nodeid,NULL,0);
  2388. if(res >= 0)
  2389. fuse_reply_xattr(req,res);
  2390. else
  2391. fuse_reply_err(req,res);
  2392. }
  2393. }
  2394. static
  2395. void
  2396. fuse_lib_removexattr(fuse_req_t req,
  2397. const struct fuse_in_header *hdr_)
  2398. {
  2399. int err;
  2400. char *path;
  2401. const char *name;
  2402. struct fuse *f;
  2403. name = (const char*)fuse_hdr_arg(hdr_);
  2404. f = req_fuse_prepare(req);
  2405. err = get_path(f,hdr_->nodeid,&path);
  2406. if(!err)
  2407. {
  2408. err = f->fs->op.removexattr(path,name);
  2409. free_path(f,hdr_->nodeid,path);
  2410. }
  2411. fuse_reply_err(req,err);
  2412. }
  2413. static
  2414. void
  2415. fuse_lib_copy_file_range(fuse_req_t req_,
  2416. const struct fuse_in_header *hdr_)
  2417. {
  2418. ssize_t rv;
  2419. struct fuse *f;
  2420. fuse_file_info_t ffi_in = {0};
  2421. fuse_file_info_t ffi_out = {0};
  2422. const struct fuse_copy_file_range_in *arg;
  2423. arg = (fuse_copy_file_range_in*)fuse_hdr_arg(hdr_);
  2424. ffi_in.fh = arg->fh_in;
  2425. ffi_out.fh = arg->fh_out;
  2426. f = req_fuse_prepare(req_);
  2427. rv = f->fs->op.copy_file_range(&ffi_in,
  2428. arg->off_in,
  2429. &ffi_out,
  2430. arg->off_out,
  2431. arg->len,
  2432. arg->flags);
  2433. if(rv >= 0)
  2434. fuse_reply_write(req_,rv);
  2435. else
  2436. fuse_reply_err(req_,rv);
  2437. }
  2438. static
  2439. void
  2440. fuse_lib_setupmapping(fuse_req_t req_,
  2441. const struct fuse_in_header *hdr_)
  2442. {
  2443. fuse_reply_err(req_,ENOSYS);
  2444. }
  2445. static
  2446. void
  2447. fuse_lib_removemapping(fuse_req_t req_,
  2448. const struct fuse_in_header *hdr_)
  2449. {
  2450. fuse_reply_err(req_,ENOSYS);
  2451. }
  2452. static
  2453. void
  2454. fuse_lib_syncfs(fuse_req_t req_,
  2455. const struct fuse_in_header *hdr_)
  2456. {
  2457. fuse_reply_err(req_,ENOSYS);
  2458. }
// TODO: This is just a copy of fuse_lib_create. Needs to be rewritten
// so a nameless node can be setup.
// name is always '/'
// nodeid is the base directory
static
void
fuse_lib_tmpfile(fuse_req_t                   req_,
                 const struct fuse_in_header *hdr_)
{
  int err;
  char *path;
  struct fuse *f;
  const char *name;
  fuse_file_info_t ffi = {0};
  struct fuse_entry_param e;
  struct fuse_create_in *arg;

  arg  = (fuse_create_in*)fuse_hdr_arg(hdr_);
  name = (const char*)PARAM(arg);

  ffi.flags = arg->flags;

  /* Pre-7.12 kernels send the smaller fuse_open_in (no umask field);
     the filename then starts right after that struct. */
  if(req_->f->conn.proto_minor >= 12)
    req_->ctx.umask = arg->umask;
  else
    name = (char*)arg + sizeof(struct fuse_open_in);

  f = req_fuse_prepare(req_);

  err = get_path_name(f,hdr_->nodeid,name,&path);
  if(!err)
    {
      err = f->fs->op.tmpfile(path,arg->mode,&ffi);
      if(!err)
        {
          /* Populate the entry for the new node; on failure the open
             handle must be released to avoid a leak. */
          err = lookup_path(f,hdr_->nodeid,name,path,&e,&ffi);
          if(err)
            {
              f->fs->op.release(&ffi);
            }
          else if(!S_ISREG(e.attr.st_mode))
            {
              /* tmpfile must produce a regular file. */
              err = -EIO;
              f->fs->op.release(&ffi);
              forget_node(f,e.ino,1);
            }
        }
    }

  if(!err)
    {
      pthread_mutex_lock(&f->lock);
      get_node(f,e.ino)->open_count++;
      pthread_mutex_unlock(&f->lock);

      if(fuse_reply_create(req_,&e,&ffi) == -ENOENT)
        {
          /* The open syscall was interrupted,so it
             must be cancelled */
          fuse_do_release(f,e.ino,&ffi);
          forget_node(f,e.ino,1);
        }
    }
  else
    {
      fuse_reply_err(req_,err);
    }

  /* NOTE(review): reached even when get_path_name failed — presumably
     get_path_name leaves *path NULL on error and free_path tolerates
     that; confirm before changing. */
  free_path(f,hdr_->nodeid,path);
}
  2521. static
  2522. lock_t*
  2523. locks_conflict(node_t *node,
  2524. const lock_t *lock)
  2525. {
  2526. lock_t *l;
  2527. for(l = node->locks; l; l = l->next)
  2528. if(l->owner != lock->owner &&
  2529. lock->start <= l->end && l->start <= lock->end &&
  2530. (l->type == F_WRLCK || lock->type == F_WRLCK))
  2531. break;
  2532. return l;
  2533. }
  2534. static
  2535. void
  2536. delete_lock(lock_t **lockp)
  2537. {
  2538. lock_t *l = *lockp;
  2539. *lockp = l->next;
  2540. free(l);
  2541. }
  2542. static
  2543. void
  2544. insert_lock(lock_t **pos,
  2545. lock_t *lock)
  2546. {
  2547. lock->next = *pos;
  2548. *pos = lock;
  2549. }
/* Fold `lock` into the node's per-owner lock list, coalescing ranges
   of the same type and carving/splitting ranges of a different type
   (POSIX fcntl lock semantics).  Returns 0 on success or -ENOLCK if
   the up-to-two extra records needed in the worst case cannot be
   allocated up front.  Caller must hold f->lock. */
static
int
locks_insert(node_t *node,
             lock_t *lock)
{
  lock_t **lp;
  lock_t *newl1 = NULL;
  lock_t *newl2 = NULL;

  /* A whole-file unlock only removes entries, so skip allocation. */
  if(lock->type != F_UNLCK || lock->start != 0 || lock->end != OFFSET_MAX)
    {
      newl1 = (lock_t*)malloc(sizeof(lock_t));
      newl2 = (lock_t*)malloc(sizeof(lock_t));

      if(!newl1 || !newl2)
        {
          free(newl1);
          free(newl2);
          return -ENOLCK;
        }
    }

  for(lp = &node->locks; *lp;)
    {
      lock_t *l = *lp;

      if(l->owner != lock->owner)
        goto skip;

      if(lock->type == l->type)
        {
          /* Same type: grow `lock` to absorb any overlapping or
             adjacent range, then drop the absorbed record. */
          if(l->end < lock->start - 1)
            goto skip;
          if(lock->end < l->start - 1)
            break;
          if(l->start <= lock->start && lock->end <= l->end)
            goto out;              /* already fully covered */
          if(l->start < lock->start)
            lock->start = l->start;
          if(lock->end < l->end)
            lock->end = l->end;
          goto delete_lock;
        }
      else
        {
          /* Different type: remove the part of `l` that `lock`
             covers — delete, trim tail, trim head, or split. */
          if(l->end < lock->start)
            goto skip;
          if(lock->end < l->start)
            break;
          if(lock->start <= l->start && l->end <= lock->end)
            goto delete_lock;      /* fully replaced */
          if(l->end <= lock->end)
            {
              l->end = lock->start - 1;   /* trim tail of l */
              goto skip;
            }
          if(lock->start <= l->start)
            {
              l->start = lock->end + 1;   /* trim head of l */
              break;
            }
          /* `lock` sits strictly inside `l`: split l around it. */
          *newl2 = *l;
          newl2->start = lock->end + 1;
          l->end = lock->start - 1;
          insert_lock(&l->next,newl2);
          newl2 = NULL;
        }
    skip:
      lp = &l->next;
      continue;
    delete_lock:
      delete_lock(lp);
    }

  /* Insert the (possibly merged) record unless this was an unlock. */
  if(lock->type != F_UNLCK)
    {
      *newl1 = *lock;
      insert_lock(lp,newl1);
      newl1 = NULL;
    }
 out:
  free(newl1);
  free(newl2);
  return 0;
}
  2629. static
  2630. void
  2631. flock_to_lock(struct flock *flock,
  2632. lock_t *lock)
  2633. {
  2634. memset(lock,0,sizeof(lock_t));
  2635. lock->type = flock->l_type;
  2636. lock->start = flock->l_start;
  2637. lock->end = flock->l_len ? flock->l_start + flock->l_len - 1 : OFFSET_MAX;
  2638. lock->pid = flock->l_pid;
  2639. }
  2640. static
  2641. void
  2642. lock_to_flock(lock_t *lock,
  2643. struct flock *flock)
  2644. {
  2645. flock->l_type = lock->type;
  2646. flock->l_start = lock->start;
  2647. flock->l_len = (lock->end == OFFSET_MAX) ? 0 : lock->end - lock->start + 1;
  2648. flock->l_pid = lock->pid;
  2649. }
/* Common FLUSH handling: call op.flush, then issue a whole-file unlock
   via op.lock and mirror it in the local lock bookkeeping.  If the
   filesystem implements op.lock (lock call != -ENOSYS), FLUSH must
   succeed even when op.flush itself is unimplemented. */
static
int
fuse_flush_common(struct fuse      *f,
                  fuse_req_t        req,
                  uint64_t          ino,
                  fuse_file_info_t *fi)
{
  struct flock lock;
  lock_t l;
  int err;
  int errlock;

  memset(&lock,0,sizeof(lock));
  lock.l_type = F_UNLCK;
  lock.l_whence = SEEK_SET;
  /* l_start/l_len stay 0 => unlock the entire file. */

  err = f->fs->op.flush(fi);
  errlock = f->fs->op.lock(fi,F_SETLK,&lock);

  if(errlock != -ENOSYS)
    {
      /* Drop this owner's locks from the local list as well. */
      flock_to_lock(&lock,&l);
      l.owner = fi->lock_owner;
      pthread_mutex_lock(&f->lock);
      locks_insert(get_node(f,ino),&l);
      pthread_mutex_unlock(&f->lock);

      /* if op.lock() is defined FLUSH is needed regardless
         of op.flush() */
      if(err == -ENOSYS)
        err = 0;
    }

  return err;
}
/* RELEASE handler: optionally flush (when the kernel set
   FUSE_RELEASE_FLUSH), then drop the open handle. */
static
void
fuse_lib_release(fuse_req_t             req,
                 struct fuse_in_header *hdr_)
{
  int err = 0;
  struct fuse *f;
  fuse_file_info_t ffi = {0};
  struct fuse_release_in *arg;

  arg = (fuse_release_in*)fuse_hdr_arg(hdr_);

  ffi.fh    = arg->fh;
  ffi.flags = arg->flags;

  if(req->f->conn.proto_minor >= 8)
    {
      ffi.flush      = !!(arg->release_flags & FUSE_RELEASE_FLUSH);
      ffi.lock_owner = arg->lock_owner;
    }
  else
    {
      /* NOTE(review): pre-7.8 path marks this a flock release and still
         reads arg->lock_owner — verify that field is valid for such old
         protocol versions before relying on it. */
      ffi.flock_release = 1;
      ffi.lock_owner    = arg->lock_owner;
    }

  f = req_fuse_prepare(req);

  if(ffi.flush)
    {
      err = fuse_flush_common(f,req,hdr_->nodeid,&ffi);
      if(err == -ENOSYS)
        err = 0;
    }

  fuse_do_release(f,hdr_->nodeid,&ffi);

  fuse_reply_err(req,err);
}
  2712. static
  2713. void
  2714. fuse_lib_flush(fuse_req_t req,
  2715. struct fuse_in_header *hdr_)
  2716. {
  2717. int err;
  2718. struct fuse *f;
  2719. fuse_file_info_t ffi = {0};
  2720. struct fuse_flush_in *arg;
  2721. arg = (fuse_flush_in*)fuse_hdr_arg(hdr_);
  2722. ffi.fh = arg->fh;
  2723. ffi.flush = 1;
  2724. if(req->f->conn.proto_minor >= 7)
  2725. ffi.lock_owner = arg->lock_owner;
  2726. f = req_fuse_prepare(req);
  2727. err = fuse_flush_common(f,req,hdr_->nodeid,&ffi);
  2728. fuse_reply_err(req,err);
  2729. }
  2730. static
  2731. int
  2732. fuse_lock_common(fuse_req_t req,
  2733. uint64_t ino,
  2734. fuse_file_info_t *fi,
  2735. struct flock *lock,
  2736. int cmd)
  2737. {
  2738. int err;
  2739. struct fuse *f = req_fuse_prepare(req);
  2740. err = f->fs->op.lock(fi,cmd,lock);
  2741. return err;
  2742. }
  2743. static
  2744. void
  2745. convert_fuse_file_lock(const struct fuse_file_lock *fl,
  2746. struct flock *flock)
  2747. {
  2748. memset(flock, 0, sizeof(struct flock));
  2749. flock->l_type = fl->type;
  2750. flock->l_whence = SEEK_SET;
  2751. flock->l_start = fl->start;
  2752. if (fl->end == OFFSET_MAX)
  2753. flock->l_len = 0;
  2754. else
  2755. flock->l_len = fl->end - fl->start + 1;
  2756. flock->l_pid = fl->pid;
  2757. }
/* GETLK handler: first test the request against the locally tracked
   locks; only when no local conflict exists is the backing filesystem
   consulted.  Replies with the (possibly updated) flock describing the
   conflicting lock, or the request unchanged if none. */
static
void
fuse_lib_getlk(fuse_req_t                   req,
               const struct fuse_in_header *hdr_)
{
  int err;
  struct fuse *f;
  lock_t lk;
  struct flock flk;
  lock_t *conflict;
  fuse_file_info_t ffi = {0};
  const struct fuse_lk_in *arg;

  arg = (fuse_lk_in*)fuse_hdr_arg(hdr_);

  ffi.fh         = arg->fh;
  ffi.lock_owner = arg->owner;

  convert_fuse_file_lock(&arg->lk,&flk);

  f = req_fuse(req);

  flock_to_lock(&flk,&lk);
  lk.owner = ffi.lock_owner;

  pthread_mutex_lock(&f->lock);
  conflict = locks_conflict(get_node(f,hdr_->nodeid),&lk);
  if(conflict)
    lock_to_flock(conflict,&flk);   /* report the conflicting range */
  pthread_mutex_unlock(&f->lock);

  if(!conflict)
    err = fuse_lock_common(req,hdr_->nodeid,&ffi,&flk,F_GETLK);
  else
    err = 0;

  if(!err)
    fuse_reply_lock(req,&flk);
  else
    fuse_reply_err(req,err);
}
  2791. static
  2792. void
  2793. fuse_lib_setlk(fuse_req_t req,
  2794. uint64_t ino,
  2795. fuse_file_info_t *fi,
  2796. struct flock *lock,
  2797. int sleep)
  2798. {
  2799. int err = fuse_lock_common(req,ino,fi,lock,
  2800. sleep ? F_SETLKW : F_SETLK);
  2801. if(!err)
  2802. {
  2803. struct fuse *f = req_fuse(req);
  2804. lock_t l;
  2805. flock_to_lock(lock,&l);
  2806. l.owner = fi->lock_owner;
  2807. pthread_mutex_lock(&f->lock);
  2808. locks_insert(get_node(f,ino),&l);
  2809. pthread_mutex_unlock(&f->lock);
  2810. }
  2811. fuse_reply_err(req,err);
  2812. }
  2813. static
  2814. void
  2815. fuse_lib_flock(fuse_req_t req,
  2816. uint64_t ino,
  2817. fuse_file_info_t *fi,
  2818. int op)
  2819. {
  2820. int err;
  2821. struct fuse *f = req_fuse_prepare(req);
  2822. err = f->fs->op.flock(fi,op);
  2823. fuse_reply_err(req,err);
  2824. }
  2825. static
  2826. void
  2827. fuse_lib_bmap(fuse_req_t req,
  2828. const struct fuse_in_header *hdr_)
  2829. {
  2830. int err;
  2831. char *path;
  2832. struct fuse *f;
  2833. uint64_t block;
  2834. const struct fuse_bmap_in *arg;
  2835. arg = (fuse_bmap_in*)fuse_hdr_arg(hdr_);
  2836. block = arg->block;
  2837. f = req_fuse_prepare(req);
  2838. err = get_path(f,hdr_->nodeid,&path);
  2839. if(!err)
  2840. {
  2841. err = f->fs->op.bmap(path,arg->blocksize,&block);
  2842. free_path(f,hdr_->nodeid,path);
  2843. }
  2844. if(!err)
  2845. fuse_reply_bmap(req,block);
  2846. else
  2847. fuse_reply_err(req,err);
  2848. }
/* IOCTL handler (restricted mode only).  The kernel pre-copies the
   input buffer and expects the output copied back; in/out buffers of
   equal size may be aliased.  Unrestricted ioctls are refused. */
static
void
fuse_lib_ioctl(fuse_req_t                   req,
               const struct fuse_in_header *hdr_)
{
  int err;
  char *out_buf = NULL;
  struct fuse *f = req_fuse_prepare(req);
  fuse_file_info_t ffi;
  fuse_file_info_t llffi = {0};
  const void *in_buf;
  uint32_t out_size;
  const struct fuse_ioctl_in *arg;

  arg = (fuse_ioctl_in*)fuse_hdr_arg(hdr_);

  /* Directory ioctls require the capability to have been negotiated. */
  if((arg->flags & FUSE_IOCTL_DIR) && !(req->f->conn.want & FUSE_CAP_IOCTL_DIR))
    {
      fuse_reply_err(req,ENOTTY);
      return;
    }

  /* 32-bit userspace talking to a 64-bit kernel: remember that the
     ioctl argument is 64-bit so the reply is encoded correctly. */
  if((sizeof(void*) == 4) &&
     (req->f->conn.proto_minor >= 16) &&
     !(arg->flags & FUSE_IOCTL_32BIT))
    {
      req->ioctl_64bit = 1;
    }

  llffi.fh = arg->fh;
  out_size = arg->out_size;
  in_buf   = (arg->in_size ? PARAM(arg) : NULL);

  err = -EPERM;
  if(arg->flags & FUSE_IOCTL_UNRESTRICTED)
    goto err;

  if(arg->flags & FUSE_IOCTL_DIR)
    get_dirhandle(&llffi,&ffi);
  else
    ffi = llffi;

  if(out_size)
    {
      err = -ENOMEM;
      out_buf = (char*)malloc(out_size);
      if(!out_buf)
        goto err;
    }

  /* Restricted ioctls have either no data, or equal in/out sizes. */
  assert(!arg->in_size || !out_size || arg->in_size == out_size);

  if(out_buf)
    memcpy(out_buf,in_buf,arg->in_size);

  /* `?:` is a GNU extension: pass out_buf, or the input buffer when no
     output buffer was needed. */
  err = f->fs->op.ioctl(&ffi,
                        arg->cmd,
                        (void*)(uintptr_t)arg->arg,
                        arg->flags,
                        out_buf ?: (void *)in_buf,
                        &out_size);
  if(err < 0)
    goto err;

  fuse_reply_ioctl(req,err,out_buf,out_size);
  goto out;
 err:
  fuse_reply_err(req,err);
 out:
  free(out_buf);
}
/* POLL handler.  When the kernel asks to be notified, allocate a poll
   handle and hand it to the filesystem; ownership of `ph` passes to
   op.poll / fuse_notify_poll machinery. */
static
void
fuse_lib_poll(fuse_req_t                   req,
              const struct fuse_in_header *hdr_)
{
  int err;
  struct fuse *f = req_fuse_prepare(req);
  unsigned revents = 0;
  fuse_file_info_t ffi = {0};
  fuse_pollhandle_t *ph = NULL;
  const struct fuse_poll_in *arg;

  arg = (fuse_poll_in*)fuse_hdr_arg(hdr_);

  ffi.fh = arg->fh;

  if(arg->flags & FUSE_POLL_SCHEDULE_NOTIFY)
    {
      ph = (fuse_pollhandle_t*)malloc(sizeof(fuse_pollhandle_t));
      if(ph == NULL)
        {
          fuse_reply_err(req,ENOMEM);
          return;
        }

      /* kh is the kernel's handle used in later notifications. */
      ph->kh = arg->kh;
      ph->ch = req->ch;
      ph->f  = req->f;
    }

  err = f->fs->op.poll(&ffi,ph,&revents);
  if(!err)
    fuse_reply_poll(req,revents);
  else
    fuse_reply_err(req,err);
}
  2940. static
  2941. void
  2942. fuse_lib_fallocate(fuse_req_t req,
  2943. const struct fuse_in_header *hdr_)
  2944. {
  2945. int err;
  2946. struct fuse *f;
  2947. fuse_file_info_t ffi = {0};
  2948. const struct fuse_fallocate_in *arg;
  2949. arg = (fuse_fallocate_in*)fuse_hdr_arg(hdr_);
  2950. ffi.fh = arg->fh;
  2951. f = req_fuse_prepare(req);
  2952. err = f->fs->op.fallocate(&ffi,
  2953. arg->mode,
  2954. arg->offset,
  2955. arg->length);
  2956. fuse_reply_err(req,err);
  2957. }
  2958. static
  2959. int
  2960. remembered_node_cmp(const void *a_,
  2961. const void *b_)
  2962. {
  2963. const remembered_node_t *a = (const remembered_node_t*)a_;
  2964. const remembered_node_t *b = (const remembered_node_t*)b_;
  2965. return (a->time - b->time);
  2966. }
  2967. static
  2968. void
  2969. remembered_nodes_sort(struct fuse *f_)
  2970. {
  2971. pthread_mutex_lock(&f_->lock);
  2972. qsort(&kv_first(f_->remembered_nodes),
  2973. kv_size(f_->remembered_nodes),
  2974. sizeof(remembered_node_t),
  2975. remembered_node_cmp);
  2976. pthread_mutex_unlock(&f_->lock);
  2977. }
#define MAX_PRUNE 100
#define MAX_CHECK 1000

/* Prune a bounded batch of expired remembered nodes starting at
   *offset_, holding f_->lock only for the batch.  Returns the number
   pruned; sets *offset_ to -1 when the scan completed (nothing more to
   check this round), otherwise leaves it positioned for the next call. */
int
fuse_prune_some_remembered_nodes(struct fuse *f_,
                                 int         *offset_)
{
  time_t now;
  int pruned;
  int checked;

  pthread_mutex_lock(&f_->lock);

  pruned = 0;
  checked = 0;
  now = current_time();
  while(*offset_ < kv_size(f_->remembered_nodes))
    {
      time_t age;
      remembered_node_t *fn = &kv_A(f_->remembered_nodes,*offset_);

      /* Bound both work done and entries examined per batch. */
      if(pruned >= MAX_PRUNE)
        break;
      if(checked >= MAX_CHECK)
        break;

      checked++;
      age = (now - fn->time);
      /* Vector is sorted oldest-first: the first too-young entry ends
         the scan. */
      if(f_->conf.remember > age)
        break;

      assert(fn->node->nlookup == 1);

      /* Don't forget active directories */
      if(fn->node->refctr > 1)
        {
          (*offset_)++;
          continue;
        }

      fn->node->nlookup = 0;
      unref_node(f_,fn->node);
      kv_delete(f_->remembered_nodes,*offset_);
      pruned++;
    }

  pthread_mutex_unlock(&f_->lock);

  /* Hit neither limit => the whole eligible range was scanned. */
  if((pruned < MAX_PRUNE) && (checked < MAX_CHECK))
    *offset_ = -1;

  return pruned;
}

#undef MAX_PRUNE
#undef MAX_CHECK
  3022. static
  3023. void
  3024. sleep_100ms(void)
  3025. {
  3026. const struct timespec ms100 = {0,100 * 1000000};
  3027. nanosleep(&ms100,NULL);
  3028. }
  3029. void
  3030. fuse_prune_remembered_nodes(struct fuse *f_)
  3031. {
  3032. int offset;
  3033. int pruned;
  3034. offset = 0;
  3035. pruned = 0;
  3036. for(;;)
  3037. {
  3038. pruned += fuse_prune_some_remembered_nodes(f_,&offset);
  3039. if(offset >= 0)
  3040. {
  3041. sleep_100ms();
  3042. continue;
  3043. }
  3044. break;
  3045. }
  3046. if(pruned > 0)
  3047. remembered_nodes_sort(f_);
  3048. }
/* Dispatch table mapping low-level FUSE requests to the path-based
   handlers above.  retrieve_reply is intentionally unimplemented. */
static struct fuse_lowlevel_ops fuse_path_ops =
  {
    .access          = fuse_lib_access,
    .bmap            = fuse_lib_bmap,
    .copy_file_range = fuse_lib_copy_file_range,
    .create          = fuse_lib_create,
    .destroy         = fuse_lib_destroy,
    .fallocate       = fuse_lib_fallocate,
    .flock           = fuse_lib_flock,
    .flush           = fuse_lib_flush,
    .forget          = fuse_lib_forget,
    .forget_multi    = fuse_lib_forget_multi,
    .fsync           = fuse_lib_fsync,
    .fsyncdir        = fuse_lib_fsyncdir,
    .getattr         = fuse_lib_getattr,
    .getlk           = fuse_lib_getlk,
    .getxattr        = fuse_lib_getxattr,
    .init            = fuse_lib_init,
    .ioctl           = fuse_lib_ioctl,
    .link            = fuse_lib_link,
    .listxattr       = fuse_lib_listxattr,
    .lookup          = fuse_lib_lookup,
    .mkdir           = fuse_lib_mkdir,
    .mknod           = fuse_lib_mknod,
    .open            = fuse_lib_open,
    .opendir         = fuse_lib_opendir,
    .poll            = fuse_lib_poll,
    .read            = fuse_lib_read,
    .readdir         = fuse_lib_readdir,
    .readdir_plus    = fuse_lib_readdir_plus,
    .readlink        = fuse_lib_readlink,
    .release         = fuse_lib_release,
    .releasedir      = fuse_lib_releasedir,
    .removemapping   = fuse_lib_removemapping,
    .removexattr     = fuse_lib_removexattr,
    .rename          = fuse_lib_rename,
    .retrieve_reply  = NULL,
    .rmdir           = fuse_lib_rmdir,
    .setattr         = fuse_lib_setattr,
    .setlk           = fuse_lib_setlk,
    .setupmapping    = fuse_lib_setupmapping,
    .setxattr        = fuse_lib_setxattr,
    .statfs          = fuse_lib_statfs,
    .symlink         = fuse_lib_symlink,
    .syncfs          = fuse_lib_syncfs,
    .tmpfile         = fuse_lib_tmpfile,
    .unlink          = fuse_lib_unlink,
    .write           = fuse_lib_write,
  };
/* Public wrapper: forward a poll notification to the lowlevel layer. */
int
fuse_notify_poll(fuse_pollhandle_t *ph)
{
  return fuse_lowlevel_notify_poll(ph);
}
/* Return non-zero once the session has been told to exit. */
int
fuse_exited(struct fuse *f)
{
  return fuse_session_exited(f->se);
}
/* Accessor for the underlying lowlevel session. */
struct fuse_session*
fuse_get_session(struct fuse *f)
{
  return f->se;
}
/* Request session shutdown; the main loop observes the flag. */
void
fuse_exit(struct fuse *f)
{
  f->se->exited = 1;
}
/* Return the per-thread request context (uid/gid/pid of the caller). */
struct fuse_context*
fuse_get_context(void)
{
  return &fuse_get_context_internal()->ctx;
}
/* Option-parser key for -h/--help. */
enum {
  KEY_HELP,
};
/* Map a mount-option template to a field of struct fuse_config. */
#define FUSE_LIB_OPT(t,p,v) { t,offsetof(struct fuse_config,p),v }

/* High-level library options.  "debug"/"-d" are both recorded locally
   and kept (FUSE_OPT_KEY_KEEP) so lower layers see them too.  The
   "umask="/"uid="/"gid=" templates appear twice: once to set the
   corresponding set_* flag, once to parse the value. */
static const struct fuse_opt fuse_lib_opts[] =
  {
    FUSE_OPT_KEY("-h",          KEY_HELP),
    FUSE_OPT_KEY("--help",      KEY_HELP),
    FUSE_OPT_KEY("debug",       FUSE_OPT_KEY_KEEP),
    FUSE_OPT_KEY("-d",          FUSE_OPT_KEY_KEEP),
    FUSE_LIB_OPT("debug",       debug,1),
    FUSE_LIB_OPT("-d",          debug,1),
    FUSE_LIB_OPT("nogc",        nogc,1),
    FUSE_LIB_OPT("umask=",      set_mode,1),
    FUSE_LIB_OPT("umask=%o",    umask,0),
    FUSE_LIB_OPT("uid=",        set_uid,1),
    FUSE_LIB_OPT("uid=%d",      uid,0),
    FUSE_LIB_OPT("gid=",        set_gid,1),
    FUSE_LIB_OPT("gid=%d",      gid,0),
    FUSE_LIB_OPT("noforget",    remember,-1),
    FUSE_LIB_OPT("remember=%u", remember,0),
    FUSE_OPT_END
  };
  3146. static void fuse_lib_help(void)
  3147. {
  3148. fprintf(stderr,
  3149. " -o umask=M set file permissions (octal)\n"
  3150. " -o uid=N set file owner\n"
  3151. " -o gid=N set file group\n"
  3152. " -o noforget never forget cached inodes\n"
  3153. " -o remember=T remember cached inodes for T seconds (0s)\n"
  3154. " -o threads=NUM number of worker threads. 0 = autodetect.\n"
  3155. " Negative values autodetect then divide by\n"
  3156. " absolute value. default = 0\n"
  3157. "\n");
  3158. }
  3159. static
  3160. int
  3161. fuse_lib_opt_proc(void *data,
  3162. const char *arg,
  3163. int key,
  3164. struct fuse_args *outargs)
  3165. {
  3166. (void)arg; (void)outargs;
  3167. if(key == KEY_HELP)
  3168. {
  3169. struct fuse_config *conf = (struct fuse_config *)data;
  3170. fuse_lib_help();
  3171. conf->help = 1;
  3172. }
  3173. return 1;
  3174. }
/* True if `opt` is consumed by either the lowlevel or high-level
   option tables. */
int
fuse_is_lib_option(const char *opt)
{
  return fuse_lowlevel_is_lib_option(opt) || fuse_opt_match(fuse_lib_opts,opt);
}
  3180. struct fuse_fs*
  3181. fuse_fs_new(const struct fuse_operations *op,
  3182. size_t op_size)
  3183. {
  3184. struct fuse_fs *fs;
  3185. if(sizeof(struct fuse_operations) < op_size)
  3186. {
  3187. fprintf(stderr,"fuse: warning: library too old,some operations may not not work\n");
  3188. op_size = sizeof(struct fuse_operations);
  3189. }
  3190. fs = (struct fuse_fs *)calloc(1,sizeof(struct fuse_fs));
  3191. if(!fs)
  3192. {
  3193. fprintf(stderr,"fuse: failed to allocate fuse_fs object\n");
  3194. return NULL;
  3195. }
  3196. if(op)
  3197. memcpy(&fs->op,op,op_size);
  3198. return fs;
  3199. }
  3200. static
  3201. int
  3202. node_table_init(struct node_table *t)
  3203. {
  3204. t->size = NODE_TABLE_MIN_SIZE;
  3205. t->array = (node_t **)calloc(1,sizeof(node_t *) * t->size);
  3206. if(t->array == NULL)
  3207. {
  3208. fprintf(stderr,"fuse: memory allocation failed\n");
  3209. return -1;
  3210. }
  3211. t->use = 0;
  3212. t->split = 0;
  3213. return 0;
  3214. }
/* Return the process-wide singleton fuse object (zero-initialized
   static storage; never freed). */
static
struct fuse*
fuse_get_fuse_obj()
{
  static struct fuse f = {0};

  return &f;
}
/* Write a timestamped snapshot of node-table, node-pool and msgbuf
   statistics to `file_` in a simple "key: value" text format. */
static
void
metrics_log_nodes_info(struct fuse *f_,
                       FILE        *file_)
{
  char buf[1024];
  char time_str[64];
  struct tm tm;
  struct timeval tv;
  uint64_t sizeof_node;
  float node_usage_ratio;
  uint64_t node_slab_count;
  uint64_t node_avail_objs;
  uint64_t node_total_alloc_mem;

  gettimeofday(&tv,NULL);
  localtime_r(&tv.tv_sec,&tm);
  strftime(time_str,sizeof(time_str),"%Y-%m-%dT%H:%M:%S.000%z",&tm);

  sizeof_node = sizeof(node_t);

  /* Sample the node memory pool under its lock for a consistent view. */
  lfmp_t *lfmp;
  lfmp = node_lfmp();
  lfmp_lock(lfmp);
  node_slab_count      = fmp_slab_count(&lfmp->fmp);
  node_usage_ratio     = fmp_slab_usage_ratio(&lfmp->fmp);
  node_avail_objs      = fmp_avail_objs(&lfmp->fmp);
  node_total_alloc_mem = fmp_total_allocated_memory(&lfmp->fmp);
  lfmp_unlock(lfmp);

  snprintf(buf,sizeof(buf),
           "time: %s\n"
           "sizeof(node): %" PRIu64 "\n"
           "node id_table size: %" PRIu64 "\n"
           "node id_table usage: %" PRIu64 "\n"
           "node id_table total allocated memory: %" PRIu64 "\n"
           "node name_table size: %" PRIu64 "\n"
           "node name_table usage: %" PRIu64 "\n"
           "node name_table total allocated memory: %" PRIu64 "\n"
           "node memory pool slab count: %" PRIu64 "\n"
           "node memory pool usage ratio: %f\n"
           "node memory pool avail objs: %" PRIu64 "\n"
           "node memory pool total allocated memory: %" PRIu64 "\n"
           "msgbuf bufsize: %" PRIu64 "\n"
           "msgbuf allocation count: %" PRIu64 "\n"
           "msgbuf available count: %" PRIu64 "\n"
           "msgbuf total allocated memory: %" PRIu64 "\n"
           "\n"
           ,
           time_str,
           sizeof_node,
           (uint64_t)f_->id_table.size,
           (uint64_t)f_->id_table.use,
           (uint64_t)(f_->id_table.size * sizeof(node_t*)),
           (uint64_t)f_->name_table.size,
           (uint64_t)f_->name_table.use,
           (uint64_t)(f_->name_table.size * sizeof(node_t*)),
           node_slab_count,
           node_usage_ratio,
           node_avail_objs,
           node_total_alloc_mem,
           msgbuf_get_bufsize(),
           msgbuf_alloc_count(),
           msgbuf_avail_count(),
           msgbuf_alloc_count() * msgbuf_get_bufsize()
           );

  fputs(buf,file_);
}
  3286. static
  3287. void
  3288. metrics_log_nodes_info_to_tmp_dir(struct fuse *f_)
  3289. {
  3290. int rv;
  3291. FILE *file;
  3292. char filepath[256];
  3293. struct stat st;
  3294. char const *mode = "a";
  3295. off_t const max_size = (1024 * 1024);
  3296. sprintf(filepath,"/tmp/mergerfs.%d.info",getpid());
  3297. rv = lstat(filepath,&st);
  3298. if((rv == 0) && (st.st_size > max_size))
  3299. mode = "w";
  3300. file = fopen(filepath,mode);
  3301. if(file == NULL)
  3302. return;
  3303. metrics_log_nodes_info(f_,file);
  3304. fclose(file);
  3305. }
/* Release free heap pages back to the OS where glibc provides
   malloc_trim; a no-op elsewhere. */
static
void
fuse_malloc_trim(void)
{
#ifdef HAVE_MALLOC_TRIM
  malloc_trim(1024 * 1024);
#endif
}
/* Ask the kernel to invalidate the dentry of every node whose parent
   is the mount root (the root itself is skipped).  Walks the whole id
   hash table under f->lock. */
void
fuse_invalidate_all_nodes()
{
  struct fuse *f = fuse_get_fuse_obj();

  syslog(LOG_INFO,"invalidating file entries");

  pthread_mutex_lock(&f->lock);
  for(size_t i = 0; i < f->id_table.size; i++)
    {
      node_t *node;

      for(node = f->id_table.array[i]; node != NULL; node = node->id_next)
        {
          if(node->nodeid == FUSE_ROOT_ID)
            continue;
          /* NOTE(review): assumes every non-root node has a non-NULL
             parent here — confirm against node lifecycle. */
          if(node->parent->nodeid != FUSE_ROOT_ID)
            continue;

          fuse_lowlevel_notify_inval_entry(f->se->ch,
                                           node->parent->nodeid,
                                           node->name,
                                           strlen(node->name));
        }
    }
  pthread_mutex_unlock(&f->lock);
}
  3337. void
  3338. fuse_gc()
  3339. {
  3340. syslog(LOG_INFO,"running thorough garbage collection");
  3341. node_gc();
  3342. msgbuf_gc();
  3343. fuse_malloc_trim();
  3344. }
  3345. void
  3346. fuse_gc1()
  3347. {
  3348. syslog(LOG_INFO,"running basic garbage collection");
  3349. node_gc1();
  3350. msgbuf_gc_10percent();
  3351. fuse_malloc_trim();
  3352. }
  3353. static
  3354. void*
  3355. fuse_maintenance_loop(void *fuse_)
  3356. {
  3357. int loops;
  3358. int sleep_time;
  3359. struct fuse *f = (struct fuse*)fuse_;
  3360. pthread_setname_np(pthread_self(),"fuse.maint");
  3361. loops = 0;
  3362. sleep_time = 60;
  3363. while(1)
  3364. {
  3365. if(remember_nodes(f))
  3366. fuse_prune_remembered_nodes(f);
  3367. if((loops % 15) == 0)
  3368. fuse_gc1();
  3369. if(g_LOG_METRICS)
  3370. metrics_log_nodes_info_to_tmp_dir(f);
  3371. loops++;
  3372. sleep(sleep_time);
  3373. }
  3374. return NULL;
  3375. }
/* Spawn the background maintenance thread; returns fuse_start_thread's
   result (0 on success). */
int
fuse_start_maintenance_thread(struct fuse *f_)
{
  return fuse_start_thread(&f_->maintenance_thread,fuse_maintenance_loop,f_);
}
/* Cancel and join the maintenance thread.  The cancel is issued while
   holding f_->lock so the thread is not cancelled mid-critical-section;
   the join happens after the lock is released. */
void
fuse_stop_maintenance_thread(struct fuse *f_)
{
  pthread_mutex_lock(&f_->lock);
  pthread_cancel(f_->maintenance_thread);
  pthread_mutex_unlock(&f_->lock);
  pthread_join(f_->maintenance_thread,NULL);
}
/* Construct the high-level fuse object: parse options, create the
   lowlevel session, initialize node tables and the root node.  Uses
   goto-based unwinding; returns NULL on any failure. */
struct fuse*
fuse_new_common(struct fuse_chan             *ch,
                struct fuse_args             *args,
                const struct fuse_operations *op,
                size_t                        op_size)
{
  struct fuse *f;
  node_t *root;
  struct fuse_fs *fs;
  struct fuse_lowlevel_ops llop = fuse_path_ops;

  if(fuse_create_context_key() == -1)
    goto out;

  f = fuse_get_fuse_obj();
  if(f == NULL)
    {
      fprintf(stderr,"fuse: failed to allocate fuse object\n");
      goto out_delete_context_key;
    }

  fs = fuse_fs_new(op,op_size);
  if(!fs)
    goto out_free;
  f->fs = fs;

  /* Oh f**k,this is ugly! */
  /* Advertise lock support to the kernel only if the filesystem
     actually implements op.lock. */
  if(!fs->op.lock)
    {
      llop.getlk = NULL;
      llop.setlk = NULL;
    }

  if(fuse_opt_parse(args,&f->conf,fuse_lib_opts,fuse_lib_opt_proc) == -1)
    goto out_free_fs;

  g_LOG_METRICS = f->conf.debug;

  f->se = fuse_lowlevel_new_common(args,&llop,sizeof(llop),f);
  if(f->se == NULL)
    goto out_free_fs;

  fuse_session_add_chan(f->se,ch);

  /* Trace topmost layer by default */
  srand(time(NULL));
  f->nodeid_gen.nodeid = FUSE_ROOT_ID;
  f->nodeid_gen.generation = rand64();
  if(node_table_init(&f->name_table) == -1)
    goto out_free_session;

  if(node_table_init(&f->id_table) == -1)
    goto out_free_name_table;

  fuse_mutex_init(&f->lock);

  kv_init(f->remembered_nodes);

  root = node_alloc();
  if(root == NULL)
    {
      fprintf(stderr,"fuse: memory allocation failed\n");
      goto out_free_id_table;
    }

  root->name = filename_strdup(f,"/");
  root->parent = NULL;
  root->nodeid = FUSE_ROOT_ID;
  inc_nlookup(root);
  hash_id(f,root);

  return f;

 out_free_id_table:
  free(f->id_table.array);
 out_free_name_table:
  free(f->name_table.array);
 out_free_session:
  fuse_session_destroy(f->se);
 out_free_fs:
  /* Horrible compatibility hack to stop the destructor from being
     called on the filesystem without init being called first */
  fs->op.destroy = NULL;
  free(f->fs);
 out_free:
  // free(f);  /* f is static storage (fuse_get_fuse_obj), not heap */
 out_delete_context_key:
  fuse_delete_context_key();
 out:
  return NULL;
}
/* Public constructor: thin wrapper around fuse_new_common. */
struct fuse*
fuse_new(struct fuse_chan             *ch,
         struct fuse_args             *args,
         const struct fuse_operations *op,
         size_t                        op_size)
{
  return fuse_new_common(ch,args,op,op_size);
}
/* Tear down a fuse object: first release any hidden file handles
   (with a minimal context installed for the callbacks), then free all
   nodes, the tables, the lock, the session, and the context key. */
void
fuse_destroy(struct fuse *f)
{
  size_t i;

  if(f->fs)
    {
      struct fuse_context_i *c = fuse_get_context_internal();

      /* Callbacks below may consult the context; give them this fuse. */
      memset(c,0,sizeof(*c));
      c->ctx.fuse = f;

      for(i = 0; i < f->id_table.size; i++)
        {
          node_t *node;

          for(node = f->id_table.array[i]; node != NULL; node = node->id_next)
            {
              if(!node->hidden_fh)
                continue;

              f->fs->op.free_hide(node->hidden_fh);
              node->hidden_fh = 0;
            }
        }
    }

  /* Second pass actually frees the nodes; grab `next` first since
     free_node invalidates the current node. */
  for(i = 0; i < f->id_table.size; i++)
    {
      node_t *node;
      node_t *next;

      for(node = f->id_table.array[i]; node != NULL; node = next)
        {
          next = node->id_next;
          free_node(f,node);
          f->id_table.use--;
        }
    }

  free(f->id_table.array);
  free(f->name_table.array);
  pthread_mutex_destroy(&f->lock);
  fuse_session_destroy(f->se);
  kv_destroy(f->remembered_nodes);
  fuse_delete_context_key();
}
/* Enable/disable periodic metrics logging (see maintenance loop). */
void
fuse_log_metrics_set(int log_)
{
  g_LOG_METRICS = log_;
}
/* Return the current metrics-logging flag. */
int
fuse_log_metrics_get(void)
{
  return g_LOG_METRICS;
}