You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2425 lines
55 KiB

  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU LGPLv2.
  5. See the file COPYING.LIB
  6. */
  7. #define _GNU_SOURCE
  8. #include "lfmp.h"
  9. #include "config.h"
  10. #include "debug.h"
  11. #include "fuse_i.h"
  12. #include "fuse_kernel.h"
  13. #include "fuse_opt.h"
  14. #include "fuse_misc.h"
  15. #include "fuse_pollhandle.h"
  16. #include "fuse_msgbuf.h"
  17. #include <stdio.h>
  18. #include <stdlib.h>
  19. #include <stddef.h>
  20. #include <string.h>
  21. #include <unistd.h>
  22. #include <limits.h>
  23. #include <errno.h>
  24. #include <assert.h>
  25. #include <sys/file.h>
  26. #ifndef F_LINUX_SPECIFIC_BASE
  27. #define F_LINUX_SPECIFIC_BASE 1024
  28. #endif
  29. #ifndef F_SETPIPE_SZ
  30. #define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
  31. #endif
  32. #define PARAM(inarg) (((char*)(inarg)) + sizeof(*(inarg)))
  33. #define OFFSET_MAX 0x7fffffffffffffffLL
  34. #define container_of(ptr, type, member) ({ \
  35. const typeof( ((type*)0)->member ) *__mptr = (ptr); \
  36. (type *)( (char*)__mptr - offsetof(type,member) );})
/* System page size, cached once by the library constructor below. */
static size_t pagesize;
/* Lock-free memory pool from which every struct fuse_req is allocated. */
static lfmp_t g_FMP_fuse_req;
  39. static
  40. __attribute__((constructor))
  41. void
  42. fuse_ll_constructor(void)
  43. {
  44. pagesize = getpagesize();
  45. lfmp_init(&g_FMP_fuse_req,sizeof(struct fuse_req),1);
  46. }
/* Library destructor: tear down the request memory pool. */
static
__attribute__((destructor))
void
fuse_ll_destructor(void)
{
  lfmp_destroy(&g_FMP_fuse_req);
}
  54. static
  55. void
  56. convert_stat(const struct stat *stbuf_,
  57. struct fuse_attr *attr_)
  58. {
  59. attr_->ino = stbuf_->st_ino;
  60. attr_->mode = stbuf_->st_mode;
  61. attr_->nlink = stbuf_->st_nlink;
  62. attr_->uid = stbuf_->st_uid;
  63. attr_->gid = stbuf_->st_gid;
  64. attr_->rdev = stbuf_->st_rdev;
  65. attr_->size = stbuf_->st_size;
  66. attr_->blksize = stbuf_->st_blksize;
  67. attr_->blocks = stbuf_->st_blocks;
  68. attr_->atime = stbuf_->st_atime;
  69. attr_->mtime = stbuf_->st_mtime;
  70. attr_->ctime = stbuf_->st_ctime;
  71. attr_->atimensec = ST_ATIM_NSEC(stbuf_);
  72. attr_->mtimensec = ST_MTIM_NSEC(stbuf_);
  73. attr_->ctimensec = ST_CTIM_NSEC(stbuf_);
  74. }
  75. static
  76. size_t
  77. iov_length(const struct iovec *iov,
  78. size_t count)
  79. {
  80. size_t seg;
  81. size_t ret = 0;
  82. for(seg = 0; seg < count; seg++)
  83. ret += iov[seg].iov_len;
  84. return ret;
  85. }
/* Return a request object to the global pool (counterpart of
   fuse_ll_alloc_req). */
static
void
destroy_req(fuse_req_t req)
{
  lfmp_free(&g_FMP_fuse_req,req);
}
  92. static
  93. struct fuse_req*
  94. fuse_ll_alloc_req(struct fuse_ll *f)
  95. {
  96. struct fuse_req *req;
  97. req = (struct fuse_req*)lfmp_calloc(&g_FMP_fuse_req);
  98. if(req == NULL)
  99. {
  100. fprintf(stderr, "fuse: failed to allocate request\n");
  101. }
  102. else
  103. {
  104. req->f = f;
  105. }
  106. return req;
  107. }
/*
 * Transmit one fully assembled reply to the kernel.  iov[0] must point
 * at the fuse_out_header; its `len` field is filled in here with the
 * total length of all segments before the writev().
 *
 * Returns 0 on success, -errno if writev() failed.
 * NOTE(review): a short writev() is treated as success — presumably the
 * fuse device accepts or rejects whole messages atomically; confirm.
 */
static
int
fuse_send_msg(struct fuse_ll *f,
              struct fuse_chan *ch,
              struct iovec *iov,
              int count)
{
  int rv;
  struct fuse_out_header *out = iov[0].iov_base;

  out->len = iov_length(iov, count);

  rv = writev(fuse_chan_fd(ch),iov,count);
  if(rv == -1)
    return -errno;

  return 0;
}
/*
 * Build a fuse_out_header for `req` and send it plus any payload
 * segments, without freeing the request.  iov[0] is overwritten to
 * point at the header constructed on this stack frame — safe because
 * fuse_send_msg() writes everything out before this function returns.
 *
 * `error` must be 0 or a negated errno in (-1000, 0]; anything else is
 * logged and replaced with -ERANGE so the kernel never sees garbage.
 */
int
fuse_send_reply_iov_nofree(fuse_req_t req,
                           int error,
                           struct iovec *iov,
                           int count)
{
  struct fuse_out_header out;

  if(error <= -1000 || error > 0)
    {
      fprintf(stderr, "fuse: bad error value: %i\n",error);
      error = -ERANGE;
    }

  out.unique = req->unique;
  out.error  = error;

  iov[0].iov_base = &out;
  iov[0].iov_len  = sizeof(struct fuse_out_header);

  return fuse_send_msg(req->f, req->ch, iov, count);
}
  141. static
  142. int
  143. send_reply_iov(fuse_req_t req,
  144. int error,
  145. struct iovec *iov,
  146. int count)
  147. {
  148. int res;
  149. res = fuse_send_reply_iov_nofree(req, error, iov, count);
  150. destroy_req(req);
  151. return res;
  152. }
  153. static
  154. int
  155. send_reply(fuse_req_t req,
  156. int error,
  157. const void *arg,
  158. size_t argsize)
  159. {
  160. struct iovec iov[2];
  161. int count = 1;
  162. if(argsize)
  163. {
  164. iov[1].iov_base = (void *) arg;
  165. iov[1].iov_len = argsize;
  166. count++;
  167. }
  168. return send_reply_iov(req, error, iov, count);
  169. }
  170. static
  171. void
  172. convert_statfs(const struct statvfs *stbuf,
  173. struct fuse_kstatfs *kstatfs)
  174. {
  175. kstatfs->bsize = stbuf->f_bsize;
  176. kstatfs->frsize = stbuf->f_frsize;
  177. kstatfs->blocks = stbuf->f_blocks;
  178. kstatfs->bfree = stbuf->f_bfree;
  179. kstatfs->bavail = stbuf->f_bavail;
  180. kstatfs->files = stbuf->f_files;
  181. kstatfs->ffree = stbuf->f_ffree;
  182. kstatfs->namelen = stbuf->f_namemax;
  183. }
/* Convenience wrapper: successful reply carrying `argsize` payload bytes. */
static
int
send_reply_ok(fuse_req_t req,
              const void *arg,
              size_t argsize)
{
  return send_reply(req, 0, arg, argsize);
}
/* Reply with an error.  Callers pass a positive errno; it is negated
   here to match the kernel's expected convention. */
int
fuse_reply_err(fuse_req_t req_,
               int err_)
{
  return send_reply(req_,-err_,NULL,0);
}
/* Used for requests that take no reply (e.g. FORGET): just release
   the request object, sending nothing to the kernel. */
void
fuse_reply_none(fuse_req_t req)
{
  destroy_req(req);
}
  203. static
  204. void
  205. fill_entry(struct fuse_entry_out *arg,
  206. const struct fuse_entry_param *e)
  207. {
  208. arg->nodeid = e->ino;
  209. arg->generation = e->generation;
  210. arg->entry_valid = e->timeout.entry;
  211. arg->entry_valid_nsec = 0;
  212. arg->attr_valid = e->timeout.attr;
  213. arg->attr_valid_nsec = 0;
  214. convert_stat(&e->attr,&arg->attr);
  215. }
  216. static
  217. void
  218. fill_open(struct fuse_open_out *arg,
  219. const fuse_file_info_t *f)
  220. {
  221. arg->fh = f->fh;
  222. if(f->direct_io)
  223. arg->open_flags |= FOPEN_DIRECT_IO;
  224. if(f->keep_cache)
  225. arg->open_flags |= FOPEN_KEEP_CACHE;
  226. if(f->nonseekable)
  227. arg->open_flags |= FOPEN_NONSEEKABLE;
  228. if(f->cache_readdir)
  229. arg->open_flags |= FOPEN_CACHE_DIR;
  230. }
  231. int
  232. fuse_reply_entry(fuse_req_t req,
  233. const struct fuse_entry_param *e)
  234. {
  235. struct fuse_entry_out arg = {0};
  236. size_t size = req->f->conn.proto_minor < 9 ?
  237. FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
  238. /* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
  239. negative entry */
  240. if(!e->ino && req->f->conn.proto_minor < 4)
  241. return fuse_reply_err(req, ENOENT);
  242. fill_entry(&arg, e);
  243. return send_reply_ok(req, &arg, size);
  244. }
/* Combined CREATE reply payload: entry info followed by open info.
   The open_out half may start earlier than `o` when talking to a
   pre-7.9 kernel (see fuse_reply_create). */
struct fuse_create_out
{
  struct fuse_entry_out e;
  struct fuse_open_out o;
};
  250. int
  251. fuse_reply_create(fuse_req_t req,
  252. const struct fuse_entry_param *e,
  253. const fuse_file_info_t *f)
  254. {
  255. struct fuse_create_out buf = {0};
  256. size_t entrysize = req->f->conn.proto_minor < 9 ?
  257. FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
  258. struct fuse_entry_out *earg = (struct fuse_entry_out*)&buf.e;
  259. struct fuse_open_out *oarg = (struct fuse_open_out*)(((char*)&buf)+entrysize);
  260. fill_entry(earg, e);
  261. fill_open(oarg, f);
  262. return send_reply_ok(req, &buf, entrysize + sizeof(struct fuse_open_out));
  263. }
  264. int
  265. fuse_reply_attr(fuse_req_t req,
  266. const struct stat *attr,
  267. const uint64_t timeout)
  268. {
  269. struct fuse_attr_out arg = {0};
  270. size_t size = req->f->conn.proto_minor < 9 ?
  271. FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);
  272. arg.attr_valid = timeout;
  273. arg.attr_valid_nsec = 0;
  274. convert_stat(attr,&arg.attr);
  275. return send_reply_ok(req,&arg,size);
  276. }
  277. int
  278. fuse_reply_readlink(fuse_req_t req,
  279. const char *linkname)
  280. {
  281. return send_reply_ok(req, linkname, strlen(linkname));
  282. }
  283. int
  284. fuse_reply_open(fuse_req_t req,
  285. const fuse_file_info_t *f)
  286. {
  287. struct fuse_open_out arg = {0};
  288. fill_open(&arg, f);
  289. return send_reply_ok(req, &arg, sizeof(arg));
  290. }
  291. int
  292. fuse_reply_write(fuse_req_t req,
  293. size_t count)
  294. {
  295. struct fuse_write_out arg = {0};
  296. arg.size = count;
  297. return send_reply_ok(req, &arg, sizeof(arg));
  298. }
  299. int
  300. fuse_reply_buf(fuse_req_t req,
  301. const char *buf,
  302. size_t size)
  303. {
  304. return send_reply_ok(req, buf, size);
  305. }
  306. static
  307. int
  308. fuse_send_data_iov_fallback(struct fuse_ll *f,
  309. struct fuse_chan *ch,
  310. struct iovec *iov,
  311. int iov_count,
  312. struct fuse_bufvec *buf,
  313. size_t len)
  314. {
  315. int res;
  316. void *mbuf;
  317. struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
  318. /* Optimize common case */
  319. if(buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
  320. !(buf->buf[0].flags & FUSE_BUF_IS_FD))
  321. {
  322. /* FIXME: also avoid memory copy if there are multiple buffers
  323. but none of them contain an fd */
  324. iov[iov_count].iov_base = buf->buf[0].mem;
  325. iov[iov_count].iov_len = len;
  326. iov_count++;
  327. return fuse_send_msg(f, ch, iov, iov_count);
  328. }
  329. res = posix_memalign(&mbuf, pagesize, len);
  330. if(res != 0)
  331. return res;
  332. mem_buf.buf[0].mem = mbuf;
  333. res = fuse_buf_copy(&mem_buf, buf, 0);
  334. if(res < 0)
  335. {
  336. free(mbuf);
  337. return -res;
  338. }
  339. len = res;
  340. iov[iov_count].iov_base = mbuf;
  341. iov[iov_count].iov_len = len;
  342. iov_count++;
  343. res = fuse_send_msg(f, ch, iov, iov_count);
  344. free(mbuf);
  345. return res;
  346. }
/* Per-thread splice pipe: current capacity in bytes, whether a further
   F_SETPIPE_SZ attempt is worthwhile, and the two pipe fds. */
struct fuse_ll_pipe
{
  size_t size;
  int can_grow;
  int pipe[2];
};
  353. static
  354. void
  355. fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
  356. {
  357. close(llp->pipe[0]);
  358. close(llp->pipe[1]);
  359. free(llp);
  360. }
  361. #ifdef HAVE_SPLICE
  362. static
  363. struct fuse_ll_pipe*
  364. fuse_ll_get_pipe(struct fuse_ll *f)
  365. {
  366. struct fuse_ll_pipe *llp = pthread_getspecific(f->pipe_key);
  367. if(llp == NULL)
  368. {
  369. int res;
  370. llp = malloc(sizeof(struct fuse_ll_pipe));
  371. if(llp == NULL)
  372. return NULL;
  373. res = pipe(llp->pipe);
  374. if(res == -1)
  375. {
  376. free(llp);
  377. return NULL;
  378. }
  379. if(fcntl(llp->pipe[0], F_SETFL, O_NONBLOCK) == -1 ||
  380. fcntl(llp->pipe[1], F_SETFL, O_NONBLOCK) == -1)
  381. {
  382. close(llp->pipe[0]);
  383. close(llp->pipe[1]);
  384. free(llp);
  385. return NULL;
  386. }
  387. /*
  388. *the default size is 16 pages on linux
  389. */
  390. llp->size = pagesize * 16;
  391. llp->can_grow = 1;
  392. pthread_setspecific(f->pipe_key, llp);
  393. }
  394. return llp;
  395. }
  396. #endif
  397. static
  398. void
  399. fuse_ll_clear_pipe(struct fuse_ll *f)
  400. {
  401. struct fuse_ll_pipe *llp = pthread_getspecific(f->pipe_key);
  402. if(llp)
  403. {
  404. pthread_setspecific(f->pipe_key, NULL);
  405. fuse_ll_pipe_free(llp);
  406. }
  407. }
  408. #if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
  409. static
  410. int
  411. read_back(int fd,
  412. char *buf,
  413. size_t len)
  414. {
  415. int res;
  416. res = read(fd, buf, len);
  417. if(res == -1)
  418. {
  419. fprintf(stderr, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
  420. return -EIO;
  421. }
  422. if(res != len)
  423. {
  424. fprintf(stderr, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
  425. return -EIO;
  426. }
  427. return 0;
  428. }
/*
 * Zero-copy reply path: vmsplice the header iovecs into a per-thread
 * pipe, splice the fd-backed payload in after them, then splice the
 * whole pipe to the fuse device.  Falls back to the writev() path
 * whenever splicing is unsupported, unprofitable (< 2 pages of
 * fd-backed data), or fails in a recoverable way.
 *
 * Returns 0 on success, a negative errno on failure.  On the
 * clear_pipe paths the pipe contains stale data and is destroyed.
 *
 * NOTE(review): two long-standing quirks kept as-is: the "short
 * vmsplice"/"short splice" messages print `res` AFTER it was set to
 * -EIO (so they log -5, not the short count), and the ENOMEM on the
 * tmpbuf path is returned positive while every other error here is
 * negative.  Upstream-inherited behavior; confirm before changing.
 */
static
int
fuse_send_data_iov(struct fuse_ll *f,
                   struct fuse_chan *ch,
                   struct iovec *iov,
                   int iov_count,
                   struct fuse_bufvec *buf,
                   unsigned int flags)
{
  int res;
  size_t len = fuse_buf_size(buf);
  struct fuse_out_header *out = iov[0].iov_base;
  struct fuse_ll_pipe *llp;
  int splice_flags;
  size_t pipesize;
  size_t total_fd_size;
  size_t idx;
  size_t headerlen;
  struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);

  if(f->broken_splice_nonblock)
    goto fallback;

  if(flags & FUSE_BUF_NO_SPLICE)
    goto fallback;

  /* Splicing only pays off when enough of the payload is fd-backed. */
  total_fd_size = 0;
  for (idx = buf->idx; idx < buf->count; idx++)
    {
      if(buf->buf[idx].flags & FUSE_BUF_IS_FD)
        {
          total_fd_size = buf->buf[idx].size;
          if(idx == buf->idx)
            total_fd_size -= buf->off;
        }
    }

  if(total_fd_size < 2 * pagesize)
    goto fallback;

  if(f->conn.proto_minor < 14 || !(f->conn.want & FUSE_CAP_SPLICE_WRITE))
    goto fallback;

  llp = fuse_ll_get_pipe(f);
  if(llp == NULL)
    goto fallback;

  headerlen = iov_length(iov, iov_count);

  out->len = headerlen + len;

  /*
   * Heuristic for the required pipe size, does not work if the
   * source contains less than page size fragments
   */
  pipesize = pagesize * (iov_count + buf->count + 1) + out->len;

  if(llp->size < pipesize)
    {
      if(llp->can_grow)
        {
          res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
          if(res == -1)
            {
              llp->can_grow = 0;
              goto fallback;
            }
          llp->size = res;
        }

      if(llp->size < pipesize)
        goto fallback;
    }

  res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
  if(res == -1)
    goto fallback;

  if(res != headerlen)
    {
      res = -EIO;
      fprintf(stderr, "fuse: short vmsplice to pipe: %u/%zu\n", res,
              headerlen);
      goto clear_pipe;
    }

  pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
  pipe_buf.buf[0].fd    = llp->pipe[1];

  res = fuse_buf_copy(&pipe_buf, buf,
                      FUSE_BUF_FORCE_SPLICE | FUSE_BUF_SPLICE_NONBLOCK);
  if(res < 0)
    {
      if(res == -EAGAIN || res == -EINVAL)
        {
          /*
           * Should only get EAGAIN on kernels with
           * broken SPLICE_F_NONBLOCK support (<=
           * 2.6.35) where this error or a short read is
           * returned even if the pipe itself is not
           * full
           *
           * EINVAL might mean that splice can't handle
           * this combination of input and output.
           */
          if(res == -EAGAIN)
            f->broken_splice_nonblock = 1;

          pthread_setspecific(f->pipe_key, NULL);
          fuse_ll_pipe_free(llp);
          goto fallback;
        }

      res = -res;
      goto clear_pipe;
    }

  if(res != 0 && res < len)
    {
      struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
      void *mbuf;
      size_t now_len = res;

      /*
       * For regular files a short count is either
       *  1) due to EOF, or
       *  2) because of broken SPLICE_F_NONBLOCK (see above)
       *
       * For other inputs it's possible that we overflowed
       * the pipe because of small buffer fragments.
       */
      res = posix_memalign(&mbuf, pagesize, len);
      if(res != 0)
        goto clear_pipe;

      mem_buf.buf[0].mem = mbuf;
      mem_buf.off = now_len;

      res = fuse_buf_copy(&mem_buf, buf, 0);
      if(res > 0)
        {
          char *tmpbuf;
          size_t extra_len = res;

          /*
           * Trickiest case: got more data. Need to get
           * back the data from the pipe and then fall
           * back to regular write.
           */
          tmpbuf = malloc(headerlen);
          if(tmpbuf == NULL)
            {
              free(mbuf);
              res = ENOMEM;
              goto clear_pipe;
            }

          res = read_back(llp->pipe[0], tmpbuf, headerlen);
          free(tmpbuf);
          if(res != 0)
            {
              free(mbuf);
              goto clear_pipe;
            }

          res = read_back(llp->pipe[0], mbuf, now_len);
          if(res != 0)
            {
              free(mbuf);
              goto clear_pipe;
            }

          len = now_len + extra_len;
          iov[iov_count].iov_base = mbuf;
          iov[iov_count].iov_len  = len;
          iov_count++;

          res = fuse_send_msg(f, ch, iov, iov_count);
          free(mbuf);
          return res;
        }

      free(mbuf);
      res = now_len;
    }

  len = res;
  out->len = headerlen + len;

  splice_flags = 0;
  if((flags & FUSE_BUF_SPLICE_MOVE) &&
     (f->conn.want & FUSE_CAP_SPLICE_MOVE))
    splice_flags |= SPLICE_F_MOVE;

  res = splice(llp->pipe[0], NULL, fuse_chan_fd(ch), NULL, out->len, splice_flags);
  if(res == -1)
    {
      res = -errno;
      perror("fuse: splice from pipe");
      goto clear_pipe;
    }

  if(res != out->len)
    {
      res = -EIO;
      fprintf(stderr, "fuse: short splice from pipe: %u/%u\n",
              res, out->len);
      goto clear_pipe;
    }

  return 0;

clear_pipe:
  fuse_ll_clear_pipe(f);
  return res;

fallback:
  return fuse_send_data_iov_fallback(f, ch, iov, iov_count, buf, len);
}
  614. #else
/*
 * Splice-less build (no HAVE_SPLICE/HAVE_VMSPLICE): there is no
 * zero-copy path, so always gather-and-write via the fallback.
 */
static
int
fuse_send_data_iov(struct fuse_ll *f,
                   struct fuse_chan *ch,
                   struct iovec *iov,
                   int iov_count,
                   struct fuse_bufvec *buf,
                   unsigned int flags)
{
  size_t len = fuse_buf_size(buf);

  (void) flags;

  return fuse_send_data_iov_fallback(f, ch, iov, iov_count, buf, len);
}
  628. #endif
/*
 * Reply with the contents of a fuse_bufvec, splicing when possible.
 * On success or hard (negative) error the request is freed and the
 * status returned; a positive error from the data path is converted
 * into an error reply to the kernel instead.
 */
int
fuse_reply_data(fuse_req_t req,
                struct fuse_bufvec *bufv,
                enum fuse_buf_copy_flags flags)
{
  struct iovec iov[2];
  struct fuse_out_header out;
  int res;

  iov[0].iov_base = &out;
  iov[0].iov_len = sizeof(struct fuse_out_header);

  out.unique = req->unique;
  out.error = 0;

  res = fuse_send_data_iov(req->f, req->ch, iov, 1, bufv, flags);
  if(res <= 0)
    {
      destroy_req(req);
      return res;
    }
  else
    {
      return fuse_reply_err(req, res);
    }
}
  652. int
  653. fuse_reply_statfs(fuse_req_t req,
  654. const struct statvfs *stbuf)
  655. {
  656. struct fuse_statfs_out arg = {0};
  657. size_t size = req->f->conn.proto_minor < 4 ?
  658. FUSE_COMPAT_STATFS_SIZE : sizeof(arg);
  659. convert_statfs(stbuf, &arg.st);
  660. return send_reply_ok(req, &arg, size);
  661. }
  662. int
  663. fuse_reply_xattr(fuse_req_t req,
  664. size_t count)
  665. {
  666. struct fuse_getxattr_out arg = {0};
  667. arg.size = count;
  668. return send_reply_ok(req, &arg, sizeof(arg));
  669. }
  670. int
  671. fuse_reply_lock(fuse_req_t req,
  672. const struct flock *lock)
  673. {
  674. struct fuse_lk_out arg = {0};
  675. arg.lk.type = lock->l_type;
  676. if(lock->l_type != F_UNLCK)
  677. {
  678. arg.lk.start = lock->l_start;
  679. if(lock->l_len == 0)
  680. arg.lk.end = OFFSET_MAX;
  681. else
  682. arg.lk.end = lock->l_start + lock->l_len - 1;
  683. }
  684. arg.lk.pid = lock->l_pid;
  685. return send_reply_ok(req, &arg, sizeof(arg));
  686. }
  687. int
  688. fuse_reply_bmap(fuse_req_t req,
  689. uint64_t idx)
  690. {
  691. struct fuse_bmap_out arg = {0};
  692. arg.block = idx;
  693. return send_reply_ok(req, &arg, sizeof(arg));
  694. }
  695. static
  696. struct fuse_ioctl_iovec*
  697. fuse_ioctl_iovec_copy(const struct iovec *iov,
  698. size_t count)
  699. {
  700. struct fuse_ioctl_iovec *fiov;
  701. size_t i;
  702. fiov = malloc(sizeof(fiov[0]) * count);
  703. if(!fiov)
  704. return NULL;
  705. for (i = 0; i < count; i++)
  706. {
  707. fiov[i].base = (uintptr_t) iov[i].iov_base;
  708. fiov[i].len = iov[i].iov_len;
  709. }
  710. return fiov;
  711. }
  712. int
  713. fuse_reply_ioctl_retry(fuse_req_t req,
  714. const struct iovec *in_iov,
  715. size_t in_count,
  716. const struct iovec *out_iov,
  717. size_t out_count)
  718. {
  719. struct fuse_ioctl_out arg = {0};
  720. struct fuse_ioctl_iovec *in_fiov = NULL;
  721. struct fuse_ioctl_iovec *out_fiov = NULL;
  722. struct iovec iov[4];
  723. size_t count = 1;
  724. int res;
  725. arg.flags |= FUSE_IOCTL_RETRY;
  726. arg.in_iovs = in_count;
  727. arg.out_iovs = out_count;
  728. iov[count].iov_base = &arg;
  729. iov[count].iov_len = sizeof(arg);
  730. count++;
  731. if(req->f->conn.proto_minor < 16)
  732. {
  733. if(in_count)
  734. {
  735. iov[count].iov_base = (void *)in_iov;
  736. iov[count].iov_len = sizeof(in_iov[0]) * in_count;
  737. count++;
  738. }
  739. if(out_count)
  740. {
  741. iov[count].iov_base = (void *)out_iov;
  742. iov[count].iov_len = sizeof(out_iov[0]) * out_count;
  743. count++;
  744. }
  745. }
  746. else
  747. {
  748. /* Can't handle non-compat 64bit ioctls on 32bit */
  749. if((sizeof(void *) == 4) && (req->ioctl_64bit))
  750. {
  751. res = fuse_reply_err(req, EINVAL);
  752. goto out;
  753. }
  754. if(in_count)
  755. {
  756. in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
  757. if(!in_fiov)
  758. goto enomem;
  759. iov[count].iov_base = (void *)in_fiov;
  760. iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
  761. count++;
  762. }
  763. if(out_count)
  764. {
  765. out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
  766. if(!out_fiov)
  767. goto enomem;
  768. iov[count].iov_base = (void *)out_fiov;
  769. iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
  770. count++;
  771. }
  772. }
  773. res = send_reply_iov(req, 0, iov, count);
  774. out:
  775. free(in_fiov);
  776. free(out_fiov);
  777. return res;
  778. enomem:
  779. res = fuse_reply_err(req, ENOMEM);
  780. goto out;
  781. }
  782. int
  783. fuse_reply_ioctl(fuse_req_t req,
  784. int result,
  785. const void *buf,
  786. uint32_t size)
  787. {
  788. int count;
  789. struct iovec iov[3];
  790. struct fuse_ioctl_out arg;
  791. arg.result = result;
  792. arg.flags = 0;
  793. arg.in_iovs = 0;
  794. arg.out_iovs = 0;
  795. count = 1;
  796. iov[count].iov_base = &arg;
  797. iov[count].iov_len = sizeof(arg);
  798. count++;
  799. if(size)
  800. {
  801. iov[count].iov_base = (char*)buf;
  802. iov[count].iov_len = size;
  803. count++;
  804. }
  805. return send_reply_iov(req, 0, iov, count);
  806. }
  807. int
  808. fuse_reply_ioctl_iov(fuse_req_t req,
  809. int result,
  810. const struct iovec *iov,
  811. int count)
  812. {
  813. struct iovec *padded_iov;
  814. struct fuse_ioctl_out arg = {0};
  815. int res;
  816. padded_iov = malloc((count + 2) * sizeof(struct iovec));
  817. if(padded_iov == NULL)
  818. return fuse_reply_err(req, ENOMEM);
  819. arg.result = result;
  820. padded_iov[1].iov_base = &arg;
  821. padded_iov[1].iov_len = sizeof(arg);
  822. memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
  823. res = send_reply_iov(req, 0, padded_iov, count + 2);
  824. free(padded_iov);
  825. return res;
  826. }
  827. int
  828. fuse_reply_poll(fuse_req_t req,
  829. unsigned revents)
  830. {
  831. struct fuse_poll_out arg = {0};
  832. arg.revents = revents;
  833. return send_reply_ok(req, &arg, sizeof(arg));
  834. }
/*
 * Thin dispatch stubs, one per FUSE opcode: each forwards the raw
 * request header to the matching operation callback installed in
 * req->f->op.  No argument decoding happens here — the callbacks
 * receive the fuse_in_header and parse their own payload after it.
 *
 * NOTE(review): the callback pointers are invoked unconditionally —
 * presumably every op slot is guaranteed non-NULL by setup code
 * outside this view; confirm before relying on it.
 */
static
void
do_lookup(fuse_req_t req,
          struct fuse_in_header *hdr_)
{
  req->f->op.lookup(req,hdr_);
}

static
void
do_forget(fuse_req_t req,
          struct fuse_in_header *hdr_)
{
  req->f->op.forget(req,hdr_);
}

/* BATCH_FORGET maps onto the multi-entry forget callback. */
static
void
do_batch_forget(fuse_req_t req,
                struct fuse_in_header *hdr_)
{
  req->f->op.forget_multi(req,hdr_);
}

static
void
do_getattr(fuse_req_t req,
           struct fuse_in_header *hdr_)
{
  req->f->op.getattr(req, hdr_);
}

static
void
do_setattr(fuse_req_t req_,
           struct fuse_in_header *hdr_)
{
  req_->f->op.setattr(req_,hdr_);
}

static
void
do_access(fuse_req_t req,
          struct fuse_in_header *hdr_)
{
  req->f->op.access(req,hdr_);
}

static
void
do_readlink(fuse_req_t req,
            struct fuse_in_header *hdr_)
{
  req->f->op.readlink(req,hdr_);
}

static
void
do_mknod(fuse_req_t req,
         struct fuse_in_header *hdr_)
{
  req->f->op.mknod(req,hdr_);
}

static
void
do_mkdir(fuse_req_t req,
         struct fuse_in_header *hdr_)
{
  req->f->op.mkdir(req,hdr_);
}

static
void
do_unlink(fuse_req_t req,
          struct fuse_in_header *hdr_)
{
  req->f->op.unlink(req,hdr_);
}

static
void
do_rmdir(fuse_req_t req,
         struct fuse_in_header *hdr_)
{
  req->f->op.rmdir(req,hdr_);
}

static
void
do_symlink(fuse_req_t req,
           struct fuse_in_header *hdr_)
{
  req->f->op.symlink(req,hdr_);
}

static
void
do_rename(fuse_req_t req,
          struct fuse_in_header *hdr_)
{
  req->f->op.rename(req,hdr_);
}

static
void
do_link(fuse_req_t req,
        struct fuse_in_header *hdr_)
{
  req->f->op.link(req,hdr_);
}

static
void
do_create(fuse_req_t req,
          struct fuse_in_header *hdr_)
{
  req->f->op.create(req,hdr_);
}

static
void
do_open(fuse_req_t req,
        struct fuse_in_header *hdr_)
{
  req->f->op.open(req,hdr_);
}

static
void
do_read(fuse_req_t req,
        struct fuse_in_header *hdr_)
{
  req->f->op.read(req,hdr_);
}

static
void
do_write(fuse_req_t req,
         struct fuse_in_header *hdr_)
{
  req->f->op.write(req,hdr_);
}

static
void
do_flush(fuse_req_t req,
         struct fuse_in_header *hdr_)
{
  req->f->op.flush(req,hdr_);
}

static
void
do_release(fuse_req_t req,
           struct fuse_in_header *hdr_)
{
  req->f->op.release(req,hdr_);
}

static
void
do_fsync(fuse_req_t req,
         struct fuse_in_header *hdr_)
{
  req->f->op.fsync(req,hdr_);
}

static
void
do_opendir(fuse_req_t req,
           struct fuse_in_header *hdr_)
{
  req->f->op.opendir(req,hdr_);
}

static
void
do_readdir(fuse_req_t req,
           struct fuse_in_header *hdr_)
{
  req->f->op.readdir(req,hdr_);
}

static
void
do_readdir_plus(fuse_req_t req_,
                struct fuse_in_header *hdr_)
{
  req_->f->op.readdir_plus(req_,hdr_);
}

static
void
do_releasedir(fuse_req_t req,
              struct fuse_in_header *hdr_)
{
  req->f->op.releasedir(req,hdr_);
}

static
void
do_fsyncdir(fuse_req_t req,
            struct fuse_in_header *hdr_)
{
  req->f->op.fsyncdir(req,hdr_);
}

static
void
do_statfs(fuse_req_t req,
          struct fuse_in_header *hdr_)
{
  req->f->op.statfs(req,hdr_);
}

static
void
do_setxattr(fuse_req_t req,
            struct fuse_in_header *hdr_)
{
  req->f->op.setxattr(req,hdr_);
}

static
void
do_getxattr(fuse_req_t req,
            struct fuse_in_header *hdr_)
{
  req->f->op.getxattr(req,hdr_);
}

static
void
do_listxattr(fuse_req_t req,
             struct fuse_in_header *hdr_)
{
  req->f->op.listxattr(req,hdr_);
}

static
void
do_removexattr(fuse_req_t req,
               struct fuse_in_header *hdr_)
{
  req->f->op.removexattr(req,hdr_);
}
  1052. static
  1053. void
  1054. convert_fuse_file_lock(struct fuse_file_lock *fl,
  1055. struct flock *flock)
  1056. {
  1057. memset(flock, 0, sizeof(struct flock));
  1058. flock->l_type = fl->type;
  1059. flock->l_whence = SEEK_SET;
  1060. flock->l_start = fl->start;
  1061. if(fl->end == OFFSET_MAX)
  1062. flock->l_len = 0;
  1063. else
  1064. flock->l_len = fl->end - fl->start + 1;
  1065. flock->l_pid = fl->pid;
  1066. }
  1067. static
  1068. void
  1069. do_getlk(fuse_req_t req,
  1070. struct fuse_in_header *hdr_)
  1071. {
  1072. req->f->op.getlk(req,hdr_);
  1073. }
  1074. static
  1075. void
  1076. do_setlk_common(fuse_req_t req,
  1077. uint64_t nodeid,
  1078. const void *inarg,
  1079. int sleep)
  1080. {
  1081. struct flock flock;
  1082. fuse_file_info_t fi = {0};
  1083. struct fuse_lk_in *arg = (struct fuse_lk_in*)inarg;
  1084. fi.fh = arg->fh;
  1085. fi.lock_owner = arg->owner;
  1086. if(arg->lk_flags & FUSE_LK_FLOCK)
  1087. {
  1088. int op = 0;
  1089. switch (arg->lk.type)
  1090. {
  1091. case F_RDLCK:
  1092. op = LOCK_SH;
  1093. break;
  1094. case F_WRLCK:
  1095. op = LOCK_EX;
  1096. break;
  1097. case F_UNLCK:
  1098. op = LOCK_UN;
  1099. break;
  1100. }
  1101. if(!sleep)
  1102. op |= LOCK_NB;
  1103. req->f->op.flock(req,nodeid,&fi,op);
  1104. }
  1105. else
  1106. {
  1107. convert_fuse_file_lock(&arg->lk, &flock);
  1108. req->f->op.setlk(req,nodeid,&fi,&flock,sleep);
  1109. }
  1110. }
  1111. static
  1112. void
  1113. do_setlk(fuse_req_t req,
  1114. struct fuse_in_header *hdr_)
  1115. {
  1116. do_setlk_common(req, hdr_->nodeid, &hdr_[1], 0);
  1117. }
  1118. static
  1119. void
  1120. do_setlkw(fuse_req_t req,
  1121. struct fuse_in_header *hdr_)
  1122. {
  1123. do_setlk_common(req, hdr_->nodeid, &hdr_[1], 1);
  1124. }
  1125. static
  1126. void
  1127. do_interrupt(fuse_req_t req,
  1128. struct fuse_in_header *hdr_)
  1129. {
  1130. destroy_req(req);
  1131. }
  1132. static
  1133. void
  1134. do_bmap(fuse_req_t req,
  1135. struct fuse_in_header *hdr_)
  1136. {
  1137. req->f->op.bmap(req,hdr_);
  1138. }
  1139. static
  1140. void
  1141. do_ioctl(fuse_req_t req,
  1142. struct fuse_in_header *hdr_)
  1143. {
  1144. req->f->op.ioctl(req, hdr_);
  1145. }
  1146. void
  1147. fuse_pollhandle_destroy(fuse_pollhandle_t *ph)
  1148. {
  1149. free(ph);
  1150. }
  1151. static
  1152. void
  1153. do_poll(fuse_req_t req,
  1154. struct fuse_in_header *hdr_)
  1155. {
  1156. req->f->op.poll(req,hdr_);
  1157. }
  1158. static
  1159. void
  1160. do_fallocate(fuse_req_t req,
  1161. struct fuse_in_header *hdr_)
  1162. {
  1163. req->f->op.fallocate(req,hdr_);
  1164. }
/* Handle FUSE_INIT: negotiate protocol version, capability flags and
   transfer sizes with the kernel, then reply with fuse_init_out.  Also
   switches per-connection settings based on what the kernel advertised
   in fuse_init_in.flags. */
static
void
do_init(fuse_req_t req,
        struct fuse_in_header *hdr_)
{
  struct fuse_init_out outarg = {0};
  struct fuse_init_in *arg = (struct fuse_init_in *) &hdr_[1];
  struct fuse_ll *f = req->f;
  size_t bufsize = fuse_chan_bufsize(req->ch);

  if(f->debug)
    debug_fuse_init_in(arg);

  f->conn.proto_major = arg->major;
  f->conn.proto_minor = arg->minor;
  f->conn.capable = 0;
  f->conn.want = 0;

  outarg.major = FUSE_KERNEL_VERSION;
  outarg.minor = FUSE_KERNEL_MINOR_VERSION;
  outarg.max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;

  /* Protocols older than 7.x are unsupported. */
  if(arg->major < 7)
    {
      fprintf(stderr, "fuse: unsupported protocol version: %u.%u\n",
              arg->major, arg->minor);
      fuse_reply_err(req, EPROTO);
      return;
    }

  if(arg->major > 7)
    {
      /* Wait for a second INIT request with a 7.X version */
      send_reply_ok(req, &outarg, sizeof(outarg));
      return;
    }

  /* 7.6+ kernels report readahead limits and capability flags; translate
     each kernel FUSE_* flag into the library's FUSE_CAP_* capability. */
  if(arg->minor >= 6)
    {
      if(arg->max_readahead < f->conn.max_readahead)
        f->conn.max_readahead = arg->max_readahead;
      if(arg->flags & FUSE_ASYNC_READ)
        f->conn.capable |= FUSE_CAP_ASYNC_READ;
      if(arg->flags & FUSE_POSIX_LOCKS)
        f->conn.capable |= FUSE_CAP_POSIX_LOCKS;
      if(arg->flags & FUSE_ATOMIC_O_TRUNC)
        f->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
      if(arg->flags & FUSE_EXPORT_SUPPORT)
        f->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
      if(arg->flags & FUSE_BIG_WRITES)
        f->conn.capable |= FUSE_CAP_BIG_WRITES;
      if(arg->flags & FUSE_DONT_MASK)
        f->conn.capable |= FUSE_CAP_DONT_MASK;
      if(arg->flags & FUSE_FLOCK_LOCKS)
        f->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
      if(arg->flags & FUSE_POSIX_ACL)
        f->conn.capable |= FUSE_CAP_POSIX_ACL;
      if(arg->flags & FUSE_CACHE_SYMLINKS)
        f->conn.capable |= FUSE_CAP_CACHE_SYMLINKS;
      if(arg->flags & FUSE_ASYNC_DIO)
        f->conn.capable |= FUSE_CAP_ASYNC_DIO;
      if(arg->flags & FUSE_PARALLEL_DIROPS)
        f->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
      if(arg->flags & FUSE_MAX_PAGES)
        f->conn.capable |= FUSE_CAP_MAX_PAGES;
      if(arg->flags & FUSE_WRITEBACK_CACHE)
        f->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
      if(arg->flags & FUSE_DO_READDIRPLUS)
        f->conn.capable |= FUSE_CAP_READDIR_PLUS;
      if(arg->flags & FUSE_READDIRPLUS_AUTO)
        f->conn.capable |= FUSE_CAP_READDIR_PLUS_AUTO;
      if(arg->flags & FUSE_SETXATTR_EXT)
        f->conn.capable |= FUSE_CAP_SETXATTR_EXT;
    }
  else
    {
      /* Pre-7.6 kernels: no async reads, no readahead negotiation. */
      f->conn.want &= ~FUSE_CAP_ASYNC_READ;
      f->conn.max_readahead = 0;
    }

  /* Splice support on the fuse device arrived with protocol 7.14 and
     requires the library to have been built with splice/vmsplice. */
  if(req->f->conn.proto_minor >= 14)
    {
#ifdef HAVE_SPLICE
#ifdef HAVE_VMSPLICE
      f->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
      if(f->splice_write)
        f->conn.want |= FUSE_CAP_SPLICE_WRITE;
      if(f->splice_move)
        f->conn.want |= FUSE_CAP_SPLICE_MOVE;
#endif
      f->conn.capable |= FUSE_CAP_SPLICE_READ;
      if(f->splice_read)
        f->conn.want |= FUSE_CAP_SPLICE_READ;
#endif
    }
  if(req->f->conn.proto_minor >= 18)
    f->conn.capable |= FUSE_CAP_IOCTL_DIR;

  /* Only ask for remote locking when the filesystem implements it and
     the user did not disable it via mount options. */
  if(f->op.getlk && f->op.setlk && !f->no_remote_posix_lock)
    f->conn.want |= FUSE_CAP_POSIX_LOCKS;
  if(f->op.flock && !f->no_remote_flock)
    f->conn.want |= FUSE_CAP_FLOCK_LOCKS;

  if(bufsize < FUSE_MIN_READ_BUFFER)
    {
      fprintf(stderr, "fuse: warning: buffer size too small: %zu\n",
              bufsize);
      bufsize = FUSE_MIN_READ_BUFFER;
    }
  /* Reserve space for the request header/metadata; the remainder bounds
     the largest write payload.  NOTE(review): 4096 is presumably header
     overhead rather than pagesize — confirm against the read buffer
     layout. */
  bufsize -= 4096;
  if(bufsize < f->conn.max_write)
    f->conn.max_write = bufsize;

  f->got_init = 1;
  /* Give the filesystem a chance to adjust f->conn before replying. */
  if(f->op.init)
    f->op.init(f->userdata, &f->conn);

  /* Command-line overrides win over whatever op.init requested. */
  if(f->no_splice_read)
    f->conn.want &= ~FUSE_CAP_SPLICE_READ;
  if(f->no_splice_write)
    f->conn.want &= ~FUSE_CAP_SPLICE_WRITE;
  if(f->no_splice_move)
    f->conn.want &= ~FUSE_CAP_SPLICE_MOVE;

  if((arg->flags & FUSE_MAX_PAGES) && (f->conn.want & FUSE_CAP_MAX_PAGES))
    {
      outarg.flags |= FUSE_MAX_PAGES;
      outarg.max_pages = f->conn.max_pages;
    }

  /* Translate the wanted FUSE_CAP_* capabilities back into kernel
     FUSE_* flags for the reply. */
  if(f->conn.want & FUSE_CAP_ASYNC_READ)
    outarg.flags |= FUSE_ASYNC_READ;
  if(f->conn.want & FUSE_CAP_POSIX_LOCKS)
    outarg.flags |= FUSE_POSIX_LOCKS;
  if(f->conn.want & FUSE_CAP_ATOMIC_O_TRUNC)
    outarg.flags |= FUSE_ATOMIC_O_TRUNC;
  if(f->conn.want & FUSE_CAP_EXPORT_SUPPORT)
    outarg.flags |= FUSE_EXPORT_SUPPORT;
  if(f->conn.want & FUSE_CAP_BIG_WRITES)
    outarg.flags |= FUSE_BIG_WRITES;
  if(f->conn.want & FUSE_CAP_DONT_MASK)
    outarg.flags |= FUSE_DONT_MASK;
  if(f->conn.want & FUSE_CAP_FLOCK_LOCKS)
    outarg.flags |= FUSE_FLOCK_LOCKS;
  if(f->conn.want & FUSE_CAP_POSIX_ACL)
    outarg.flags |= FUSE_POSIX_ACL;
  if(f->conn.want & FUSE_CAP_CACHE_SYMLINKS)
    outarg.flags |= FUSE_CACHE_SYMLINKS;
  if(f->conn.want & FUSE_CAP_ASYNC_DIO)
    outarg.flags |= FUSE_ASYNC_DIO;
  if(f->conn.want & FUSE_CAP_PARALLEL_DIROPS)
    outarg.flags |= FUSE_PARALLEL_DIROPS;
  if(f->conn.want & FUSE_CAP_WRITEBACK_CACHE)
    outarg.flags |= FUSE_WRITEBACK_CACHE;
  if(f->conn.want & FUSE_CAP_READDIR_PLUS)
    outarg.flags |= FUSE_DO_READDIRPLUS;
  if(f->conn.want & FUSE_CAP_READDIR_PLUS_AUTO)
    outarg.flags |= FUSE_READDIRPLUS_AUTO;
  if(f->conn.want & FUSE_CAP_SETXATTR_EXT)
    outarg.flags |= FUSE_SETXATTR_EXT;

  outarg.max_readahead = f->conn.max_readahead;
  outarg.max_write = f->conn.max_write;

  /* Background request / congestion accounting exists since 7.13. */
  if(f->conn.proto_minor >= 13)
    {
      /* Kernel field is 16 bits wide; clamp. */
      if(f->conn.max_background >= (1 << 16))
        f->conn.max_background = (1 << 16) - 1;
      if(f->conn.congestion_threshold > f->conn.max_background)
        f->conn.congestion_threshold = f->conn.max_background;
      if(!f->conn.congestion_threshold)
        {
          /* Default: signal congestion at 3/4 of max background. */
          f->conn.congestion_threshold = f->conn.max_background * 3 / 4;
        }
      outarg.max_background = f->conn.max_background;
      outarg.congestion_threshold = f->conn.congestion_threshold;
    }

  /* Older kernels expect shorter (compat) fuse_init_out layouts. */
  size_t outargsize;
  if(arg->minor < 5)
    outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
  else if(arg->minor < 23)
    outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
  else
    outargsize = sizeof(outarg);

  if(f->debug)
    debug_fuse_init_out(req->unique,&outarg,outargsize);

  send_reply_ok(req, &outarg, outargsize);
}
  1338. static
  1339. void
  1340. do_destroy(fuse_req_t req,
  1341. struct fuse_in_header *hdr_)
  1342. {
  1343. struct fuse_ll *f = req->f;
  1344. f->got_destroy = 1;
  1345. f->op.destroy(f->userdata);
  1346. send_reply_ok(req,NULL,0);
  1347. }
  1348. static
  1349. void
  1350. list_del_nreq(struct fuse_notify_req *nreq)
  1351. {
  1352. struct fuse_notify_req *prev = nreq->prev;
  1353. struct fuse_notify_req *next = nreq->next;
  1354. prev->next = next;
  1355. next->prev = prev;
  1356. }
  1357. static
  1358. void
  1359. list_add_nreq(struct fuse_notify_req *nreq,
  1360. struct fuse_notify_req *next)
  1361. {
  1362. struct fuse_notify_req *prev = next->prev;
  1363. nreq->next = next;
  1364. nreq->prev = prev;
  1365. prev->next = nreq;
  1366. next->prev = nreq;
  1367. }
  1368. static
  1369. void
  1370. list_init_nreq(struct fuse_notify_req *nreq)
  1371. {
  1372. nreq->next = nreq;
  1373. nreq->prev = nreq;
  1374. }
/* Handle FUSE_NOTIFY_REPLY: locate the pending notify request whose
   unique id matches this reply, unlink it while holding the session
   lock, then invoke its completion callback outside the lock. */
static
void
do_notify_reply(fuse_req_t req,
                struct fuse_in_header *hdr_)
{
  struct fuse_ll *f = req->f;
  struct fuse_notify_req *nreq;
  struct fuse_notify_req *head;

  pthread_mutex_lock(&f->lock);
  head = &f->notify_list;
  for(nreq = head->next; nreq != head; nreq = nreq->next)
    {
      if(nreq->unique == req->unique)
        {
          list_del_nreq(nreq);
          break;
        }
    }
  pthread_mutex_unlock(&f->lock);

  /* nreq == head means no matching pending request was found; the
     reply is silently dropped. */
  if(nreq != head)
    nreq->reply(nreq, req, hdr_->nodeid, &hdr_[1]);
}
  1397. static
  1398. void
  1399. do_copy_file_range(fuse_req_t req_,
  1400. struct fuse_in_header *hdr_)
  1401. {
  1402. req_->f->op.copy_file_range(req_,hdr_);
  1403. }
/* Send an unsolicited notification to the kernel.  The caller fills
   iov[1..count-1] with the notification payload; iov[0] is pointed at
   the fuse_out_header built here.  For notifications 'unique' is 0 and
   'error' carries the notify code.  Returns 0 or a negative errno. */
static
int
send_notify_iov(struct fuse_ll *f,
                struct fuse_chan *ch,
                int notify_code,
                struct iovec *iov,
                int count)
{
  struct fuse_out_header out;

  /* Notifications are invalid before INIT has been processed. */
  if(!f->got_init)
    return -ENOTCONN;

  out.unique = 0;
  out.error = notify_code;
  /* NOTE(review): out.len is left unset here — presumably filled in by
     fuse_send_msg() from the iovec total; confirm. */
  iov[0].iov_base = &out;
  iov[0].iov_len = sizeof(struct fuse_out_header);

  return fuse_send_msg(f, ch, iov, count);
}
  1421. int
  1422. fuse_lowlevel_notify_poll(fuse_pollhandle_t *ph)
  1423. {
  1424. if(ph != NULL)
  1425. {
  1426. struct fuse_notify_poll_wakeup_out outarg;
  1427. struct iovec iov[2];
  1428. outarg.kh = ph->kh;
  1429. iov[1].iov_base = &outarg;
  1430. iov[1].iov_len = sizeof(outarg);
  1431. return send_notify_iov(ph->f, ph->ch, FUSE_NOTIFY_POLL, iov, 2);
  1432. }
  1433. else
  1434. {
  1435. return 0;
  1436. }
  1437. }
  1438. int
  1439. fuse_lowlevel_notify_inval_inode(struct fuse_chan *ch,
  1440. uint64_t ino,
  1441. off_t off,
  1442. off_t len)
  1443. {
  1444. struct fuse_notify_inval_inode_out outarg;
  1445. struct fuse_ll *f;
  1446. struct iovec iov[2];
  1447. if(!ch)
  1448. return -EINVAL;
  1449. f = (struct fuse_ll*)fuse_session_data(fuse_chan_session(ch));
  1450. if(!f)
  1451. return -ENODEV;
  1452. outarg.ino = ino;
  1453. outarg.off = off;
  1454. outarg.len = len;
  1455. iov[1].iov_base = &outarg;
  1456. iov[1].iov_len = sizeof(outarg);
  1457. return send_notify_iov(f, ch, FUSE_NOTIFY_INVAL_INODE, iov, 2);
  1458. }
  1459. int
  1460. fuse_lowlevel_notify_inval_entry(struct fuse_chan *ch,
  1461. uint64_t parent,
  1462. const char *name,
  1463. size_t namelen)
  1464. {
  1465. struct fuse_notify_inval_entry_out outarg;
  1466. struct fuse_ll *f;
  1467. struct iovec iov[3];
  1468. if(!ch)
  1469. return -EINVAL;
  1470. f = (struct fuse_ll*)fuse_session_data(fuse_chan_session(ch));
  1471. if(!f)
  1472. return -ENODEV;
  1473. outarg.parent = parent;
  1474. outarg.namelen = namelen;
  1475. outarg.padding = 0;
  1476. iov[1].iov_base = &outarg;
  1477. iov[1].iov_len = sizeof(outarg);
  1478. iov[2].iov_base = (void *)name;
  1479. iov[2].iov_len = namelen + 1;
  1480. return send_notify_iov(f, ch, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
  1481. }
  1482. int
  1483. fuse_lowlevel_notify_delete(struct fuse_chan *ch,
  1484. uint64_t parent,
  1485. uint64_t child,
  1486. const char *name,
  1487. size_t namelen)
  1488. {
  1489. struct fuse_notify_delete_out outarg;
  1490. struct fuse_ll *f;
  1491. struct iovec iov[3];
  1492. if(!ch)
  1493. return -EINVAL;
  1494. f = (struct fuse_ll*)fuse_session_data(fuse_chan_session(ch));
  1495. if(!f)
  1496. return -ENODEV;
  1497. if(f->conn.proto_minor < 18)
  1498. return -ENOSYS;
  1499. outarg.parent = parent;
  1500. outarg.child = child;
  1501. outarg.namelen = namelen;
  1502. outarg.padding = 0;
  1503. iov[1].iov_base = &outarg;
  1504. iov[1].iov_len = sizeof(outarg);
  1505. iov[2].iov_base = (void *)name;
  1506. iov[2].iov_len = namelen + 1;
  1507. return send_notify_iov(f, ch, FUSE_NOTIFY_DELETE, iov, 3);
  1508. }
/* Push data into the kernel's cache for inode 'ino' starting at
   'offset'.  Requires protocol 7.15+.  Returns 0 or a negative errno. */
int
fuse_lowlevel_notify_store(struct fuse_chan *ch,
                           uint64_t ino,
                           off_t offset,
                           struct fuse_bufvec *bufv,
                           enum fuse_buf_copy_flags flags)
{
  struct fuse_out_header out;
  struct fuse_notify_store_out outarg;
  struct fuse_ll *f;
  /* Three slots although only two are filled here: fuse_send_data_iov()
     may append the data buffer as iov[2]. */
  struct iovec iov[3];
  size_t size = fuse_buf_size(bufv);
  int res;

  if(!ch)
    return -EINVAL;

  f = (struct fuse_ll*)fuse_session_data(fuse_chan_session(ch));
  if(!f)
    return -ENODEV;

  /* NOTIFY_STORE only exists since protocol 7.15. */
  if(f->conn.proto_minor < 15)
    return -ENOSYS;

  /* Notifications carry unique==0 and the notify code in 'error'.
     NOTE(review): out.len is left unset — presumably filled inside
     fuse_send_data_iov(); confirm. */
  out.unique = 0;
  out.error = FUSE_NOTIFY_STORE;

  outarg.nodeid = ino;
  outarg.offset = offset;
  outarg.size = size;
  outarg.padding = 0;

  iov[0].iov_base = &out;
  iov[0].iov_len = sizeof(out);
  iov[1].iov_base = &outarg;
  iov[1].iov_len = sizeof(outarg);

  res = fuse_send_data_iov(f, ch, iov, 2, bufv, flags);
  /* fuse_send_data_iov() reports errors as positive values; normalize
     to the negative-errno convention of this API. */
  if(res > 0)
    res = -res;

  return res;
}
/* Bookkeeping for one outstanding FUSE_NOTIFY_RETRIEVE: the generic
   notify-list node plus the caller-supplied cookie. */
struct fuse_retrieve_req
{
  struct fuse_notify_req nreq;
  void *cookie;  /* opaque value passed to fuse_lowlevel_notify_retrieve() */
};
/* Completion callback for fuse_lowlevel_notify_retrieve().  The
   retrieved data itself is ignored here (ino, inarg and the stored
   cookie are unused); only the kernel request and the bookkeeping
   record are released. */
static
void
fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
                       fuse_req_t req,
                       uint64_t ino,
                       const void *inarg)
{
  struct fuse_retrieve_req *rreq =
    container_of(nreq, struct fuse_retrieve_req, nreq);

  fuse_reply_none(req);
  free(rreq);
}
  1561. int
  1562. fuse_lowlevel_notify_retrieve(struct fuse_chan *ch,
  1563. uint64_t ino,
  1564. size_t size,
  1565. off_t offset,
  1566. void *cookie)
  1567. {
  1568. struct fuse_notify_retrieve_out outarg;
  1569. struct fuse_ll *f;
  1570. struct iovec iov[2];
  1571. struct fuse_retrieve_req *rreq;
  1572. int err;
  1573. if(!ch)
  1574. return -EINVAL;
  1575. f = (struct fuse_ll*)fuse_session_data(fuse_chan_session(ch));
  1576. if(!f)
  1577. return -ENODEV;
  1578. if(f->conn.proto_minor < 15)
  1579. return -ENOSYS;
  1580. rreq = malloc(sizeof(*rreq));
  1581. if(rreq == NULL)
  1582. return -ENOMEM;
  1583. pthread_mutex_lock(&f->lock);
  1584. rreq->cookie = cookie;
  1585. rreq->nreq.unique = f->notify_ctr++;
  1586. rreq->nreq.reply = fuse_ll_retrieve_reply;
  1587. list_add_nreq(&rreq->nreq, &f->notify_list);
  1588. pthread_mutex_unlock(&f->lock);
  1589. outarg.notify_unique = rreq->nreq.unique;
  1590. outarg.nodeid = ino;
  1591. outarg.offset = offset;
  1592. outarg.size = size;
  1593. iov[1].iov_base = &outarg;
  1594. iov[1].iov_len = sizeof(outarg);
  1595. err = send_notify_iov(f, ch, FUSE_NOTIFY_RETRIEVE, iov, 2);
  1596. if(err)
  1597. {
  1598. pthread_mutex_lock(&f->lock);
  1599. list_del_nreq(&rreq->nreq);
  1600. pthread_mutex_unlock(&f->lock);
  1601. free(rreq);
  1602. }
  1603. return err;
  1604. }
  1605. void *
  1606. fuse_req_userdata(fuse_req_t req)
  1607. {
  1608. return req->f->userdata;
  1609. }
  1610. const
  1611. struct fuse_ctx *
  1612. fuse_req_ctx(fuse_req_t req)
  1613. {
  1614. return &req->ctx;
  1615. }
/* Dispatch table mapping kernel opcode -> handler + name (name is used
   for debug output).  Indexed directly by fuse_in_header.opcode; gaps
   are NULL-initialized and rejected with ENOSYS by the request
   processors. */
static struct {
  void (*func)(fuse_req_t, struct fuse_in_header *);
  const char *name;
} fuse_ll_ops[] =
{
  [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
  [FUSE_FORGET] = { do_forget, "FORGET" },
  [FUSE_GETATTR] = { do_getattr, "GETATTR" },
  [FUSE_SETATTR] = { do_setattr, "SETATTR" },
  [FUSE_READLINK] = { do_readlink, "READLINK" },
  [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
  [FUSE_MKNOD] = { do_mknod, "MKNOD" },
  [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
  [FUSE_UNLINK] = { do_unlink, "UNLINK" },
  [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
  [FUSE_RENAME] = { do_rename, "RENAME" },
  [FUSE_LINK] = { do_link, "LINK" },
  [FUSE_OPEN] = { do_open, "OPEN" },
  [FUSE_READ] = { do_read, "READ" },
  [FUSE_WRITE] = { do_write, "WRITE" },
  [FUSE_STATFS] = { do_statfs, "STATFS" },
  [FUSE_RELEASE] = { do_release, "RELEASE" },
  [FUSE_FSYNC] = { do_fsync, "FSYNC" },
  [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
  [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
  [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
  [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
  [FUSE_FLUSH] = { do_flush, "FLUSH" },
  [FUSE_INIT] = { do_init, "INIT" },
  [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
  [FUSE_READDIR] = { do_readdir, "READDIR" },
  [FUSE_READDIRPLUS] = { do_readdir_plus, "READDIR_PLUS" },
  [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
  [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
  [FUSE_GETLK] = { do_getlk, "GETLK" },
  [FUSE_SETLK] = { do_setlk, "SETLK" },
  [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
  [FUSE_ACCESS] = { do_access, "ACCESS" },
  [FUSE_CREATE] = { do_create, "CREATE" },
  [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
  [FUSE_BMAP] = { do_bmap, "BMAP" },
  [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
  [FUSE_POLL] = { do_poll, "POLL" },
  [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
  [FUSE_DESTROY] = { do_destroy, "DESTROY" },
  [FUSE_NOTIFY_REPLY] = { do_notify_reply, "NOTIFY_REPLY" },
  [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
  [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
};
/* Number of slots in fuse_ll_ops; opcodes >= FUSE_MAXOP are invalid. */
#define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))

/* Keys for options handled by fuse_ll_opt_proc() rather than stored
   directly into struct fuse_ll. */
enum {
  KEY_HELP,
  KEY_VERSION,
};
/* Mount-option table for the lowlevel layer.  Note: "no_remote_lock"
   deliberately appears twice so one option sets both the POSIX-lock and
   flock disable flags. */
static const struct fuse_opt fuse_ll_opts[] =
{
  { "debug", offsetof(struct fuse_ll, debug), 1 },
  { "-d", offsetof(struct fuse_ll, debug), 1 },
  { "max_readahead=%u", offsetof(struct fuse_ll, conn.max_readahead), 0 },
  { "max_background=%u", offsetof(struct fuse_ll, conn.max_background), 0 },
  { "congestion_threshold=%u",
    offsetof(struct fuse_ll, conn.congestion_threshold), 0 },
  { "no_remote_lock", offsetof(struct fuse_ll, no_remote_posix_lock), 1},
  { "no_remote_lock", offsetof(struct fuse_ll, no_remote_flock), 1},
  { "no_remote_flock", offsetof(struct fuse_ll, no_remote_flock), 1},
  { "no_remote_posix_lock", offsetof(struct fuse_ll, no_remote_posix_lock), 1},
  { "splice_write", offsetof(struct fuse_ll, splice_write), 1},
  { "no_splice_write", offsetof(struct fuse_ll, no_splice_write), 1},
  { "splice_move", offsetof(struct fuse_ll, splice_move), 1},
  { "no_splice_move", offsetof(struct fuse_ll, no_splice_move), 1},
  { "splice_read", offsetof(struct fuse_ll, splice_read), 1},
  { "no_splice_read", offsetof(struct fuse_ll, no_splice_read), 1},
  /* max_read= is accepted but handled elsewhere; discard it here. */
  FUSE_OPT_KEY("max_read=", FUSE_OPT_KEY_DISCARD),
  FUSE_OPT_KEY("-h", KEY_HELP),
  FUSE_OPT_KEY("--help", KEY_HELP),
  FUSE_OPT_KEY("-V", KEY_VERSION),
  FUSE_OPT_KEY("--version", KEY_VERSION),
  FUSE_OPT_END
};
/* Print (to stderr) the kernel protocol version this library speaks. */
static
void
fuse_ll_version(void)
{
  fprintf(stderr, "using FUSE kernel interface version %i.%i\n",
          FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
}
  1702. static
  1703. void
  1704. fuse_ll_help(void)
  1705. {
  1706. fprintf(stderr,
  1707. " -o max_readahead=N set maximum readahead\n"
  1708. " -o max_background=N set number of maximum background requests\n"
  1709. " -o congestion_threshold=N set kernel's congestion threshold\n"
  1710. " -o no_remote_lock disable remote file locking\n"
  1711. " -o no_remote_flock disable remote file locking (BSD)\n"
  1712. " -o no_remote_posix_lock disable remove file locking (POSIX)\n"
  1713. " -o [no_]splice_write use splice to write to the fuse device\n"
  1714. " -o [no_]splice_move move data while splicing to the fuse device\n"
  1715. " -o [no_]splice_read use splice to read from the fuse device\n"
  1716. );
  1717. }
  1718. static
  1719. int
  1720. fuse_ll_opt_proc(void *data,
  1721. const char *arg,
  1722. int key,
  1723. struct fuse_args *outargs)
  1724. {
  1725. (void) data; (void) outargs;
  1726. switch (key)
  1727. {
  1728. case KEY_HELP:
  1729. fuse_ll_help();
  1730. break;
  1731. case KEY_VERSION:
  1732. fuse_ll_version();
  1733. break;
  1734. default:
  1735. fprintf(stderr, "fuse: unknown option `%s'\n", arg);
  1736. }
  1737. return -1;
  1738. }
/* True (non-zero) if 'opt' is an option consumed by the lowlevel
   option table above. */
int
fuse_lowlevel_is_lib_option(const char *opt)
{
  return fuse_opt_match(fuse_ll_opts, opt);
}
/* Session destructor.  Runs the filesystem's destroy callback if the
   kernel never sent FUSE_DESTROY (got_destroy unset), frees the calling
   thread's splice pipe, then tears down the TSD key, the lock and the
   fuse_ll object itself. */
static
void
fuse_ll_destroy(void *data)
{
  struct fuse_ll *f = (struct fuse_ll *)data;
  struct fuse_ll_pipe *llp;

  if(f->got_init && !f->got_destroy)
    {
      if(f->op.destroy)
        f->op.destroy(f->userdata);
    }

  /* Only the current thread's pipe is reachable here; other threads'
     pipes are freed by fuse_ll_pipe_destructor() via the TSD key. */
  llp = pthread_getspecific(f->pipe_key);
  if(llp != NULL)
    fuse_ll_pipe_free(llp);
  pthread_key_delete(f->pipe_key);

  pthread_mutex_destroy(&f->lock);
  free(f);
  /* Release the global fuse_req memory pool. */
  lfmp_clear(&g_FMP_fuse_req);
}
  1763. static
  1764. void
  1765. fuse_ll_pipe_destructor(void *data)
  1766. {
  1767. struct fuse_ll_pipe *llp = data;
  1768. fuse_ll_pipe_free(llp);
  1769. }
  1770. static
  1771. void
  1772. fuse_send_errno(struct fuse_ll *f_,
  1773. struct fuse_chan *ch_,
  1774. const int errno_,
  1775. const uint64_t unique_id_)
  1776. {
  1777. struct fuse_out_header out = {0};
  1778. struct iovec iov = {0};
  1779. out.unique = unique_id_;
  1780. out.error = -errno_;
  1781. iov.iov_base = &out;
  1782. iov.iov_len = sizeof(struct fuse_out_header);
  1783. fuse_send_msg(f_,ch_,&iov,1);
  1784. }
/* Convenience wrapper: report out-of-memory for the given request. */
static
void
fuse_send_enomem(struct fuse_ll *f_,
                 struct fuse_chan *ch_,
                 const uint64_t unique_id_)
{
  fuse_send_errno(f_,ch_,ENOMEM,unique_id_);
}
  1793. static
  1794. int
  1795. fuse_ll_buf_receive_read(struct fuse_session *se_,
  1796. fuse_msgbuf_t *msgbuf_)
  1797. {
  1798. int rv;
  1799. rv = read(fuse_chan_fd(se_->ch),msgbuf_->mem,msgbuf_->size);
  1800. if(rv == -1)
  1801. return -errno;
  1802. if(rv < sizeof(struct fuse_in_header))
  1803. {
  1804. fprintf(stderr, "short splice from fuse device\n");
  1805. return -EIO;
  1806. }
  1807. return rv;
  1808. }
/* Steady-state request processor: parse the fuse_in_header at the front
   of the buffer, allocate a request object, and dispatch through
   fuse_ll_ops.  Unknown or unimplemented opcodes get ENOSYS. */
static
void
fuse_ll_buf_process_read(struct fuse_session *se_,
                         const fuse_msgbuf_t *msgbuf_)
{
  int err;
  struct fuse_req *req;
  struct fuse_in_header *in;

  in = (struct fuse_in_header*)msgbuf_->mem;

  req = fuse_ll_alloc_req(se_->f);
  if(req == NULL)
    return fuse_send_enomem(se_->f,se_->ch,in->unique);

  req->unique = in->unique;
  req->ctx.uid = in->uid;
  req->ctx.gid = in->gid;
  req->ctx.pid = in->pid;
  req->ch = se_->ch;

  err = ENOSYS;
  /* Reject opcodes beyond the table or without a handler. */
  if(in->opcode >= FUSE_MAXOP)
    goto reply_err;
  if(fuse_ll_ops[in->opcode].func == NULL)
    goto reply_err;

  fuse_ll_ops[in->opcode].func(req, in);

  return;

 reply_err:
  fuse_reply_err(req, err);
  return;
}
/* First-request processor: only FUSE_INIT is acceptable as the first
   message on a connection (anything else gets EIO).  On success it
   switches the session to the steady-state processor before
   dispatching. */
static
void
fuse_ll_buf_process_read_init(struct fuse_session *se_,
                              const fuse_msgbuf_t *msgbuf_)
{
  int err;
  struct fuse_req *req;
  struct fuse_in_header *in;

  in = (struct fuse_in_header*)msgbuf_->mem;

  req = fuse_ll_alloc_req(se_->f);
  if(req == NULL)
    return fuse_send_enomem(se_->f,se_->ch,in->unique);

  req->unique = in->unique;
  req->ctx.uid = in->uid;
  req->ctx.gid = in->gid;
  req->ctx.pid = in->pid;
  req->ch = se_->ch;

  err = EIO;
  if(in->opcode != FUSE_INIT)
    goto reply_err;
  if(fuse_ll_ops[in->opcode].func == NULL)
    goto reply_err;

  /* All subsequent requests go through the normal processor. */
  se_->process_buf = fuse_ll_buf_process_read;

  fuse_ll_ops[in->opcode].func(req, in);

  return;

 reply_err:
  fuse_reply_err(req, err);
  return;
}
  1866. #if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
  1867. static
  1868. int
  1869. fuse_ll_buf_receive_splice(struct fuse_session *se_,
  1870. fuse_msgbuf_t *msgbuf_)
  1871. {
  1872. int rv;
  1873. size_t bufsize = msgbuf_->size;
  1874. rv = splice(fuse_chan_fd(se_->ch),NULL,msgbuf_->pipefd[1],NULL,bufsize,SPLICE_F_MOVE);
  1875. if(rv == -1)
  1876. return -errno;
  1877. if(rv < sizeof(struct fuse_in_header))
  1878. {
  1879. fprintf(stderr,"short splice from fuse device\n");
  1880. return -EIO;
  1881. }
  1882. return rv;
  1883. }
/* Steady-state processor for the splice path: move the request out of
   the pipe into msgbuf_->mem with vmsplice (retrying on EAGAIN), then
   parse and dispatch exactly like the read-based processor. */
static
void
fuse_ll_buf_process_splice(struct fuse_session *se_,
                           const fuse_msgbuf_t *msgbuf_)
{
  int rv;
  struct fuse_req *req;
  struct fuse_in_header *in;
  struct iovec iov = { msgbuf_->mem, msgbuf_->size };

 retry:
  rv = vmsplice(msgbuf_->pipefd[0], &iov, 1, 0);
  if(rv == -1)
    {
      rv = errno;
      if(rv == EAGAIN)
        goto retry;
      // TODO: Need to propagate back errors to caller
      return;
    }

  in = (struct fuse_in_header*)msgbuf_->mem;

  req = fuse_ll_alloc_req(se_->f);
  if(req == NULL)
    return fuse_send_enomem(se_->f,se_->ch,in->unique);

  req->unique = in->unique;
  req->ctx.uid = in->uid;
  req->ctx.gid = in->gid;
  req->ctx.pid = in->pid;
  req->ch = se_->ch;

  rv = ENOSYS;
  /* Reject opcodes beyond the table or without a handler. */
  if(in->opcode >= FUSE_MAXOP)
    goto reply_err;
  if(fuse_ll_ops[in->opcode].func == NULL)
    goto reply_err;

  fuse_ll_ops[in->opcode].func(req, in);

  return;

 reply_err:
  fuse_reply_err(req, rv);
  return;
}
/* First-request processor for the splice path: drain the pipe into
   msgbuf_->mem via vmsplice (retrying on EAGAIN), accept only FUSE_INIT
   (anything else gets EIO), and switch the session to the steady-state
   splice processor before dispatching. */
static
void
fuse_ll_buf_process_splice_init(struct fuse_session *se_,
                                const fuse_msgbuf_t *msgbuf_)
{
  int rv;
  struct fuse_req *req;
  struct fuse_in_header *in;
  struct iovec iov = { msgbuf_->mem, msgbuf_->size };

 retry:
  rv = vmsplice(msgbuf_->pipefd[0], &iov, 1, 0);
  if(rv == -1)
    {
      rv = errno;
      if(rv == EAGAIN)
        goto retry;
      // TODO: Need to propagate back errors to caller
      return;
    }

  in = (struct fuse_in_header*)msgbuf_->mem;

  req = fuse_ll_alloc_req(se_->f);
  if(req == NULL)
    return fuse_send_enomem(se_->f,se_->ch,in->unique);

  req->unique = in->unique;
  req->ctx.uid = in->uid;
  req->ctx.gid = in->gid;
  req->ctx.pid = in->pid;
  req->ch = se_->ch;

  rv = EIO;
  if(in->opcode != FUSE_INIT)
    goto reply_err;
  if(fuse_ll_ops[in->opcode].func == NULL)
    goto reply_err;

  /* All subsequent requests go through the normal splice processor. */
  se_->process_buf = fuse_ll_buf_process_splice;

  fuse_ll_ops[in->opcode].func(req, in);

  return;

 reply_err:
  fuse_reply_err(req, rv);
  return;
}
  1963. #else
/* splice/vmsplice unavailable at build time: fall back to read(2). */
static
int
fuse_ll_buf_receive_splice(struct fuse_session *se_,
                           fuse_msgbuf_t *msgbuf_)
{
  return fuse_ll_buf_receive_read(se_,msgbuf_);
}
/* splice/vmsplice unavailable at build time: use the read-based
   processor. */
static
void
fuse_ll_buf_process_splice(struct fuse_session *se_,
                           const fuse_msgbuf_t *msgbuf_)
{
  return fuse_ll_buf_process_read(se_,msgbuf_);
}
/* splice/vmsplice unavailable at build time: use the read-based INIT
   processor. */
static
void
fuse_ll_buf_process_splice_init(struct fuse_session *se_,
                                const fuse_msgbuf_t *msgbuf_)
{
  return fuse_ll_buf_process_read_init(se_,msgbuf_);
}
  1985. #endif
  1986. /*
  1987. * always call fuse_lowlevel_new_common() internally, to work around a
  1988. * misfeature in the FreeBSD runtime linker, which links the old
  1989. * version of a symbol to internal references.
  1990. */
/* Allocate and initialize a lowlevel session: copies the operation
   table, parses lowlevel mount options, and creates a fuse_session
   wired to either the splice-based or read-based receive/process
   callbacks.  Returns NULL on any failure (resources released via the
   goto-cleanup chain). */
struct fuse_session *
fuse_lowlevel_new_common(struct fuse_args *args,
                         const struct fuse_lowlevel_ops *op,
                         size_t op_size,
                         void *userdata)
{
  int err;
  struct fuse_ll *f;
  struct fuse_session *se;

  /* A caller built against a newer header may pass a larger ops struct;
     only copy the portion this library knows about. */
  if(sizeof(struct fuse_lowlevel_ops) < op_size)
    {
      fprintf(stderr, "fuse: warning: library too old, some operations may not work\n");
      op_size = sizeof(struct fuse_lowlevel_ops);
    }

  f = (struct fuse_ll *) calloc(1, sizeof(struct fuse_ll));
  if(f == NULL)
    {
      fprintf(stderr, "fuse: failed to allocate fuse object\n");
      goto out;
    }

  /* Start with "unlimited"; do_init() clamps against kernel limits. */
  f->conn.max_write = UINT_MAX;
  f->conn.max_readahead = UINT_MAX;
  list_init_nreq(&f->notify_list);
  f->notify_ctr = 1;
  fuse_mutex_init(&f->lock);

  err = pthread_key_create(&f->pipe_key, fuse_ll_pipe_destructor);
  if(err)
    {
      fprintf(stderr, "fuse: failed to create thread specific key: %s\n",
              strerror(err));
      goto out_free;
    }

  if(fuse_opt_parse(args, f, fuse_ll_opts, fuse_ll_opt_proc) == -1)
    goto out_key_destroy;

  memcpy(&f->op, op, op_size);
  f->owner = getuid();
  f->userdata = userdata;

  /* splice_read selects the splice-based receive path; the _init
     process callback swaps itself out after FUSE_INIT. */
  if(f->splice_read)
    {
      se = fuse_session_new(f,
                            fuse_ll_buf_receive_splice,
                            fuse_ll_buf_process_splice_init,
                            fuse_ll_destroy);
    }
  else
    {
      se = fuse_session_new(f,
                            fuse_ll_buf_receive_read,
                            fuse_ll_buf_process_read_init,
                            fuse_ll_destroy);
    }

  if(!se)
    goto out_key_destroy;

  return se;

 out_key_destroy:
  pthread_key_delete(f->pipe_key);
 out_free:
  pthread_mutex_destroy(&f->lock);
  free(f);
 out:
  return NULL;
}
/* Public constructor: always delegates to fuse_lowlevel_new_common()
   (kept separate to work around the FreeBSD runtime-linker symbol
   issue described above). */
struct fuse_session*
fuse_lowlevel_new(struct fuse_args *args,
                  const struct fuse_lowlevel_ops *op,
                  size_t op_size,
                  void *userdata)
{
  return fuse_lowlevel_new_common(args, op, op_size, userdata);
}