diff --git a/libfuse/Makefile b/libfuse/Makefile index 03d0a0c2..95b363b8 100644 --- a/libfuse/Makefile +++ b/libfuse/Makefile @@ -32,6 +32,7 @@ INSTALLMAN1DIR = $(DESTDIR)$(MAN1DIR) AR ?= ar SRC = \ + lib/fuse_node.c \ lib/buffer.c \ lib/fuse_dirents.c \ lib/fuse.c \ @@ -49,7 +50,8 @@ DEPS = $(SRC:lib/%.c=build/%.d) CFLAGS ?= \ $(OPT_FLAGS) CFLAGS := \ - ${CFLAGS} \ + ${CFLAGS} \ + -std=gnu99 \ -Wall \ -pipe \ -MMD diff --git a/libfuse/include/fuse.h b/libfuse/include/fuse.h index 1a62f960..74691926 100644 --- a/libfuse/include/fuse.h +++ b/libfuse/include/fuse.h @@ -25,7 +25,6 @@ #include "extern_c.h" #include "fuse_common.h" -#include "fuse_dirents.h" #include #include @@ -47,6 +46,9 @@ struct fuse; /** Structure containing a raw command */ struct fuse_cmd; +struct fuse_dirents_t; +typedef struct fuse_dirents_t fuse_dirents_t; + /** * The file system operations: * @@ -689,22 +691,8 @@ int fuse_is_lib_option(const char *opt); */ int fuse_main_real(int argc, char *argv[], const struct fuse_operations *op, size_t op_size); -/** - * Start the cleanup thread when using option "remember". - * - * This is done automatically by fuse_loop_mt() - * @param fuse struct fuse pointer for fuse instance - * @return 0 on success and -1 on error - */ -int fuse_start_cleanup_thread(struct fuse *fuse); - -/** - * Stop the cleanup thread when using option "remember". - * - * This is done automatically by fuse_loop_mt() - * @param fuse struct fuse pointer for fuse instance - */ -void fuse_stop_cleanup_thread(struct fuse *fuse); +int fuse_start_maintenance_thread(struct fuse *fuse); +void fuse_stop_maintenance_thread(struct fuse *fuse); /** * Iterate over cache removing stale entries @@ -765,8 +753,6 @@ int fuse_fs_release(struct fuse_fs *fs, fuse_file_info_t *fi); int fuse_fs_open(struct fuse_fs *fs, const char *path, fuse_file_info_t *fi); -int fuse_fs_read(struct fuse_fs *fs, char *buf, size_t size, - off_t off, fuse_file_info_t *fi); int fuse_fs_read_buf(struct fuse_fs *fs, struct fuse_bufvec **bufp, size_t size, off_t off, fuse_file_info_t *fi); diff --git a/libfuse/include/fuse_dirents.h b/libfuse/include/fuse_dirents.h index 92920141..25abf198 100644 --- a/libfuse/include/fuse_dirents.h +++ b/libfuse/include/fuse_dirents.h @@ -43,14 +43,12 @@ enum fuse_dirents_type_e }; typedef enum fuse_dirents_type_e fuse_dirents_type_t; -typedef struct fuse_dirents_s fuse_dirents_t; -struct fuse_dirents_s +typedef struct fuse_dirents_t fuse_dirents_t; +struct fuse_dirents_t { - char *buf; - uint64_t buf_len; - uint64_t data_len; - kvec_t(uint32_t) offs; - fuse_dirents_type_t type; + kvec_t(char) data; + kvec_t(uint32_t) offs; + fuse_dirents_type_t type; }; int fuse_dirents_init(fuse_dirents_t *d); diff --git a/libfuse/include/kvec.h b/libfuse/include/kvec.h index 038de9e7..e600b8e6 100644 --- a/libfuse/include/kvec.h +++ b/libfuse/include/kvec.h @@ -59,6 +59,10 @@ #define kv_pop(v) ((v).a[--(v).n]) #define kv_size(v) ((v).n) #define kv_max(v) ((v).m) +#define kv_first(v) (kv_A(v,0)) +#define kv_last(v) (kv_A(v,kv_size(v)-1)) +#define kv_end(v) (kv_A(v,kv_size(v))) +#define kv_delete(v,i) (kv_A(v,i) = kv_pop(v)) #define kv_resize(type, v, s) ((v).m = (s), (v).a = (type*)realloc((v).a, sizeof(type) * (v).m)) diff --git a/libfuse/lib/fmp.h b/libfuse/lib/fmp.h new file mode 100644 index 00000000..dfa8057e --- /dev/null +++ b/libfuse/lib/fmp.h @@ -0,0 +1,356 @@ +/* + ISC License + + Copyright (c) 2021, Antonio SJ Musumeci + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or 
without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#pragma once + +#include "kvec.h" + +#include +#include +#include +#include +#include +#include + +#define ROUND_UP(N,S) ((((N) + (S) - 1) / (S)) * (S)) + +typedef kvec_t(void*) slab_kvec_t; + +typedef struct mem_stack_t mem_stack_t; +struct mem_stack_t +{ + mem_stack_t *next; +}; + +typedef struct fmp_t fmp_t; +struct fmp_t +{ + mem_stack_t *objs; + slab_kvec_t slabs; + uint64_t avail_objs; + uint64_t obj_size; + uint64_t page_size; + uint64_t slab_size; +}; + +static +inline +uint64_t +fmp_page_size() +{ + return sysconf(_SC_PAGESIZE); +} + +static +inline +void +fmp_init(fmp_t *fmp_, + const uint64_t obj_size_, + const uint64_t page_multiple_) +{ + kv_init(fmp_->slabs); + fmp_->objs = NULL; + fmp_->avail_objs = 0; + fmp_->obj_size = ROUND_UP(obj_size_,sizeof(void*)); + fmp_->page_size = fmp_page_size(); + fmp_->slab_size = (fmp_->page_size * page_multiple_); +} + +static +inline +uint64_t +fmp_slab_count(fmp_t *fmp_) +{ + return kv_size(fmp_->slabs); +} + +static +inline +void* +fmp_slab_alloc_posix_memalign(fmp_t *fmp_) +{ + int rv; + void *mem; + const size_t alignment = fmp_->page_size; + const size_t size = fmp_->slab_size; + + rv = posix_memalign(&mem,alignment,size); + if(rv != 0) + return NULL; + + return NULL; +} + +static +inline +void* +fmp_slab_alloc_mmap(fmp_t *fmp_) +{ + void *mem; + void *address = NULL; + const size_t length = fmp_->slab_size; + const int protect = PROT_READ|PROT_WRITE; + const int flags = MAP_PRIVATE|MAP_ANONYMOUS; + const int filedes = -1; + const off_t offset = 0; + + mem = mmap(address,length,protect,flags,filedes,offset); + if(mem == MAP_FAILED) + return NULL; + + return mem; +} + +static +inline +void +fmp_slab_free_posix_memalign(fmp_t* fmp_, + void *mem_) +{ + (void)fmp_; + free(mem_); +} + +static +inline +void +fmp_slab_free_mmap(fmp_t* fmp_, + void *mem_) +{ + void *addr = mem_; + size_t length = fmp_->slab_size; + + (void)munmap(addr,length); +} + +static +inline +int +fmp_slab_alloc(fmp_t *fmp_) +{ + char *i; + void *mem; + + mem = fmp_slab_alloc_mmap(fmp_); + if(mem == NULL) + return -ENOMEM; + + kv_push(void*,fmp_->slabs,mem); + + i = ((char*)mem + fmp_->slab_size - fmp_->obj_size); + while(i >= (char*)mem) + { + mem_stack_t *obj = (mem_stack_t*)i; + + obj->next = fmp_->objs; + fmp_->objs = obj; + fmp_->avail_objs++; + + i -= fmp_->obj_size; + } + + return 0; +} + +static +inline +void* +fmp_alloc(fmp_t *fmp_) +{ + void *rv; + + if(fmp_->objs == NULL) + fmp_slab_alloc(fmp_); + if(fmp_->objs == NULL) + return NULL; + + rv = fmp_->objs; + + fmp_->objs = fmp_->objs->next; + fmp_->avail_objs--; + + return rv; +} + +static +inline +void* +fmp_calloc(fmp_t *fmp_) +{ + void *obj; + + obj = fmp_alloc(fmp_); + if(obj == NULL) + return NULL; + + memset(obj,0,fmp_->obj_size); + + return obj; +} + +static +inline +void +fmp_free(fmp_t *fmp_, + void *obj_) +{ + mem_stack_t *obj = (mem_stack_t*)obj_; + + obj->next = fmp_->objs; + 
fmp_->objs = obj; + fmp_->avail_objs++; +} + +static +inline +void +fmp_clear(fmp_t *fmp_) +{ + while(kv_size(fmp_->slabs)) + { + void *slab = kv_pop(fmp_->slabs); + + fmp_slab_free_mmap(fmp_,slab); + } + + fmp_->objs = NULL; + fmp_->avail_objs = 0; +} + +static +inline +void +fmp_destroy(fmp_t *fmp_) +{ + fmp_clear(fmp_); + kv_destroy(fmp_->slabs); +} + +static +inline +uint64_t +fmp_avail_objs(fmp_t *fmp_) +{ + return fmp_->avail_objs; +} + +static +inline +uint64_t +fmp_objs_in_slab(fmp_t *fmp_, + void *slab_) +{ + char *slab; + uint64_t objs_in_slab; + + objs_in_slab = 0; + slab = (char*)slab_; + for(mem_stack_t *stack = fmp_->objs; stack != NULL; stack = stack->next) + { + char *obj = (char*)stack; + if((obj >= slab) && (obj < (slab + fmp_->slab_size))) + objs_in_slab++; + } + + return objs_in_slab; +} + +static +inline +void +fmp_remove_objs_in_slab(fmp_t *fmp_, + void *slab_) +{ + char *slab = (char*)slab_; + mem_stack_t **p = &fmp_->objs; + + while((*p) != NULL) + { + char *obj = (char*)*p; + + if((obj >= slab) && (obj < (slab + fmp_->slab_size))) + { + *p = (*p)->next; + fmp_->avail_objs--; + continue; + } + + p = &(*p)->next; + } +} + +static +inline +int +fmp_gc(fmp_t *fmp_) +{ + int i; + int freed_slabs; + uint64_t objs_per_slab; + + objs_per_slab = (fmp_->slab_size / fmp_->obj_size); + + i = 0; + freed_slabs = 0; + while(i < kv_size(fmp_->slabs)) + { + char *slab; + uint64_t objs_in_slab; + + slab = kv_A(fmp_->slabs,i); + + objs_in_slab = fmp_objs_in_slab(fmp_,slab); + if(objs_in_slab != objs_per_slab) + { + i++; + continue; + } + + fmp_remove_objs_in_slab(fmp_,slab); + + kv_delete(fmp_->slabs,i); + + fmp_slab_free_mmap(fmp_,slab); + freed_slabs++; + } + + return freed_slabs; +} + +static +inline +uint64_t +fmp_objs_per_slab(fmp_t *fmp_) +{ + return (fmp_->slab_size / fmp_->obj_size); +} + +static +inline +double +fmp_slab_usage_ratio(fmp_t *fmp_) +{ + double rv; + uint64_t objs_per_slab; + + objs_per_slab = fmp_objs_per_slab(fmp_); + + rv = ((double)fmp_->avail_objs / (double)objs_per_slab); + + return rv; +} diff --git a/libfuse/lib/fuse.c b/libfuse/lib/fuse.c index 13192668..a0c033d3 100644 --- a/libfuse/lib/fuse.c +++ b/libfuse/lib/fuse.c @@ -9,6 +9,10 @@ /* For pthread_rwlock_t */ #define _GNU_SOURCE +#include "fuse_node.h" +#include "lfmp.h" +#include "kvec.h" + #include "config.h" #include "fuse_i.h" #include "fuse_lowlevel.h" @@ -38,10 +42,8 @@ #include #include -#define FUSE_NODE_SLAB 1 - -#ifndef MAP_ANONYMOUS -#undef FUSE_NODE_SLAB +#ifdef HAVE_MALLOC_TRIM +#include #endif #define FUSE_UNKNOWN_INO UINT64_MAX @@ -108,11 +110,11 @@ struct list_head struct list_head *prev; }; -struct node_slab +typedef struct remembered_node_t remembered_node_t; +struct remembered_node_t { - struct list_head list; /* must be the first member */ - struct list_head freelist; - int used; + struct node *node; + time_t time; }; struct fuse @@ -120,7 +122,6 @@ struct fuse struct fuse_session *se; struct node_table name_table; struct node_table id_table; - struct list_head lru_table; fuse_ino_t ctr; uint64_t generation; unsigned int hidectr; @@ -128,10 +129,10 @@ struct fuse struct fuse_config conf; struct fuse_fs *fs; struct lock_queue_element *lockq; - int pagesize; - struct list_head partial_slabs; - struct list_head full_slabs; - pthread_t prune_thread; + + pthread_t maintenance_thread; + lfmp_t node_fmp; + kvec_t(remembered_node_t) remembered_nodes; }; struct lock @@ -166,16 +167,10 @@ struct node char inline_name[32]; }; + #define TREELOCK_WRITE -1 #define TREELOCK_WAIT_OFFSET INT_MIN 
-struct node_lru -{ - struct node node; - struct list_head lru; - struct timespec forget_time; -}; - struct fuse_dh { pthread_mutex_t lock; @@ -193,21 +188,6 @@ static pthread_key_t fuse_context_key; static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER; static int fuse_context_ref; -static -void -init_list_head(struct list_head *list) -{ - list->next = list; - list->prev = list; -} - -static -int -list_empty(const struct list_head *head) -{ - return head->next == head; -} - static void list_add(struct list_head *new, @@ -250,152 +230,11 @@ list_del(struct list_head *entry) prev->next = next; } -static -inline -int -lru_enabled(struct fuse *f) -{ - return f->conf.remember > 0; -} - -static -struct -node_lru* -node_lru(struct node *node) -{ - return (struct node_lru*)node; -} - -static -size_t -get_node_size(struct fuse *f) -{ - if(lru_enabled(f)) - return sizeof(struct node_lru); - else - return sizeof(struct node); -} - -#ifdef FUSE_NODE_SLAB -static -struct node_slab* -list_to_slab(struct list_head *head) -{ - return (struct node_slab *)head; -} - -static -struct node_slab* -node_to_slab(struct fuse *f, - struct node *node) -{ - return (struct node_slab *)(((uintptr_t)node) & ~((uintptr_t)f->pagesize - 1)); -} - -static -int -alloc_slab(struct fuse *f) -{ - void *mem; - struct node_slab *slab; - char *start; - size_t num; - size_t i; - size_t node_size = get_node_size(f); - - mem = mmap(NULL,f->pagesize,PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS,-1,0); - - if(mem == MAP_FAILED) - return -1; - - slab = mem; - init_list_head(&slab->freelist); - slab->used = 0; - num = (f->pagesize - sizeof(struct node_slab)) / node_size; - - start = (char *)mem + f->pagesize - num * node_size; - for(i = 0; i < num; i++) - { - struct list_head *n; - - n = (struct list_head *)(start + i * node_size); - list_add_tail(n,&slab->freelist); - } - list_add_tail(&slab->list,&f->partial_slabs); - - return 0; -} - static struct node* alloc_node(struct fuse *f) { - struct node_slab *slab; - struct list_head *node; - - if(list_empty(&f->partial_slabs)) - { - int res = alloc_slab(f); - if(res != 0) - return NULL; - } - slab = list_to_slab(f->partial_slabs.next); - slab->used++; - node = slab->freelist.next; - list_del(node); - if(list_empty(&slab->freelist)) - { - list_del(&slab->list); - list_add_tail(&slab->list,&f->full_slabs); - } - memset(node,0,sizeof(struct node)); - - return (struct node *)node; -} - -static -void -free_slab(struct fuse *f, - struct node_slab *slab) -{ - int res; - - list_del(&slab->list); - res = munmap(slab,f->pagesize); - if(res == -1) - fprintf(stderr,"fuse warning: munmap(%p) failed\n",slab); -} - -static -void -free_node_mem(struct fuse *f, - struct node *node) -{ - struct node_slab *slab = node_to_slab(f,node); - struct list_head *n = (struct list_head *)node; - - slab->used--; - if(slab->used) - { - if(list_empty(&slab->freelist)) - { - list_del(&slab->list); - list_add_tail(&slab->list,&f->partial_slabs); - } - list_add_head(n,&slab->freelist); - } - else - { - free_slab(f,slab); - } -} -#else -static -struct node* -alloc_node(struct fuse *f) -{ - return (struct node *)calloc(1,get_node_size(f)); + return lfmp_calloc(&f->node_fmp); } static @@ -403,10 +242,8 @@ void free_node_mem(struct fuse *f, struct node *node) { - (void)f; - free(node); + return lfmp_free(&f->node_fmp,node); } -#endif static size_t @@ -454,29 +291,44 @@ get_node(struct fuse *f, return node; } -static void curr_time(struct timespec *now); -static double diff_timespec(const struct timespec 
*t1, - const struct timespec *t2); - static void -remove_node_lru(struct node *node) +remove_remembered_node(struct fuse *f_, + struct node *node_) { - struct node_lru *lnode = node_lru(node); - list_del(&lnode->lru); - init_list_head(&lnode->lru); + for(size_t i = 0; i < kv_size(f_->remembered_nodes); i++) + { + if(kv_A(f_->remembered_nodes,i).node != node_) + continue; + + kv_delete(f_->remembered_nodes,i); + break; + } } +#ifndef CLOCK_MONOTONIC +# define CLOCK_MONOTONIC CLOCK_REALTIME +#endif + static -void -set_forget_time(struct fuse *f, - struct node *node) +time_t +current_time() { - struct node_lru *lnode = node_lru(node); + int rv; + struct timespec now; + static clockid_t clockid = CLOCK_MONOTONIC; - list_del(&lnode->lru); - list_add_tail(&lnode->lru,&f->lru_table); - curr_time(&lnode->forget_time); + rv = clock_gettime(clockid,&now); + if((rv == -1) && (errno == EINVAL)) + { + clockid = CLOCK_REALTIME; + rv = clock_gettime(clockid,&now); + } + + if(rv == -1) + now.tv_sec = time(NULL); + + return now.tv_sec; } static @@ -777,7 +629,7 @@ hash_name(struct fuse *f, return -1; } - parent->refctr ++; + parent->refctr++; node->parent = parent; node->name_next = f->name_table.array[hash]; f->name_table.array[hash] = node; @@ -789,6 +641,14 @@ hash_name(struct fuse *f, return 0; } +static +inline +int +remember_nodes(struct fuse *f_) +{ + return (f_->conf.remember > 0); +} + static void delete_node(struct fuse *f, @@ -796,8 +656,8 @@ delete_node(struct fuse *f, { assert(node->treelock == 0); unhash_name(f,node); - if(lru_enabled(f)) - remove_node_lru(node); + if(remember_nodes(f)) + remove_remembered_node(f,node); unhash_id(f,node); free_node(f,node); } @@ -881,6 +741,7 @@ find_node(struct fuse *f, node = get_node(f,parent); else node = lookup_node(f,parent,name); + if(node == NULL) { node = alloc_node(f); @@ -899,15 +760,10 @@ find_node(struct fuse *f, goto out_err; } hash_id(f,node); - if(lru_enabled(f)) - { - struct node_lru *lnode = node_lru(node); - init_list_head(&lnode->lru); - } } - else if(lru_enabled(f) && node->nlookup == 1) + else if((node->nlookup == 1) && remember_nodes(f)) { - remove_node_lru(node); + remove_remembered_node(f,node); } inc_nlookup(node); out_err: @@ -1420,10 +1276,20 @@ forget_node(struct fuse *f, assert(node->nlookup >= nlookup); node->nlookup -= nlookup; - if(!node->nlookup) - unref_node(f,node); - else if(lru_enabled(f) && node->nlookup == 1) - set_forget_time(f,node); + + if(node->nlookup == 0) + { + unref_node(f,node); + } + else if((node->nlookup == 1) && remember_nodes(f)) + { + remembered_node_t fn; + + fn.node = node; + fn.time = current_time(); + + kv_push(remembered_node_t,f->remembered_nodes,fn); + } pthread_mutex_unlock(&f->lock); } @@ -1433,11 +1299,8 @@ void unlink_node(struct fuse *f, struct node *node) { - if(f->conf.remember) - { - assert(node->nlookup > 1); - node->nlookup--; - } + assert(node->nlookup > 1); + node->nlookup--; unhash_name(f,node); } @@ -1640,29 +1503,6 @@ fuse_fs_read_buf(struct fuse_fs *fs, return 0; } -int -fuse_fs_read(struct fuse_fs *fs, - char *mem, - size_t size, - off_t off, - fuse_file_info_t *fi) -{ - int res; - struct fuse_bufvec *buf = NULL; - - res = fuse_fs_read_buf(fs,&buf,size,off,fi); - if(res == 0) - { - struct fuse_bufvec dst = FUSE_BUFVEC_INIT(size); - - dst.buf[0].mem = mem; - res = fuse_buf_copy(&dst,buf,0); - } - fuse_free_buf(buf); - - return res; -} - int fuse_fs_write_buf(struct fuse_fs *fs, struct fuse_bufvec *buf, @@ -1938,29 +1778,6 @@ node_open(const struct node *node_) return ((node_ != 
NULL) && (node_->open_count > 0)); } -#ifndef CLOCK_MONOTONIC -#define CLOCK_MONOTONIC CLOCK_REALTIME -#endif - -static -void -curr_time(struct timespec *now) -{ - static clockid_t clockid = CLOCK_MONOTONIC; - int res = clock_gettime(clockid,now); - if(res == -1 && errno == EINVAL) - { - clockid = CLOCK_REALTIME; - res = clock_gettime(clockid,now); - } - - if(res == -1) - { - perror("fuse: clock_gettime"); - abort(); - } -} - static void update_stat(struct node *node_, @@ -2164,7 +1981,8 @@ fuse_lib_init(void *data, void fuse_fs_destroy(struct fuse_fs *fs) { - fs->op.destroy(); + if(fs->op.destroy) + fs->op.destroy(); free(fs); } @@ -2195,28 +2013,26 @@ fuse_lib_lookup(fuse_req_t req, if(name[0] == '.') { - int len = strlen(name); - - if(len == 1 || (name[1] == '.' && len == 2)) + if(name[1] == '\0') { + name = NULL; pthread_mutex_lock(&f->lock); - if(len == 1) + dot = get_node_nocheck(f,parent); + if(dot == NULL) { - dot = get_node_nocheck(f,parent); - if(dot == NULL) - { - pthread_mutex_unlock(&f->lock); - reply_entry(req,&e,-ESTALE); - return; - } - dot->refctr++; - } - else - { - parent = get_node(f,parent)->parent->nodeid; + pthread_mutex_unlock(&f->lock); + reply_entry(req,&e,-ESTALE); + return; } + dot->refctr++; pthread_mutex_unlock(&f->lock); + } + else if((name[1] == '.') && (name[2] == '\0')) + { name = NULL; + pthread_mutex_lock(&f->lock); + parent = get_node(f,parent)->parent->nodeid; + pthread_mutex_unlock(&f->lock); } } @@ -2231,12 +2047,14 @@ fuse_lib_lookup(fuse_req_t req, } free_path(f,parent,path); } + if(dot) { pthread_mutex_lock(&f->lock); unref_node(f,dot); pthread_mutex_unlock(&f->lock); } + reply_entry(req,&e,err); } @@ -2820,15 +2638,6 @@ fuse_lib_create(fuse_req_t req, free_path(f,parent,path); } -static -double -diff_timespec(const struct timespec *t1, - const struct timespec *t2) -{ - return (t1->tv_sec - t2->tv_sec) + - ((double)t1->tv_nsec - (double)t2->tv_nsec) / 1000000000.0; -} - static void open_auto_cache(struct fuse *f, @@ -3036,8 +2845,8 @@ readdir_buf_size(fuse_dirents_t *d_, { if(off_ >= kv_size(d_->offs)) return 0; - if((kv_A(d_->offs,off_) + size_) > d_->data_len) - return (d_->data_len - kv_A(d_->offs,off_)); + if((kv_A(d_->offs,off_) + size_) > kv_size(d_->data)) + return (kv_size(d_->data) - kv_A(d_->offs,off_)); return size_; } @@ -3046,7 +2855,11 @@ char* readdir_buf(fuse_dirents_t *d_, off_t off_) { - return &d_->buf[kv_A(d_->offs,off_)]; + size_t i; + + i = kv_A(d_->offs,off_); + + return &kv_A(d_->data,i); } static @@ -3070,7 +2883,7 @@ fuse_lib_readdir(fuse_req_t req_, pthread_mutex_lock(&dh->lock); rv = 0; - if((off_ == 0) || (d->data_len == 0)) + if((off_ == 0) || (kv_size(d->data) == 0)) rv = fuse_fs_readdir(f->fs,&fi,d); if(rv) @@ -3110,7 +2923,7 @@ fuse_lib_readdir_plus(fuse_req_t req_, pthread_mutex_lock(&dh->lock); rv = 0; - if((off_ == 0) || (d->data_len == 0)) + if((off_ == 0) || (kv_size(d->data) == 0)) rv = fuse_fs_readdir_plus(f->fs,&fi,d); if(rv) @@ -3795,61 +3608,115 @@ fuse_lib_fallocate(fuse_req_t req, static int -clean_delay(struct fuse *f) +remembered_node_cmp(const void *a_, + const void *b_) { - /* - * This is calculating the delay between clean runs. To - * reduce the number of cleans we are doing them 10 times - * within the remember window. 
- */ - int min_sleep = 60; - int max_sleep = 3600; - int sleep_time = f->conf.remember / 10; + const remembered_node_t *a = a_; + const remembered_node_t *b = b_; - if(sleep_time > max_sleep) - return max_sleep; - if(sleep_time < min_sleep) - return min_sleep; - return sleep_time; + return (a->time - b->time); } -int -fuse_clean_cache(struct fuse *f) +static +void +remembered_nodes_sort(struct fuse *f_) { - struct node_lru *lnode; - struct list_head *curr,*next; - struct node *node; - struct timespec now; + pthread_mutex_lock(&f_->lock); + qsort(&kv_first(f_->remembered_nodes), + kv_size(f_->remembered_nodes), + sizeof(remembered_node_t), + remembered_node_cmp); + pthread_mutex_unlock(&f_->lock); +} - pthread_mutex_lock(&f->lock); +#define MAX_PRUNE 100 +#define MAX_CHECK 1000 - curr_time(&now); +int +fuse_prune_some_remembered_nodes(struct fuse *f_, + int *offset_) +{ + time_t now; + int pruned; + int checked; - for(curr = f->lru_table.next; curr != &f->lru_table; curr = next) + pthread_mutex_lock(&f_->lock); + + pruned = 0; + checked = 0; + now = current_time(); + while(*offset_ < kv_size(f_->remembered_nodes)) { - double age; + time_t age; + remembered_node_t *fn = &kv_A(f_->remembered_nodes,*offset_); - next = curr->next; - lnode = list_entry(curr,struct node_lru,lru); - node = &lnode->node; + if(pruned >= MAX_PRUNE) + break; + if(checked >= MAX_CHECK) + break; - age = diff_timespec(&now,&lnode->forget_time); - if(age <= f->conf.remember) + checked++; + age = (now - fn->time); + if(f_->conf.remember > age) break; - assert(node->nlookup == 1); + assert(fn->node->nlookup == 1); /* Don't forget active directories */ - if(node->refctr > 1) - continue; + if(fn->node->refctr > 1) + { + (*offset_)++; + continue; + } - node->nlookup = 0; - unhash_name(f,node); - unref_node(f,node); + fn->node->nlookup = 0; + unref_node(f_,fn->node); + kv_delete(f_->remembered_nodes,*offset_); + pruned++; } - pthread_mutex_unlock(&f->lock); - return clean_delay(f); + pthread_mutex_unlock(&f_->lock); + + if((pruned < MAX_PRUNE) && (checked < MAX_CHECK)) + *offset_ = -1; + + return pruned; +} + +#undef MAX_PRUNE +#undef MAX_CHECK + +static +void +sleep_100ms(void) +{ + const struct timespec ms100 = {0,100 * 1000000}; + + nanosleep(&ms100,NULL); +} + +void +fuse_prune_remembered_nodes(struct fuse *f_) +{ + int offset; + int pruned; + + offset = 0; + pruned = 0; + for(;;) + { + pruned += fuse_prune_some_remembered_nodes(f_,&offset); + if(offset >= 0) + { + sleep_100ms(); + continue; + } + + break; + } + + if(pruned > 0) + remembered_nodes_sort(f_); } static struct fuse_lowlevel_ops fuse_path_ops = @@ -3937,6 +3804,7 @@ struct fuse_cmd* fuse_alloc_cmd(size_t bufsize) { struct fuse_cmd *cmd = (struct fuse_cmd *)malloc(sizeof(*cmd)); + if(cmd == NULL) { fprintf(stderr,"fuse: failed to allocate cmd\n"); @@ -4098,40 +3966,58 @@ node_table_init(struct node_table *t) return 0; } +static +void +fuse_malloc_trim(void) +{ +#ifdef HAVE_MALLOC_TRIM + malloc_trim(1024 * 1024); +#endif +} + static void* -fuse_prune_nodes(void *fuse) +fuse_maintenance_loop(void *fuse_) { - struct fuse *f = fuse; + int loops; int sleep_time; + double slab_usage_ratio; + struct fuse *f = (struct fuse*)fuse_; + loops = 0; + sleep_time = 60; while(1) { - sleep_time = fuse_clean_cache(f); + if(remember_nodes(f)) + fuse_prune_remembered_nodes(f); + + slab_usage_ratio = lfmp_slab_usage_ratio(&f->node_fmp); + if(slab_usage_ratio > 3.0) + lfmp_gc(&f->node_fmp); + + if(loops % 15) + fuse_malloc_trim(); + + loops++; sleep(sleep_time); } + return NULL; } int 
-fuse_start_cleanup_thread(struct fuse *f) +fuse_start_maintenance_thread(struct fuse *f_) { - if(lru_enabled(f)) - return fuse_start_thread(&f->prune_thread,fuse_prune_nodes,f); - - return 0; + return fuse_start_thread(&f_->maintenance_thread,fuse_maintenance_loop,f_); } void -fuse_stop_cleanup_thread(struct fuse *f) +fuse_stop_maintenance_thread(struct fuse *f_) { - if(lru_enabled(f)) - { - pthread_mutex_lock(&f->lock); - pthread_cancel(f->prune_thread); - pthread_mutex_unlock(&f->lock); - pthread_join(f->prune_thread,NULL); - } + pthread_mutex_lock(&f_->lock); + pthread_cancel(f_->maintenance_thread); + pthread_mutex_unlock(&f_->lock); + pthread_join(f_->maintenance_thread,NULL); } struct fuse* @@ -4168,11 +4054,6 @@ fuse_new_common(struct fuse_chan *ch, llop.setlk = NULL; } - f->pagesize = getpagesize(); - init_list_head(&f->partial_slabs); - init_list_head(&f->full_slabs); - init_list_head(&f->lru_table); - if(fuse_opt_parse(args,&f->conf,fuse_lib_opts,fuse_lib_opt_proc) == -1) goto out_free_fs; @@ -4194,6 +4075,9 @@ fuse_new_common(struct fuse_chan *ch, fuse_mutex_init(&f->lock); + lfmp_init(&f->node_fmp,sizeof(struct node),256); + kv_init(f->remembered_nodes); + root = alloc_node(f); if(root == NULL) { @@ -4201,12 +4085,6 @@ fuse_new_common(struct fuse_chan *ch, goto out_free_id_table; } - if(lru_enabled(f)) - { - struct node_lru *lnode = node_lru(root); - init_list_head(&lnode->lru); - } - strcpy(root->inline_name,"/"); root->name = root->inline_name; @@ -4282,13 +4160,12 @@ fuse_destroy(struct fuse *f) } } - assert(list_empty(&f->partial_slabs)); - assert(list_empty(&f->full_slabs)); - free(f->id_table.array); free(f->name_table.array); pthread_mutex_destroy(&f->lock); fuse_session_destroy(f->se); + lfmp_destroy(&f->node_fmp); + kv_destroy(f->remembered_nodes); free(f); fuse_delete_context_key(); } diff --git a/libfuse/lib/fuse_dirents.c b/libfuse/lib/fuse_dirents.c index 3f265428..c8e9fc66 100644 --- a/libfuse/lib/fuse_dirents.c +++ b/libfuse/lib/fuse_dirents.c @@ -19,6 +19,14 @@ /* 32KB - same as glibc getdents buffer size */ #define DEFAULT_SIZE (1024 * 32) +static +uint64_t +round_up(const uint64_t number_, + const uint64_t multiple_) +{ + return (((number_ + multiple_ - 1) / multiple_) * multiple_); +} + static uint64_t align_uint64_t(uint64_t v_) @@ -57,16 +65,15 @@ int fuse_dirents_buf_resize(fuse_dirents_t *d_, uint64_t size_) { - void *p; - - if((d_->data_len + size_) >= d_->buf_len) + if((kv_size(d_->data) + size_) >= kv_max(d_->data)) { - p = realloc(d_->buf,(d_->buf_len * 2)); - if(p == NULL) - return -errno; + uint64_t new_size; - d_->buf = p; - d_->buf_len *= 2; + new_size = round_up((kv_size(d_->data) + size_),DEFAULT_SIZE); + + kv_resize(char,d_->data,new_size); + if(d_->data.a == NULL) + return -ENOMEM; } return 0; @@ -87,9 +94,8 @@ fuse_dirents_dirent_alloc(fuse_dirents_t *d_, if(rv) return NULL; - d = (fuse_dirent_t*)&d_->buf[d_->data_len]; - - d_->data_len += size; + d = (fuse_dirent_t*)&kv_end(d_->data); + kv_size(d_->data) += size; return d; } @@ -109,9 +115,8 @@ fuse_dirents_direntplus_alloc(fuse_dirents_t *d_, if(rv) return NULL; - d = (fuse_dirent_t*)&d_->buf[d_->data_len]; - - d_->data_len += size; + d = (fuse_dirent_t*)&kv_end(d_->data); + kv_size(d_->data) += size; return d; } @@ -170,8 +175,8 @@ fuse_dirent_find(fuse_dirents_t *d_, if(d_->type != NORMAL) return NULL; - cur = (fuse_dirent_t*)&d_->buf[0]; - end = (fuse_dirent_t*)&d_->buf[d_->data_len]; + cur = (fuse_dirent_t*)&kv_first(d_->data); + end = (fuse_dirent_t*)&kv_end(d_->data); while(cur < 
end) { if(cur->ino == ino_) @@ -193,8 +198,8 @@ fuse_direntplus_find(fuse_dirents_t *d_, if(d_->type != PLUS) return NULL; - cur = (fuse_direntplus_t*)&d_->buf[0]; - end = (fuse_direntplus_t*)&d_->buf[d_->data_len]; + cur = (fuse_direntplus_t*)&kv_first(d_->data); + end = (fuse_direntplus_t*)&kv_end(d_->data); while(cur < end) { if(cur->dirent.ino == ino_) @@ -251,7 +256,7 @@ fuse_dirents_add(fuse_dirents_t *d_, return -ENOMEM; d->off = kv_size(d_->offs); - kv_push(uint32_t,d_->offs,d_->data_len); + kv_push(uint32_t,d_->offs,kv_size(d_->data)); d->ino = dirent_->d_ino; d->namelen = namelen_; d->type = dirent_->d_type; @@ -285,7 +290,7 @@ fuse_dirents_add_plus(fuse_dirents_t *d_, return -ENOMEM; d->dirent.off = kv_size(d_->offs); - kv_push(uint32_t,d_->offs,d_->data_len); + kv_push(uint32_t,d_->offs,kv_size(d_->data)); d->dirent.ino = dirent_->d_ino; d->dirent.namelen = namelen_; d->dirent.type = dirent_->d_type; @@ -321,7 +326,7 @@ fuse_dirents_add_linux(fuse_dirents_t *d_, return -ENOMEM; d->off = kv_size(d_->offs); - kv_push(uint32_t,d_->offs,d_->data_len); + kv_push(uint32_t,d_->offs,kv_size(d_->data)); d->ino = dirent_->ino; d->namelen = namelen_; d->type = dirent_->type; @@ -355,7 +360,7 @@ fuse_dirents_add_linux_plus(fuse_dirents_t *d_, return -ENOMEM; d->dirent.off = kv_size(d_->offs); - kv_push(uint32_t,d_->offs,d_->data_len); + kv_push(uint32_t,d_->offs,kv_size(d_->data)); d->dirent.ino = dirent_->ino; d->dirent.namelen = namelen_; d->dirent.type = dirent_->type; @@ -371,25 +376,21 @@ fuse_dirents_add_linux_plus(fuse_dirents_t *d_, void fuse_dirents_reset(fuse_dirents_t *d_) { - d_->data_len = 0; d_->type = UNSET; + kv_size(d_->data) = 0; kv_size(d_->offs) = 1; } int fuse_dirents_init(fuse_dirents_t *d_) { - void *buf; + d_->type = UNSET; - buf = calloc(DEFAULT_SIZE,1); - if(buf == NULL) + kv_init(d_->data); + kv_resize(char,d_->data,DEFAULT_SIZE); + if(d_->data.a == NULL) return -ENOMEM; - d_->buf = buf; - d_->buf_len = DEFAULT_SIZE; - d_->data_len = 0; - d_->type = UNSET; - kv_init(d_->offs); kv_resize(uint32_t,d_->offs,64); kv_push(uint32_t,d_->offs,0); @@ -400,11 +401,6 @@ fuse_dirents_init(fuse_dirents_t *d_) void fuse_dirents_free(fuse_dirents_t *d_) { - d_->buf_len = 0; - d_->data_len = 0; - d_->type = UNSET; - + kv_destroy(d_->data); kv_destroy(d_->offs); - - free(d_->buf); } diff --git a/libfuse/lib/fuse_lowlevel.c b/libfuse/lib/fuse_lowlevel.c index 0ad52d84..00a7d0a0 100644 --- a/libfuse/lib/fuse_lowlevel.c +++ b/libfuse/lib/fuse_lowlevel.c @@ -8,6 +8,8 @@ #define _GNU_SOURCE +#include "lfmp.h" + #include "config.h" #include "fuse_i.h" #include "fuse_kernel.h" @@ -47,12 +49,26 @@ struct fuse_pollhandle_t }; static size_t pagesize; +static lfmp_t g_FMP_fuse_req; -static __attribute__((constructor)) void fuse_ll_init_pagesize(void) +static +__attribute__((constructor)) +void +fuse_ll_constructor(void) { pagesize = getpagesize(); + lfmp_init(&g_FMP_fuse_req,sizeof(struct fuse_req),1); } +static +__attribute__((destructor)) +void +fuse_ll_destructor(void) +{ + lfmp_destroy(&g_FMP_fuse_req); +} + + static void convert_stat(const struct stat *stbuf_, @@ -109,7 +125,7 @@ static void destroy_req(fuse_req_t req) { - free(req); + lfmp_free(&g_FMP_fuse_req,req); } static @@ -118,7 +134,7 @@ fuse_ll_alloc_req(struct fuse_ll *f) { struct fuse_req *req; - req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req)); + req = (struct fuse_req*)lfmp_calloc(&g_FMP_fuse_req); if (req == NULL) { fprintf(stderr, "fuse: failed to allocate request\n"); @@ -203,27 +219,6 @@ 
send_reply(fuse_req_t req, return send_reply_iov(req, error, iov, count); } -int -fuse_reply_iov(fuse_req_t req, - const struct iovec *iov, - int count) -{ - int res; - struct iovec *padded_iov; - - padded_iov = malloc((count + 1) * sizeof(struct iovec)); - if (padded_iov == NULL) - return fuse_reply_err(req, ENOMEM); - - memcpy(padded_iov + 1, iov, count * sizeof(struct iovec)); - count++; - - res = send_reply_iov(req, 0, padded_iov, count); - free(padded_iov); - - return res; -} - static void convert_statfs(const struct statvfs *stbuf, @@ -1635,9 +1630,7 @@ do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg) { - pthread_mutex_lock(&req->f->lock); destroy_req(req); - pthread_mutex_unlock(&req->f->lock); } static @@ -2631,6 +2624,8 @@ fuse_ll_destroy(void *data) pthread_key_delete(f->pipe_key); pthread_mutex_destroy(&f->lock); free(f); + + lfmp_clear(&g_FMP_fuse_req); } static diff --git a/libfuse/lib/fuse_mt.c b/libfuse/lib/fuse_mt.c index 6a2f9581..1de419e8 100644 --- a/libfuse/lib/fuse_mt.c +++ b/libfuse/lib/fuse_mt.c @@ -112,12 +112,14 @@ int fuse_loop_mt(struct fuse *f) if (f == NULL) return -1; - int res = fuse_start_cleanup_thread(f); + int res = fuse_start_maintenance_thread(f); if (res) return -1; res = fuse_session_loop_mt(fuse_get_session(f), fuse_config_num_threads(f)); - fuse_stop_cleanup_thread(f); + + fuse_stop_maintenance_thread(f); + return res; } diff --git a/libfuse/lib/fuse_node.c b/libfuse/lib/fuse_node.c new file mode 100644 index 00000000..a276aeb6 --- /dev/null +++ b/libfuse/lib/fuse_node.c @@ -0,0 +1,224 @@ +#include "fuse_node.h" + +#include "khash.h" + +#include +#include +#include + +#include // for debugging + +#define UNKNOWN_INO UINT64_MAX +#define ROOT_NODE_ID 0 +#define ROOT_NODE_NAME "/" + +typedef struct node_idname_t node_idname_t; +struct node_idname_t +{ + uint64_t id; + const char *name; +}; + +static khint_t idname_hash_func(const node_idname_t idname); +static int idname_hash_equal(const node_idname_t idname0, const node_idname_t idname1); + +KHASH_INIT(node,node_idname_t,fuse_node_t*,1,idname_hash_func,idname_hash_equal); + +typedef struct fuse_node_hashtable_t fuse_node_hashtable_t; +struct fuse_node_hashtable_t +{ + kh_node_t *ht; + uint64_t id; + uint64_t generation; +}; + +static +inline +khint_t +idname_hash_func(const node_idname_t idname_) +{ + if(idname_.name == NULL) + return idname_.id; + return (idname_.id ^ kh_str_hash_func(idname_.name)); +} + +static +inline +int +idname_hash_equal(const node_idname_t idname0_, + const node_idname_t idname1_) +{ + return ((idname0_.id == idname1_.id) && + ((idname0_.name == idname1_.name) || + (strcmp(idname0_.name,idname1_.name) == 0))); +} + +static +inline +fuse_node_t* +fuse_node_alloc(const uint64_t id_, + const char *name_) +{ + fuse_node_t *node; + + node = (fuse_node_t*)calloc(1,sizeof(fuse_node_t)); + + node->id = id_; + node->name = strdup(name_); + node->ref_count = 1; + node->lookup_count = 1; + + return node; +} + +static +inline +void +fuse_node_free(fuse_node_t *node_) +{ + free(node_->name); + free(node_); +} + +static +inline +uint64_t +rand64() +{ + uint64_t rv; + + rv = rand(); + rv <<= 32; + rv |= rand(); + + return rv; +} + +static +inline +void +node_hashtable_gen_unique_id(fuse_node_hashtable_t *ht_) +{ + do + { + ht_->id++; + if(ht_->id == 0) + ht_->generation++; + } + while((ht_->id == 0) || (ht_->id == UNKNOWN_INO)); +} + +static +inline +void +node_hashtable_put_root(fuse_node_hashtable_t *ht_) +{ + int rv; + khint_t k; + fuse_node_t *root_node; + const 
node_idname_t idname0 = {ROOT_NODE_ID,""}; + const node_idname_t idname1 = {ROOT_NODE_ID,ROOT_NODE_NAME}; + + root_node = fuse_node_alloc(ROOT_NODE_ID,ROOT_NODE_NAME); + + k = kh_put_node(ht_->ht,idname0,&rv); + kh_value(ht_->ht,k) = root_node; + + k = kh_put_node(ht_->ht,idname1,&rv); + kh_value(ht_->ht,k) = root_node; +} + +static +inline +void +node_hashtable_set_id_gen(fuse_node_hashtable_t *ht_, + fuse_node_t *node_) +{ + node_hashtable_gen_unique_id(ht_); + node_->id = ht_->id; + node_->generation = ht_->generation; +} + +fuse_node_hashtable_t* +fuse_node_hashtable_init() +{ + fuse_node_hashtable_t *ht; + + ht = (fuse_node_hashtable_t*)calloc(sizeof(fuse_node_hashtable_t),1); + if(ht == NULL) + return NULL; + + ht->ht = kh_init_node(); + if(ht->ht == NULL) + { + free(ht); + return NULL; + } + + srand(time(NULL)); + ht->id = 0; + ht->generation = rand64(); + + node_hashtable_put_root(ht); + + return ht; +} + +fuse_node_t* +fuse_node_hashtable_put(fuse_node_hashtable_t *ht_, + const uint64_t parent_id_, + const uint64_t child_id_, + const char *child_name_) +{ + int rv; + khint_t k; + fuse_node_t *child_node; + const node_idname_t p_idname = {parent_id_,""}; + const node_idname_t c0_idname = {child_id_,child_name_}; + const node_idname_t c1_idname = {parent_id_,child_name_}; + + child_node = fuse_node_alloc(child_id_,child_name_); + + k = kh_get_node(ht_->ht,p_idname); + child_node->parent = kh_value(ht_->ht,k); + child_node->parent->ref_count++; + + k = kh_put_node(ht_->ht,c0_idname,&rv); + kh_value(ht_->ht,k) = child_node; + + k = kh_put_node(ht_->ht,c1_idname,&rv); + kh_value(ht_->ht,k) = child_node; + + return child_node; +} + +fuse_node_t* +fuse_node_hashtable_get(fuse_node_hashtable_t *ht_, + const uint64_t id_) +{ + return fuse_node_hashtable_get_child(ht_,id_,""); +} + +fuse_node_t* +fuse_node_hashtable_get_child(fuse_node_hashtable_t *ht_, + const uint64_t parent_id_, + const char *child_name_) +{ + khint_t k; + fuse_node_t *node; + const node_idname_t idname = {parent_id_,child_name_}; + + k = kh_get_node(ht_->ht,idname); + node = ((k != kh_end(ht_->ht)) ? 
+ kh_value(ht_->ht,k) : + NULL); + + return node; +} + +void +fuse_node_hashtable_del(fuse_node_hashtable_t *ht_, + fuse_node_t *node_) +{ + +} diff --git a/libfuse/lib/fuse_node.h b/libfuse/lib/fuse_node.h new file mode 100644 index 00000000..d3497ba8 --- /dev/null +++ b/libfuse/lib/fuse_node.h @@ -0,0 +1,36 @@ +#include + +typedef struct fuse_node_t fuse_node_t; +struct fuse_node_t +{ + uint64_t id; + uint64_t generation; + char *name; + fuse_node_t *parent; + uint32_t ref_count; + uint64_t lookup_count; + uint64_t open_count; +}; + +struct fuse_node_hashtable_t; +typedef struct fuse_node_hashtable_t fuse_node_hashtable_t; + + +fuse_node_hashtable_t *fuse_node_hashtable_init(); + +fuse_node_t *fuse_node_hashtable_put(fuse_node_hashtable_t *ht, + const uint64_t parent_id, + const uint64_t child_id, + const char *child_name); + +fuse_node_t* fuse_node_hashtable_get(fuse_node_hashtable_t *ht, + const uint64_t id); +fuse_node_t* fuse_node_hashtable_get_child(fuse_node_hashtable_t *ht, + const uint64_t id, + const char *name); +void fuse_node_hashtable_del(fuse_node_hashtable_t *ht, + fuse_node_t *node); + +void fuse_node_hashtable_get_path(fuse_node_hashtable_t *ht, + char *buf, + uint32_t buflen); diff --git a/libfuse/lib/khash.h b/libfuse/lib/khash.h new file mode 100644 index 00000000..58268768 --- /dev/null +++ b/libfuse/lib/khash.h @@ -0,0 +1,627 @@ +/* The MIT License + + Copyright (c) 2008, 2009, 2011 by Attractive Chaos + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. +*/ + +/* + An example: + + #include "khash.h" + KHASH_MAP_INIT_INT(32, char) + int main() { + int ret, is_missing; + khiter_t k; + khash_t(32) *h = kh_init(32); + k = kh_put(32, h, 5, &ret); + kh_value(h, k) = 10; + k = kh_get(32, h, 10); + is_missing = (k == kh_end(h)); + k = kh_get(32, h, 5); + kh_del(32, h, k); + for (k = kh_begin(h); k != kh_end(h); ++k) + if (kh_exist(h, k)) kh_value(h, k) = 1; + kh_destroy(32, h); + return 0; + } +*/ + +/* + 2013-05-02 (0.2.8): + + * Use quadratic probing. When the capacity is power of 2, stepping function + i*(i+1)/2 guarantees to traverse each bucket. It is better than double + hashing on cache performance and is more robust than linear probing. + + In theory, double hashing should be more robust than quadratic probing. + However, my implementation is probably not for large hash tables, because + the second hash function is closely tied to the first hash function, + which reduce the effectiveness of double hashing. 
+ + Reference: http://research.cs.vt.edu/AVresearch/hashing/quadratic.php + + 2011-12-29 (0.2.7): + + * Minor code clean up; no actual effect. + + 2011-09-16 (0.2.6): + + * The capacity is a power of 2. This seems to dramatically improve the + speed for simple keys. Thank Zilong Tan for the suggestion. Reference: + + - http://code.google.com/p/ulib/ + - http://nothings.org/computer/judy/ + + * Allow to optionally use linear probing which usually has better + performance for random input. Double hashing is still the default as it + is more robust to certain non-random input. + + * Added Wang's integer hash function (not used by default). This hash + function is more robust to certain non-random input. + + 2011-02-14 (0.2.5): + + * Allow to declare global functions. + + 2009-09-26 (0.2.4): + + * Improve portability + + 2008-09-19 (0.2.3): + + * Corrected the example + * Improved interfaces + + 2008-09-11 (0.2.2): + + * Improved speed a little in kh_put() + + 2008-09-10 (0.2.1): + + * Added kh_clear() + * Fixed a compiling error + + 2008-09-02 (0.2.0): + + * Changed to token concatenation which increases flexibility. + + 2008-08-31 (0.1.2): + + * Fixed a bug in kh_get(), which has not been tested previously. + + 2008-08-31 (0.1.1): + + * Added destructor + */ + + +#ifndef __AC_KHASH_H +#define __AC_KHASH_H + +/*! + @header + + Generic hash table library. +*/ + +#define AC_VERSION_KHASH_H "0.2.8" + +#include +#include +#include + +/* compiler specific configuration */ + +#if UINT_MAX == 0xffffffffu +typedef unsigned int khint32_t; +#elif ULONG_MAX == 0xffffffffu +typedef unsigned long khint32_t; +#endif + +#if ULONG_MAX == ULLONG_MAX +typedef unsigned long khint64_t; +#else +typedef unsigned long long khint64_t; +#endif + +#ifndef kh_inline +#ifdef _MSC_VER +#define kh_inline __inline +#else +#define kh_inline inline +#endif +#endif /* kh_inline */ + +#ifndef klib_unused +#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3) +#define klib_unused __attribute__ ((__unused__)) +#else +#define klib_unused +#endif +#endif /* klib_unused */ + +typedef khint32_t khint_t; +typedef khint_t khiter_t; + +#define __ac_isempty(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&2) +#define __ac_isdel(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&1) +#define __ac_iseither(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&3) +#define __ac_set_isdel_false(flag, i) (flag[i>>4]&=~(1ul<<((i&0xfU)<<1))) +#define __ac_set_isempty_false(flag, i) (flag[i>>4]&=~(2ul<<((i&0xfU)<<1))) +#define __ac_set_isboth_false(flag, i) (flag[i>>4]&=~(3ul<<((i&0xfU)<<1))) +#define __ac_set_isdel_true(flag, i) (flag[i>>4]|=1ul<<((i&0xfU)<<1)) + +#define __ac_fsize(m) ((m) < 16? 
1 : (m)>>4) + +#ifndef kroundup32 +#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x)) +#endif + +#ifndef kcalloc +#define kcalloc(N,Z) calloc(N,Z) +#endif +#ifndef kmalloc +#define kmalloc(Z) malloc(Z) +#endif +#ifndef krealloc +#define krealloc(P,Z) realloc(P,Z) +#endif +#ifndef kfree +#define kfree(P) free(P) +#endif + +static const double __ac_HASH_UPPER = 0.77; + +#define __KHASH_TYPE(name, khkey_t, khval_t) \ + typedef struct kh_##name##_s { \ + khint_t n_buckets, size, n_occupied, upper_bound; \ + khint32_t *flags; \ + khkey_t *keys; \ + khval_t *vals; \ + } kh_##name##_t; + +#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \ + extern kh_##name##_t *kh_init_##name(void); \ + extern void kh_destroy_##name(kh_##name##_t *h); \ + extern void kh_clear_##name(kh_##name##_t *h); \ + extern khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \ + extern int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \ + extern khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \ + extern void kh_del_##name(kh_##name##_t *h, khint_t x); + +#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ + SCOPE kh_##name##_t *kh_init_##name(void) { \ + return (kh_##name##_t*)kcalloc(1, sizeof(kh_##name##_t)); \ + } \ + SCOPE void kh_destroy_##name(kh_##name##_t *h) \ + { \ + if (h) { \ + kfree((void *)h->keys); kfree(h->flags); \ + kfree((void *)h->vals); \ + kfree(h); \ + } \ + } \ + SCOPE void kh_clear_##name(kh_##name##_t *h) \ + { \ + if (h && h->flags) { \ + memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khint32_t)); \ + h->size = h->n_occupied = 0; \ + } \ + } \ + SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \ + { \ + if (h->n_buckets) { \ + khint_t k, i, last, mask, step = 0; \ + mask = h->n_buckets - 1; \ + k = __hash_func(key); i = k & mask; \ + last = i; \ + while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \ + i = (i + (++step)) & mask; \ + if (i == last) return h->n_buckets; \ + } \ + return __ac_iseither(h->flags, i)? h->n_buckets : i; \ + } else return 0; \ + } \ + SCOPE int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \ + { /* This function uses 0.25*n_buckets bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. 
*/ \ + khint32_t *new_flags = 0; \ + khint_t j = 1; \ + { \ + kroundup32(new_n_buckets); \ + if (new_n_buckets < 4) new_n_buckets = 4; \ + if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \ + else { /* hash table size to be changed (shrink or expand); rehash */ \ + new_flags = (khint32_t*)kmalloc(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \ + if (!new_flags) return -1; \ + memset(new_flags, 0xaa, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \ + if (h->n_buckets < new_n_buckets) { /* expand */ \ + khkey_t *new_keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \ + if (!new_keys) { kfree(new_flags); return -1; } \ + h->keys = new_keys; \ + if (kh_is_map) { \ + khval_t *new_vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \ + if (!new_vals) { kfree(new_flags); return -1; } \ + h->vals = new_vals; \ + } \ + } /* otherwise shrink */ \ + } \ + } \ + if (j) { /* rehashing is needed */ \ + for (j = 0; j != h->n_buckets; ++j) { \ + if (__ac_iseither(h->flags, j) == 0) { \ + khkey_t key = h->keys[j]; \ + khval_t val; \ + khint_t new_mask; \ + new_mask = new_n_buckets - 1; \ + if (kh_is_map) val = h->vals[j]; \ + __ac_set_isdel_true(h->flags, j); \ + while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \ + khint_t k, i, step = 0; \ + k = __hash_func(key); \ + i = k & new_mask; \ + while (!__ac_isempty(new_flags, i)) i = (i + (++step)) & new_mask; \ + __ac_set_isempty_false(new_flags, i); \ + if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { /* kick out the existing element */ \ + { khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \ + if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \ + __ac_set_isdel_true(h->flags, i); /* mark it as deleted in the old hash table */ \ + } else { /* write the element and jump out of the loop */ \ + h->keys[i] = key; \ + if (kh_is_map) h->vals[i] = val; \ + break; \ + } \ + } \ + } \ + } \ + if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \ + h->keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \ + if (kh_is_map) h->vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \ + } \ + kfree(h->flags); /* free the working space */ \ + h->flags = new_flags; \ + h->n_buckets = new_n_buckets; \ + h->n_occupied = h->size; \ + h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \ + } \ + return 0; \ + } \ + SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \ + { \ + khint_t x; \ + if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \ + if (h->n_buckets > (h->size<<1)) { \ + if (kh_resize_##name(h, h->n_buckets - 1) < 0) { /* clear "deleted" elements */ \ + *ret = -1; return h->n_buckets; \ + } \ + } else if (kh_resize_##name(h, h->n_buckets + 1) < 0) { /* expand the hash table */ \ + *ret = -1; return h->n_buckets; \ + } \ + } /* TODO: to implement automatically shrinking; resize() already support shrinking */ \ + { \ + khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \ + x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \ + if (__ac_isempty(h->flags, i)) x = i; /* for speed up */ \ + else { \ + last = i; \ + while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \ + if (__ac_isdel(h->flags, i)) site = i; \ + i = (i + (++step)) & mask; \ + if (i == last) { x = site; break; } \ + } \ + if (x == h->n_buckets) { \ + if 
(__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \ + else x = i; \ + } \ + } \ + } \ + if (__ac_isempty(h->flags, x)) { /* not present at all */ \ + h->keys[x] = key; \ + __ac_set_isboth_false(h->flags, x); \ + ++h->size; ++h->n_occupied; \ + *ret = 1; \ + } else if (__ac_isdel(h->flags, x)) { /* deleted */ \ + h->keys[x] = key; \ + __ac_set_isboth_false(h->flags, x); \ + ++h->size; \ + *ret = 2; \ + } else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \ + return x; \ + } \ + SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \ + { \ + if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \ + __ac_set_isdel_true(h->flags, x); \ + --h->size; \ + } \ + } + +#define KHASH_DECLARE(name, khkey_t, khval_t) \ + __KHASH_TYPE(name, khkey_t, khval_t) \ + __KHASH_PROTOTYPES(name, khkey_t, khval_t) + +#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ + __KHASH_TYPE(name, khkey_t, khval_t) \ + __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) + +#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ + KHASH_INIT2(name, static kh_inline klib_unused, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) + +/* --- BEGIN OF HASH FUNCTIONS --- */ + +/*! @function + @abstract Integer hash function + @param key The integer [khint32_t] + @return The hash value [khint_t] +*/ +#define kh_int_hash_func(key) (khint32_t)(key) +/*! @function + @abstract Integer comparison function +*/ +#define kh_int_hash_equal(a, b) ((a) == (b)) +/*! @function + @abstract 64-bit integer hash function + @param key The integer [khint64_t] + @return The hash value [khint_t] +*/ +#define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11) +/*! @function + @abstract 64-bit integer comparison function +*/ +#define kh_int64_hash_equal(a, b) ((a) == (b)) +/*! @function + @abstract const char* hash function + @param s Pointer to a null terminated string + @return The hash value +*/ +static kh_inline khint_t __ac_X31_hash_string(const char *s) +{ + khint_t h = (khint_t)*s; + if (h) for (++s ; *s; ++s) h = (h << 5) - h + (khint_t)*s; + return h; +} +/*! @function + @abstract Another interface to const char* hash function + @param key Pointer to a null terminated string [const char*] + @return The hash value [khint_t] +*/ +#define kh_str_hash_func(key) __ac_X31_hash_string(key) +/*! @function + @abstract Const char* comparison function +*/ +#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0) + +static kh_inline khint_t __ac_Wang_hash(khint_t key) +{ + key += ~(key << 15); + key ^= (key >> 10); + key += (key << 3); + key ^= (key >> 6); + key += ~(key << 11); + key ^= (key >> 16); + return key; +} +#define kh_int_hash_func2(key) __ac_Wang_hash((khint_t)key) + +/* --- END OF HASH FUNCTIONS --- */ + +/* Other convenient macros... */ + +/*! + @abstract Type of the hash table. + @param name Name of the hash table [symbol] +*/ +#define khash_t(name) kh_##name##_t + +/*! @function + @abstract Initiate a hash table. + @param name Name of the hash table [symbol] + @return Pointer to the hash table [khash_t(name)*] +*/ +#define kh_init(name) kh_init_##name() + +/*! @function + @abstract Destroy a hash table. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] +*/ +#define kh_destroy(name, h) kh_destroy_##name(h) + +/*! @function + @abstract Reset a hash table without deallocating memory. 
+ @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] +*/ +#define kh_clear(name, h) kh_clear_##name(h) + +/*! @function + @abstract Resize a hash table. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] + @param s New size [khint_t] +*/ +#define kh_resize(name, h, s) kh_resize_##name(h, s) + +/*! @function + @abstract Insert a key to the hash table. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] + @param k Key [type of keys] + @param r Extra return code: -1 if the operation failed; + 0 if the key is present in the hash table; + 1 if the bucket is empty (never used); 2 if the element in + the bucket has been deleted [int*] + @return Iterator to the inserted element [khint_t] +*/ +#define kh_put(name, h, k, r) kh_put_##name(h, k, r) + +/*! @function + @abstract Retrieve a key from the hash table. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] + @param k Key [type of keys] + @return Iterator to the found element, or kh_end(h) if the element is absent [khint_t] +*/ +#define kh_get(name, h, k) kh_get_##name(h, k) + +/*! @function + @abstract Remove a key from the hash table. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] + @param k Iterator to the element to be deleted [khint_t] +*/ +#define kh_del(name, h, k) kh_del_##name(h, k) + +/*! @function + @abstract Test whether a bucket contains data. + @param h Pointer to the hash table [khash_t(name)*] + @param x Iterator to the bucket [khint_t] + @return 1 if containing data; 0 otherwise [int] +*/ +#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x))) + +/*! @function + @abstract Get key given an iterator + @param h Pointer to the hash table [khash_t(name)*] + @param x Iterator to the bucket [khint_t] + @return Key [type of keys] +*/ +#define kh_key(h, x) ((h)->keys[x]) + +/*! @function + @abstract Get value given an iterator + @param h Pointer to the hash table [khash_t(name)*] + @param x Iterator to the bucket [khint_t] + @return Value [type of values] + @discussion For hash sets, calling this results in segfault. +*/ +#define kh_val(h, x) ((h)->vals[x]) + +/*! @function + @abstract Alias of kh_val() +*/ +#define kh_value(h, x) ((h)->vals[x]) + +/*! @function + @abstract Get the start iterator + @param h Pointer to the hash table [khash_t(name)*] + @return The start iterator [khint_t] +*/ +#define kh_begin(h) (khint_t)(0) + +/*! @function + @abstract Get the end iterator + @param h Pointer to the hash table [khash_t(name)*] + @return The end iterator [khint_t] +*/ +#define kh_end(h) ((h)->n_buckets) + +/*! @function + @abstract Get the number of elements in the hash table + @param h Pointer to the hash table [khash_t(name)*] + @return Number of elements in the hash table [khint_t] +*/ +#define kh_size(h) ((h)->size) + +/*! @function + @abstract Get the number of buckets in the hash table + @param h Pointer to the hash table [khash_t(name)*] + @return Number of buckets in the hash table [khint_t] +*/ +#define kh_n_buckets(h) ((h)->n_buckets) + +/*! 
@function + @abstract Iterate over the entries in the hash table + @param h Pointer to the hash table [khash_t(name)*] + @param kvar Variable to which key will be assigned + @param vvar Variable to which value will be assigned + @param code Block of code to execute +*/ +#define kh_foreach(h, kvar, vvar, code) { khint_t __i; \ + for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \ + if (!kh_exist(h,__i)) continue; \ + (kvar) = kh_key(h,__i); \ + (vvar) = kh_val(h,__i); \ + code; \ + } } + +/*! @function + @abstract Iterate over the values in the hash table + @param h Pointer to the hash table [khash_t(name)*] + @param vvar Variable to which value will be assigned + @param code Block of code to execute +*/ +#define kh_foreach_value(h, vvar, code) { khint_t __i; \ + for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \ + if (!kh_exist(h,__i)) continue; \ + (vvar) = kh_val(h,__i); \ + code; \ + } } + +/* More convenient interfaces */ + +/*! @function + @abstract Instantiate a hash set containing integer keys + @param name Name of the hash table [symbol] +*/ +#define KHASH_SET_INIT_INT(name) \ + KHASH_INIT(name, khint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal) + +/*! @function + @abstract Instantiate a hash map containing integer keys + @param name Name of the hash table [symbol] + @param khval_t Type of values [type] +*/ +#define KHASH_MAP_INIT_INT(name, khval_t) \ + KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal) + +/*! @function + @abstract Instantiate a hash set containing 64-bit integer keys + @param name Name of the hash table [symbol] +*/ +#define KHASH_SET_INIT_INT64(name) \ + KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal) + +/*! @function + @abstract Instantiate a hash map containing 64-bit integer keys + @param name Name of the hash table [symbol] + @param khval_t Type of values [type] +*/ +#define KHASH_MAP_INIT_INT64(name, khval_t) \ + KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal) + +typedef const char *kh_cstr_t; +/*! @function + @abstract Instantiate a hash map containing const char* keys + @param name Name of the hash table [symbol] +*/ +#define KHASH_SET_INIT_STR(name) \ + KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal) + +/*! @function + @abstract Instantiate a hash map containing const char* keys + @param name Name of the hash table [symbol] + @param khval_t Type of values [type] +*/ +#define KHASH_MAP_INIT_STR(name, khval_t) \ + KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal) + +#endif /* __AC_KHASH_H */ diff --git a/libfuse/lib/lfmp.h b/libfuse/lib/lfmp.h new file mode 100644 index 00000000..c2267387 --- /dev/null +++ b/libfuse/lib/lfmp.h @@ -0,0 +1,213 @@ +/* + ISC License + + Copyright (c) 2020, Antonio SJ Musumeci + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#pragma once + +#include "fmp.h" + +#include + + +typedef struct lfmp_t lfmp_t; +struct lfmp_t +{ + fmp_t fmp; + pthread_mutex_t lock; +}; + + +static +inline +void +lfmp_init(lfmp_t *lfmp_, + const uint64_t obj_size_, + const uint64_t page_multiple_) +{ + fmp_init(&lfmp_->fmp,obj_size_,page_multiple_); + pthread_mutex_init(&lfmp_->lock,NULL); +} + +static +inline +uint64_t +lfmp_slab_count(lfmp_t *lfmp_) +{ + uint64_t rv; + + pthread_mutex_lock(&lfmp_->lock); + rv = fmp_slab_count(&lfmp_->fmp); + pthread_mutex_unlock(&lfmp_->lock); + + return rv; +} + +static +inline +int +lfmp_slab_alloc(lfmp_t *lfmp_) +{ + int rv; + + pthread_mutex_lock(&lfmp_->lock); + rv = fmp_slab_alloc(&lfmp_->fmp); + pthread_mutex_unlock(&lfmp_->lock); + + return rv; +} + +static +inline +void* +lfmp_alloc(lfmp_t *lfmp_) +{ + void *rv; + + pthread_mutex_lock(&lfmp_->lock); + rv = fmp_alloc(&lfmp_->fmp); + pthread_mutex_unlock(&lfmp_->lock); + + return rv; +} + +static +inline +void* +lfmp_calloc(lfmp_t *lfmp_) +{ + void *rv; + + pthread_mutex_lock(&lfmp_->lock); + rv = fmp_calloc(&lfmp_->fmp); + pthread_mutex_unlock(&lfmp_->lock); + + return rv; +} + +static +inline +void +lfmp_free(lfmp_t *lfmp_, + void *obj_) +{ + pthread_mutex_lock(&lfmp_->lock); + fmp_free(&lfmp_->fmp,obj_); + pthread_mutex_unlock(&lfmp_->lock); +} + +static +inline +void +lfmp_clear(lfmp_t *lfmp_) +{ + pthread_mutex_lock(&lfmp_->lock); + fmp_clear(&lfmp_->fmp); + pthread_mutex_unlock(&lfmp_->lock); +} + +static +inline +void +lfmp_destroy(lfmp_t *lfmp_) +{ + pthread_mutex_lock(&lfmp_->lock); + fmp_destroy(&lfmp_->fmp); + pthread_mutex_unlock(&lfmp_->lock); + pthread_mutex_destroy(&lfmp_->lock); +} + +static +inline +uint64_t +lfmp_avail_objs(lfmp_t *lfmp_) +{ + uint64_t rv; + + pthread_mutex_lock(&lfmp_->lock); + rv = fmp_avail_objs(&lfmp_->fmp); + pthread_mutex_unlock(&lfmp_->lock); + + return rv; +} + +static +inline +uint64_t +lfmp_objs_in_slab(lfmp_t *lfmp_, + void *slab_) +{ + uint64_t rv; + + pthread_mutex_lock(&lfmp_->lock); + rv = fmp_objs_in_slab(&lfmp_->fmp,slab_); + pthread_mutex_unlock(&lfmp_->lock); + + return rv; +} + +static +inline +void +lfmp_remove_objs_in_slab(lfmp_t *lfmp_, + void *slab_) +{ + pthread_mutex_lock(&lfmp_->lock); + fmp_remove_objs_in_slab(&lfmp_->fmp,slab_); + pthread_mutex_unlock(&lfmp_->lock); +} + +static +inline +int +lfmp_gc(lfmp_t *lfmp_) +{ + int rv; + + pthread_mutex_lock(&lfmp_->lock); + rv = fmp_gc(&lfmp_->fmp); + pthread_mutex_unlock(&lfmp_->lock); + + return rv; +} + +static +inline +uint64_t +lfmp_objs_per_slab(lfmp_t *lfmp_) +{ + uint64_t rv; + + pthread_mutex_lock(&lfmp_->lock); + rv = fmp_objs_per_slab(&lfmp_->fmp); + pthread_mutex_unlock(&lfmp_->lock); + + return rv; +} + +static +inline +double +lfmp_slab_usage_ratio(lfmp_t *lfmp_) +{ + double rv; + + pthread_mutex_lock(&lfmp_->lock); + rv = fmp_slab_usage_ratio(&lfmp_->fmp); + pthread_mutex_unlock(&lfmp_->lock); + + return rv; +}
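
A few usage sketches follow; they are reviewer notes rather than part of the patch. First, the new kvec.h macros (kv_first, kv_last, kv_end, kv_delete) used by the remembered-node vector and the dirent buffer. kv_delete(v,i) is an unordered delete: it pops the last element into slot i, so element order is not preserved, which is presumably why fuse_prune_remembered_nodes() re-sorts the vector by timestamp after a pruning pass. A minimal illustration with a hypothetical int vector:

#include "kvec.h"

int
main(void)
{
  kvec_t(int) v;

  kv_init(v);
  kv_push(int,v,10);
  kv_push(int,v,20);
  kv_push(int,v,30);

  /* kv_first(v) == 10, kv_last(v) == 30, kv_size(v) == 3 */

  kv_delete(v,0);   /* pops the last element (30) into slot 0 */

  /* now kv_size(v) == 2, kv_A(v,0) == 30, kv_A(v,1) == 20 */

  kv_destroy(v);

  return 0;
}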
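
The new fmp.h / lfmp.h headers replace the old in-fuse slab allocator with a standalone fixed-size memory pool (fmp_t) plus a mutex-guarded wrapper (lfmp_t), which fuse.c now uses for struct node via f->node_fmp. A minimal sketch of the pool API, using a hypothetical object type; the page multiple mirrors the lfmp_init(&f->node_fmp, sizeof(struct node), 256) call in fuse_new_common():

#include "lfmp.h"

struct my_obj              /* hypothetical pooled type */
{
  int  id;
  char name[32];
};

int
main(void)
{
  lfmp_t pool;

  /* obj_size is rounded up to a multiple of sizeof(void*);
     each slab is page_size * 256 bytes, mmap'd on demand */
  lfmp_init(&pool,sizeof(struct my_obj),256);

  struct my_obj *o = lfmp_calloc(&pool);   /* zeroed object from the pool */
  if(o == NULL)
    return 1;

  o->id = 42;

  lfmp_free(&pool,o);    /* pushes the object back on the pool's free list */

  lfmp_gc(&pool);        /* unmaps any slab whose objects are all free */

  lfmp_destroy(&pool);   /* releases all slabs and the mutex */

  return 0;
}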
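
fuse_maintenance_loop() garbage-collects the node pool when lfmp_slab_usage_ratio() exceeds 3.0. The ratio is avail_objs divided by objs_per_slab, i.e. how many slabs' worth of objects are currently sitting free, so the threshold roughly means "more than three idle slabs". Rough numbers, assuming a 4 KiB page and an illustrative 120-byte struct node (the real size depends on the build):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
  /* mirrors lfmp_init(&f->node_fmp, sizeof(struct node), 256) on a
     4 KiB-page system; an obj_size of 120 is illustrative only */
  const uint64_t page_size     = 4096;
  const uint64_t slab_size     = page_size * 256;       /* 1 MiB        */
  const uint64_t obj_size      = 120;
  const uint64_t objs_per_slab = slab_size / obj_size;  /* 8738 objects */

  uint64_t avail_objs = 30000;   /* free objects across all slabs */
  double   ratio      = ((double)avail_objs / (double)objs_per_slab);

  /* the maintenance thread calls lfmp_gc() when ratio > 3.0 */
  printf("ratio = %.2f -> %s\n",ratio,((ratio > 3.0) ? "gc" : "skip"));

  return 0;
}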
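
fuse_dirents.c now stores the dirent buffer in a kvec_t(char) and grows it to the next multiple of DEFAULT_SIZE (32 KiB) via round_up() instead of doubling a malloc'd buffer. A small worked example of the growth arithmetic; round_up() below simply mirrors the static helper added in fuse_dirents.c:

#include <assert.h>
#include <stdint.h>

static
uint64_t
round_up(const uint64_t number,
         const uint64_t multiple)
{
  return (((number + multiple - 1) / multiple) * multiple);
}

int
main(void)
{
  /* DEFAULT_SIZE in fuse_dirents.c is 1024 * 32 = 32768 bytes */
  assert(round_up(32700 + 264,32768) == 65536);   /* next 32 KiB multiple */
  assert(round_up(70000 + 264,32768) == 98304);   /* 3 * 32768            */

  return 0;
}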
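
fuse_node.c keys its khash table on a composite (id, name) pair by passing custom hash and equality functions to KHASH_INIT, and inserts each child under two composite keys, (child id, name) and (parent id, name), so fuse_node_hashtable_get_child() can resolve a name relative to its parent. A reduced sketch of the same KHASH_INIT pattern with an int value type; the key struct and lookups here are illustrative and, unlike idname_hash_func() in the patch, the hash below does not handle a NULL name:

#include "khash.h"

#include <stdint.h>
#include <string.h>

typedef struct id_name_t id_name_t;
struct id_name_t
{
  uint64_t    id;
  const char *name;
};

static
khint_t
id_name_hash(const id_name_t k_)
{
  return (khint_t)(k_.id ^ kh_str_hash_func(k_.name));
}

static
int
id_name_equal(const id_name_t a_,
              const id_name_t b_)
{
  return ((a_.id == b_.id) && (strcmp(a_.name,b_.name) == 0));
}

KHASH_INIT(example,id_name_t,int,1,id_name_hash,id_name_equal);

int
main(void)
{
  int rv;
  int val;
  khint_t k;
  khash_t(example) *h = kh_init(example);
  const id_name_t key = {1,"foo"};

  k = kh_put(example,h,key,&rv);
  kh_value(h,k) = 42;

  val = 0;
  k = kh_get(example,h,key);
  if(k != kh_end(h))
    val = kh_value(h,k);        /* 42 */

  kh_destroy(example,h);

  return ((val == 42) ? 0 : 1);
}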