@ -1,442 +0,0 @@ | |||
#define _GNU_SOURCE
#include "erl_nif.h"

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#include "fifo.h" /* fifo_t / __fifo_* are used below; this include was commented out */
#include "lifo.h"
/* Atoms cached once at load time so NIF calls avoid repeated enif_make_atom. */
typedef struct {
    ERL_NIF_TERM ok;
    ERL_NIF_TERM error;
    ERL_NIF_TERM fifo;
    ERL_NIF_TERM lifo;
    ERL_NIF_TERM ttl;
    ERL_NIF_TERM max_size;
} atoms_t;

/* Per-VM private state: the queue resource type plus the atom cache. */
typedef struct {
    ErlNifResourceType *queue;
    atoms_t atoms;
} priv_t;

/* One queued binary. The handle union links the item into either a FIFO or
 * a LIFO (only one is ever used per queue); `added` is only written when the
 * owning queue has a TTL (see push_item). */
typedef struct {
    union {
        fifo_handle_t fifo;
        lifo_handle_t lifo;
    } handle;
    ErlNifBinary data;
    struct timespec added;
} item_t;

typedef enum {
    QTYPE_FIFO = 0,
    QTYPE_LIFO
} queue_type_t;

/* Queue instance stored as a NIF resource. Discipline-specific behavior is
 * dispatched through function pointers wired up in new_queue(). */
typedef struct queue {
    union {
        fifo_t fifo;
        lifo_t lifo;
    } queue;
    uint64_t ttl;      /* item lifetime in milliseconds; 0 = no expiry */
    uint64_t max_size; /* maximum item count; 0 = unbounded */
    void (*push) (struct queue *inst, item_t *item);
    item_t* (*pop) (struct queue *inst);
    void (*free) (struct queue *inst);
    uint64_t (*size) (struct queue *inst);
    void (*cleanup) (struct queue *inst);
} queue_t;
// returns tuple {error, atom()} | |||
static inline ERL_NIF_TERM | |||
make_error(ErlNifEnv* env, const char *error) { | |||
priv_t *priv = (priv_t *) enif_priv_data(env); | |||
return enif_make_tuple2(env, priv->atoms.error, enif_make_atom(env, error)); | |||
} | |||
// returns time diff in milliseconds | |||
/*
 * Returns t2 - t1 in milliseconds (may be negative).
 *
 * Fixed: the original mixed an unsigned literal (1000000UL) into the
 * expression, turning the whole subtraction into unsigned arithmetic, and
 * computed tv_sec * 1000 in time_t, which can overflow on 32-bit time_t.
 * All math is now done explicitly in int64_t.
 */
static inline int64_t
tdiff(struct timespec *t2, struct timespec *t1) {
    int64_t ms2 = (int64_t) t2->tv_sec * 1000 + t2->tv_nsec / 1000000;
    int64_t ms1 = (int64_t) t1->tv_sec * 1000 + t1->tv_nsec / 1000000;
    return ms2 - ms1;
}
/* Fetches the current monotonic time into *tp. CLOCK_MONOTONIC_RAW is not
 * adjusted by NTP slew, so TTL arithmetic stays stable across clock tweaks. */
static inline void
gettime(struct timespec *tp) {
    int rc = clock_gettime(CLOCK_MONOTONIC_RAW, tp);
    /* NOTE(review): assert is a no-op under NDEBUG, leaving rc unused and
     * the failure silently ignored in release builds. */
    assert(rc == 0);
}
/******************************************************************************/ | |||
/* FIFO callbacks */ | |||
/******************************************************************************/ | |||
static void | |||
cleanup_fifo(queue_t *inst) { | |||
struct timespec now; | |||
gettime(&now); | |||
for (;;) { | |||
item_t *item = NULL; | |||
__fifo_peak(&inst->queue.fifo, item, handle.fifo); | |||
if (item == NULL) | |||
return; | |||
int64_t diff = tdiff(&now, &item->added); | |||
if (diff < inst->ttl) { | |||
return; | |||
} else { | |||
__fifo_pop(&inst->queue.fifo, item, handle.fifo); | |||
enif_release_binary(&item->data); | |||
enif_free(item); | |||
} | |||
} | |||
} | |||
/* Appends an item at the tail of the FIFO. */
static void
push_fifo(queue_t *inst, item_t *item) {
    __fifo_push(&inst->queue.fifo, item, handle.fifo);
}

/* Pops the oldest item, or NULL when empty. With a TTL, expired items are
 * released on the way and the first live item is returned. The caller takes
 * ownership of the returned item and its binary. */
static item_t *
pop_fifo(queue_t *inst) {
    item_t *item = NULL;
    if (inst->ttl > 0) {
        struct timespec now;
        gettime(&now);
        for (;;) {
            __fifo_pop(&inst->queue.fifo, item, handle.fifo);
            if (item == NULL)
                return NULL;
            int64_t diff = tdiff(&now, &item->added);
            /* NOTE(review): int64_t vs uint64_t comparison — a negative diff
             * would promote to unsigned; benign with a monotonic clock but
             * see cleanup_fifo for the safer signed form. */
            if (diff < inst->ttl) {
                return item;
            } else {
                enif_release_binary(&item->data);
                enif_free(item);
            }
        }
    } else {
        __fifo_pop(&inst->queue.fifo, item, handle.fifo);
    }
    return item;
}

/* Releases every queued item and its binary; resource destructor helper. */
static void
free_fifo(queue_t *inst) {
    item_t *item;
    for(;;) {
        __fifo_pop(&inst->queue.fifo, item, handle.fifo);
        if (item == NULL)
            return;
        enif_release_binary(&item->data);
        enif_free(item);
    }
}

/* Current number of items in the FIFO. */
static uint64_t
size_fifo(queue_t *inst) {
    return fifo_length(&inst->queue.fifo);
}
/******************************************************************************/ | |||
/* LIFO callbacks */ | |||
/******************************************************************************/ | |||
static void | |||
cleanup_lifo(queue_t *inst) { | |||
struct timespec now; | |||
gettime(&now); | |||
for(;;) { | |||
item_t *item = inst->queue.lifo.tail; | |||
if (item == NULL) | |||
return; | |||
int64_t diff = tdiff(&now, &item->added); | |||
if (diff < inst->ttl) { | |||
return; | |||
} else { | |||
item_t *prev = item->handle.lifo.prev; | |||
if (prev != NULL) | |||
prev->handle.lifo.next = NULL; | |||
inst->queue.lifo.tail = prev; | |||
enif_release_binary(&item->data); | |||
enif_free(item); | |||
} | |||
} | |||
} | |||
/* Pushes an item onto the head of the LIFO. */
static void
push_lifo(queue_t *inst, item_t *item) {
    __lifo_push(&inst->queue.lifo, item, handle.lifo);
}

/* Pops the most recently pushed item, or NULL when empty. With a TTL,
 * expired items are first purged from the tail (oldest end), then the head
 * is popped. Caller owns the returned item and its binary. */
static item_t *
pop_lifo(queue_t *inst) {
    item_t *item = NULL;
    if (inst->ttl > 0)
        cleanup_lifo(inst);
    __lifo_pop(&inst->queue.lifo, item, handle.lifo);
    return item;
}

/* Releases every queued item and its binary; resource destructor helper. */
static void
free_lifo(queue_t *inst) {
    item_t *item;
    for(;;) {
        __lifo_pop(&inst->queue.lifo, item, handle.lifo);
        if (item == NULL)
            return;
        enif_release_binary(&item->data);
        enif_free(item);
    }
}

/* Current number of items in the LIFO. */
static uint64_t
size_lifo(queue_t *inst) {
    return lifo_length(&inst->queue.lifo);
}
/****************************************************************************** | |||
** NIFs | |||
*******************************************************************************/ | |||
/* new(Opts) -> {ok, Queue} | {error, Reason} | badarg.
 *
 * Opts is a proplist: fifo | lifo | {ttl, Ms} | {max_size, N}. Later
 * occurrences win. Builds a queue resource and wires the discipline
 * callbacks according to the selected type. */
static ERL_NIF_TERM
new_queue(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    if (!enif_is_list(env, argv[0]))
        return enif_make_badarg(env);
    priv_t *priv = (priv_t *) enif_priv_data(env);
    queue_type_t qtype = QTYPE_FIFO; /* default discipline */
    unsigned long ttl = 0;
    unsigned long max_size = 0;
    ERL_NIF_TERM settings_list = argv[0];
    ERL_NIF_TERM head;
    // parses proplist [fifo, lifo, {ttl, non_neg_integer()}, {max_size, non_neg_integer()}]
    while(enif_get_list_cell(env, settings_list, &head, &settings_list))
    {
        const ERL_NIF_TERM *items;
        int arity;
        if (enif_is_atom(env, head)) {
            if (enif_is_identical(head, priv->atoms.fifo)) {
                qtype = QTYPE_FIFO;
            } else if (enif_is_identical(head, priv->atoms.lifo)) {
                qtype = QTYPE_LIFO;
            } else {
                return enif_make_badarg(env);
            }
        } else if (enif_get_tuple(env, head, &arity, &items) && arity == 2) {
            if (enif_is_identical(items[0], priv->atoms.ttl)) {
                if (!enif_get_ulong(env, items[1], &ttl)) {
                    return enif_make_badarg(env);
                }
            } else if (enif_is_identical(items[0], priv->atoms.max_size)) {
                if (!enif_get_ulong(env, items[1], &max_size)) {
                    return enif_make_badarg(env);
                }
            } else {
                return enif_make_badarg(env);
            }
        } else {
            return enif_make_badarg(env);
        }
    }
    queue_t *inst = (queue_t *) enif_alloc_resource(priv->queue, sizeof(*inst));
    if (inst == NULL)
        return make_error(env, "enif_alloc_resource");
    inst->ttl = ttl;
    inst->max_size = max_size;
    switch (qtype) {
    case QTYPE_FIFO:
        fifo_init(&inst->queue.fifo);
        inst->push = &push_fifo;
        inst->pop = &pop_fifo;
        inst->free = &free_fifo;
        inst->size = &size_fifo;
        inst->cleanup = &cleanup_fifo;
        break;
    case QTYPE_LIFO:
        lifo_init(&inst->queue.lifo);
        inst->push = &push_lifo;
        inst->pop = &pop_lifo;
        inst->free = &free_lifo;
        inst->size = &size_lifo;
        inst->cleanup = &cleanup_lifo;
        break;
    }
    ERL_NIF_TERM result = enif_make_resource(env, inst);
    enif_release_resource(inst); /* term now keeps the resource alive */
    return enif_make_tuple2(env, priv->atoms.ok, result);
}
/* push(Queue, Binary) -> ok | {error, max_size} | {error, atom()}.
 *
 * Copies the binary into a fresh item and enqueues it. With a TTL the queue
 * is purged first so expired items do not count against max_size. */
static ERL_NIF_TERM
push_item(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    priv_t *priv = (priv_t *) enif_priv_data(env);
    queue_t *inst;
    if (!enif_get_resource(env, argv[0], priv->queue, (void**) &inst))
        return enif_make_badarg(env);
    // todo: check an owner of the queue
    ErlNifBinary bin;
    if (!enif_inspect_binary(env, argv[1], &bin))
        return enif_make_badarg(env);
    if (inst->ttl > 0) {
        inst->cleanup(inst); /* expire before the size check */
    }
    if (inst->max_size > 0 && inst->size(inst) >= inst->max_size) {
        return enif_make_tuple2(env, priv->atoms.error, priv->atoms.max_size);
    }
    item_t *item = (item_t *) enif_alloc(sizeof(*item));
    if (item == NULL)
        return make_error(env, "enif_alloc");
    /* Own copy: bin belongs to the caller's term environment. */
    if (!enif_alloc_binary(bin.size, &item->data)) {
        enif_free(item);
        return make_error(env, "enif_alloc_binary");
    }
    memcpy(item->data.data, bin.data, bin.size);
    if (inst->ttl > 0) {
        gettime(&item->added); /* timestamp only needed when items expire */
    }
    inst->push(inst, item);
    return priv->atoms.ok;
}

/* pop(Queue) -> [] | [Binary]. Empty list doubles as the "empty" result. */
static ERL_NIF_TERM
pop_item(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    priv_t *priv = (priv_t *) enif_priv_data(env);
    queue_t *inst;
    item_t *item;
    if (!enif_get_resource(env, argv[0], priv->queue, (void**) &inst))
        return enif_make_badarg(env);
    // todo: check an owner of the queue
    item = inst->pop(inst);
    if (item == NULL)
        return enif_make_list(env, 0);
    /* enif_make_binary transfers ownership of item->data to the term, so
     * only the item shell is freed here. */
    ERL_NIF_TERM result = enif_make_binary(env, &item->data);
    enif_free(item);
    return enif_make_list1(env, result);
}
static ERL_NIF_TERM | |||
queue_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { | |||
priv_t *priv = (priv_t *) enif_priv_data(env); | |||
queue_t *inst; | |||
if (!enif_get_resource(env, argv[0], priv->queue, (void**) &inst)) | |||
return enif_make_badarg(env); | |||
return enif_make_uint64(env, inst->size(inst)); | |||
} | |||
/****************************************************************************** | |||
** NIF initialization | |||
*******************************************************************************/ | |||
/* Resource destructor: releases every remaining item via the discipline's
 * free callback when the last reference to the queue term is dropped. */
static void
enq_queue_free(ErlNifEnv* env, void* obj) {
    queue_t *inst = obj;
    inst->free(inst);
}
static priv_t * | |||
make_priv(ErlNifEnv *env) { | |||
priv_t *priv = enif_alloc(sizeof(*priv)); | |||
if (priv == NULL) | |||
return NULL; | |||
ErlNifResourceFlags flags = ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER; | |||
priv->queue = enif_open_resource_type(env, NULL, "enq_queue", enq_queue_free, flags, NULL); | |||
priv->atoms.ok = enif_make_atom(env, "ok"); | |||
priv->atoms.error = enif_make_atom(env, "error"); | |||
priv->atoms.fifo = enif_make_atom(env, "fifo"); | |||
priv->atoms.lifo = enif_make_atom(env, "lifo"); | |||
priv->atoms.ttl = enif_make_atom(env, "ttl"); | |||
priv->atoms.max_size = enif_make_atom(env, "max_size"); | |||
return priv; | |||
} | |||
static int | |||
enq_nif_load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info) { | |||
*priv_data = make_priv(env); | |||
return 0; | |||
} | |||
static int | |||
enq_nif_upgrade(ErlNifEnv *env, void **priv_data, void **old_priv_data, ERL_NIF_TERM load_info) { | |||
*priv_data = make_priv(env); | |||
return 0; | |||
} | |||
/* Erlang-visible API of the enq_nif module: name, arity, implementation. */
static ErlNifFunc enq_nif_funcs[] = {
    {"new", 1, new_queue},
    {"push", 2, push_item},
    {"pop", 1, pop_item},
    {"size", 1, queue_size},
};

ERL_NIF_INIT(enq_nif, enq_nif_funcs, enq_nif_load, NULL, enq_nif_upgrade, NULL)
@ -1,71 +0,0 @@ | |||
#ifndef FIFO_H
#define FIFO_H

/* Fixed: the header used NULL without including <stddef.h>, relying on the
 * includer; the include guard used a reserved leading-underscore name; and
 * the macro temporaries `e`/`t` could capture caller variables of the same
 * name (now __-prefixed like __q). */
#include <stddef.h>

/* Intrusive singly-linked FIFO. Elements embed a fifo_handle_t member; the
 * `h` argument of the __-macros names that member inside the element type
 * (the plain macros assume it is called `fifo_handle`). The queue stores
 * untyped pointers, so type safety is the caller's responsibility.
 * Allocate fifo_t yourself. */
typedef struct fifo_t {
    void *head;
    void *tail;
    unsigned long long count;
} fifo_t;

typedef struct fifo_handle_t {
    void *next;
} fifo_handle_t;

/* Initializes fifo structure. */
#define fifo_init(fifo)     \
    do {                    \
        fifo_t *__q = fifo; \
        __q->head = NULL;   \
        __q->tail = NULL;   \
        __q->count = 0;     \
    } while (0)

#define __fifo_push(fifo, p, h)                   \
    do {                                          \
        fifo_t *__q = fifo;                       \
        __typeof__ (p) __e = p;                   \
        __e->h.next = NULL;                       \
        if (__q->tail == NULL) {                  \
            __q->head = __e;                      \
        } else {                                  \
            __typeof__ (__e) __t = __q->tail;     \
            __t->h.next = __e;                    \
        }                                         \
        __q->tail = __e;                          \
        __q->count++;                             \
    } while (0)

/* Puts an element to the queue. */
#define fifo_push(fifo, p) __fifo_push (fifo, p, fifo_handle)

#define __fifo_pop(fifo, p, h)     \
    do {                           \
        fifo_t *__q = fifo;        \
        p = __q->head;             \
        if (p != NULL) {           \
            __q->count--;          \
            __q->head = p->h.next; \
            if (__q->tail == p)    \
                __q->tail = NULL;  \
        }                          \
    } while (0)

/* Pops the first element out of the queue; p becomes NULL when empty. */
#define fifo_pop(fifo, p) __fifo_pop (fifo, p, fifo_handle)

#define __fifo_peak(fifo, p, h) \
    do {                        \
        p = (fifo)->head;       \
    } while (0)

/* Returns the first element of the queue without removing it.
 * (Name kept as "peak" for source compatibility; "peek" is the usual
 * spelling.) */
#define fifo_peak(fifo, p) __fifo_peak (fifo, p, fifo_handle)

/* Returns the length of the queue. */
#define fifo_length(fifo) ((fifo)->count)

/* Returns true if the queue is empty. */
#define fifo_empty(fifo) ((fifo)->count == 0)

#endif /* FIFO_H */
@ -1,63 +0,0 @@ | |||
#ifndef LIFO_H
#define LIFO_H

/* Fixed: missing <stddef.h> for NULL, reserved-identifier include guard,
 * and macro temporaries `e`/`t` that could capture caller names (now
 * __-prefixed to match __q). */
#include <stddef.h>

/* Intrusive doubly-linked LIFO (stack). head is the most recently pushed
 * element, tail the oldest; elements embed a lifo_handle_t member named by
 * the `h` argument of the __-macros (plain macros assume `lifo_handle`). */
typedef struct lifo_t {
    void *head;
    void *tail;
    unsigned long long count;
} lifo_t;

typedef struct lifo_handle_t {
    void *next;
    void *prev;
} lifo_handle_t;

#define lifo_init(lifo)     \
    do {                    \
        lifo_t *__q = lifo; \
        __q->head = NULL;   \
        __q->tail = NULL;   \
        __q->count = 0;     \
    } while (0)

#define __lifo_push(lifo, p, h)               \
    do {                                      \
        lifo_t *__q = lifo;                   \
        __typeof__ (p) __e = p;               \
        __e->h.next = __q->head;              \
        __e->h.prev = NULL;                   \
        if (__q->head == NULL) {              \
            __q->tail = __e;                  \
        } else {                              \
            __typeof__ (__e) __t = __q->head; \
            __t->h.prev = __e;                \
        }                                     \
        __q->head = __e;                      \
        __q->count++;                         \
    } while (0)

/* Pushes an element onto the stack. */
#define lifo_push(lifo, p) __lifo_push (lifo, p, lifo_handle)

#define __lifo_pop(lifo, p, h)                    \
    do {                                          \
        lifo_t *__q = lifo;                       \
        p = __q->head;                            \
        if (p != NULL) {                          \
            __q->count--;                         \
            __q->head = p->h.next;                \
            if (__q->head != NULL) {              \
                __typeof__ (p) __t = __q->head;   \
                __t->h.prev = NULL;               \
            } else {                              \
                __q->tail = NULL;                 \
            }                                     \
        }                                         \
    } while (0)

/* Pops the most recently pushed element; p becomes NULL when empty. */
#define lifo_pop(lifo, p) __lifo_pop (lifo, p, lifo_handle)

/* Returns the number of elements on the stack. */
#define lifo_length(lifo) ((lifo)->count)

/* Returns true if the stack is empty. */
#define lifo_empty(lifo) ((lifo)->count == 0)

#endif /* LIFO_H */
@ -1,12 +0,0 @@ | |||
{port_specs, [ | |||
{"../../priv/enq_nif.so", ["*.c"]} | |||
]}. | |||
% {port_env, [ | |||
% {"LDFLAGS", "$ERL_LDFLAGS -lrt"}, | |||
% {"CFLAGS", "$CFLAGS --std=gnu99 -Wall -O3"} | |||
% ]}. | |||
@ -1,688 +0,0 @@ | |||
/* | |||
* @author Evgeny Khramtsov <ekhramtsov@process-one.net> | |||
* @copyright (C) 2002-2020 ProcessOne, SARL. All Rights Reserved. | |||
* | |||
* Licensed under the Apache License, Version 2.0 (the "License"); | |||
* you may not use this file except in compliance with the License. | |||
* You may obtain a copy of the License at | |||
* | |||
* http://www.apache.org/licenses/LICENSE-2.0 | |||
* | |||
* Unless required by applicable law or agreed to in writing, software | |||
* distributed under the License is distributed on an "AS IS" BASIS, | |||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
* See the License for the specific language governing permissions and | |||
* limitations under the License. | |||
* | |||
*/ | |||
#include <erl_nif.h> | |||
#include <stdio.h> | |||
#include <errno.h> | |||
#include "uthash.h" | |||
/* uthash allocation hooks routed through the Erlang VM allocator.
 *
 * Fixed: the free hook was a non-static global named __free — a reserved
 * identifier (double underscore) exported from this translation unit. It is
 * now static with an unreserved name; the size argument required by the
 * uthash_free(ptr, sz) contract is explicitly ignored. */
static void mqtree_uthash_free(void *ptr, size_t size) {
    (void) size;
    enif_free(ptr);
}
#undef uthash_malloc
#undef uthash_free
#define uthash_malloc enif_alloc
#define uthash_free(ptr, sz) mqtree_uthash_free((ptr), (sz))
/**************************************************************** | |||
* Structures/Globals definitions * | |||
****************************************************************/ | |||
/* One node of the topic tree. key is this node's path segment, val the full
 * topic string (set only on nodes where a topic terminates), refc the number
 * of times that topic was inserted, sub a uthash map of child nodes. */
typedef struct __tree_t {
    char *key;
    char *val;
    int refc;
    struct __tree_t *sub;
    UT_hash_handle hh;
} tree_t;

/* A tree resource: root node, optional registered name (points into the
 * registry entry), and the rwlock guarding all tree access. */
typedef struct {
    tree_t *tree;
    char *name;
    ErlNifRWLock *lock;
} state_t;

/* Registry entry mapping an atom name to a tree state. */
typedef struct {
    char *name;
    state_t *state;
    UT_hash_handle hh;
} registry_t;

/* Module globals: the resource type, the name registry, and its lock. */
static ErlNifResourceType *tree_state_t = NULL;
static registry_t *registry = NULL;
static ErlNifRWLock *registry_lock = NULL;
/**************************************************************** | |||
* MQTT Tree Manipulation * | |||
****************************************************************/ | |||
tree_t *tree_new(char *key, size_t len) { | |||
tree_t *tree = enif_alloc(sizeof(tree_t)); | |||
if (tree) { | |||
memset(tree, 0, sizeof(tree_t)); | |||
if (key && len) { | |||
tree->key = enif_alloc(len); | |||
if (tree->key) { | |||
memcpy(tree->key, key, len); | |||
} else { | |||
enif_free(tree); | |||
tree = NULL; | |||
} | |||
} | |||
} | |||
return tree; | |||
} | |||
/* Recursively frees a node, its key/val strings, and its whole subtree.
 * Safe to call with NULL. */
void tree_free(tree_t *t) {
    tree_t *found, *iter;
    if (t) {
        enif_free(t->key);
        enif_free(t->val);
        HASH_ITER(hh, t->sub, found, iter) {
            HASH_DEL(t->sub, found);
            tree_free(found);
        }
        /* Poison before freeing to make use-after-free crash early. */
        memset(t, 0, sizeof(tree_t));
        enif_free(t);
    }
}

/* Frees every child of root but keeps root itself alive and reusable. */
void tree_clear(tree_t *root) {
    tree_t *found, *iter;
    HASH_ITER(hh, root->sub, found, iter) {
        HASH_DEL(root->sub, found);
        tree_free(found);
    }
}
/* Inserts `path` into the tree, creating intermediate nodes as needed, and
 * bumps the terminal node's refcount. `path` is the output of prep_path():
 * `size` bytes where each '/' was replaced by NUL, so it is a sequence of
 * consecutive C strings; the loop walks one segment per iteration.
 * Returns 0 or ENOMEM. */
int tree_add(tree_t *root, char *path, size_t size) {
    int i = 0; /* NOTE(review): int index compared against size_t size —
                  assumes paths < INT_MAX bytes; confirm callers bound this */
    size_t len;
    tree_t *t = root;
    tree_t *found, *new;
    while (i<=size) {
        len = strlen(path+i) + 1;
        HASH_FIND_STR(t->sub, path+i, found);
        if (found) {
            i += len;
            t = found;
        } else {
            new = tree_new(path+i, len);
            if (new) {
                HASH_ADD_STR(t->sub, key, new);
                i += len;
                t = new;
            } else
                return ENOMEM;
        }
    }
    if (!t->val) {
        /* Reconstruct the original topic on the terminal node by turning
         * the NUL separators back into '/'. */
        t->val = enif_alloc(size+1);
        if (t->val) {
            t->val[size] = 0;
            for (i=0; i<size; i++) {
                char c = path[i];
                t->val[i] = c ? c : '/';
            }
        } else
            return ENOMEM;
    }
    t->refc++;
    return 0;
}
/* Recursively removes one reference of `path` starting at segment offset i.
 * When a terminal node's refcount hits zero its val is released; empty,
 * unreferenced nodes are pruned on the way back up.
 * Returns true when `root` itself became removable. */
int tree_del(tree_t *root, char *path, size_t i, size_t size) {
    tree_t *found;
    if (i<=size) {
        HASH_FIND_STR(root->sub, path+i, found);
        if (found) {
            i += strlen(path+i) + 1;
            int deleted = tree_del(found, path, i, size);
            if (deleted) {
                HASH_DEL(root->sub, found);
                tree_free(found);
            }
        }
    } else if (root->refc) {
        /* Reached the terminal node: drop one reference. */
        root->refc--;
        if (!root->refc) {
            enif_free(root->val);
            root->val = NULL;
        }
    }
    return !root->refc && !root->sub;
}

/* Counts nodes holding at least one reference (i.e. distinct stored
 * topics), accumulating into *size. */
void tree_size(tree_t *tree, size_t *size) {
    tree_t *found, *iter;
    HASH_ITER(hh, tree->sub, found, iter) {
        if (found->refc) (*size)++;
        tree_size(found, size);
    }
}

/* Returns the refcount of `path` (0 when not present), walking segment by
 * segment like tree_del. */
int tree_refc(tree_t *tree, char *path, size_t i, size_t size) {
    tree_t *found;
    if (i<=size) {
        HASH_FIND_STR(tree->sub, path+i, found);
        if (found) {
            i += strlen(path+i) + 1;
            return tree_refc(found, path, i, size);
        } else {
            return 0;
        }
    } else
        return tree->refc;
}
/**************************************************************** | |||
* Registration * | |||
****************************************************************/ | |||
/* Unlinks and frees a registry entry, dropping the reference it held on the
 * tree state. entry->name is shared with state->name, so the state's name
 * pointer is cleared before the string is freed. */
void delete_registry_entry(registry_t *entry) {
    /* registry_lock must be RW-locked! */
    HASH_DEL(registry, entry);
    entry->state->name = NULL;
    enif_release_resource(entry->state);
    enif_free(entry->name);
    enif_free(entry);
}
/* Associates `name` with a tree state in the global registry, replacing any
 * previous registration the state held. Returns 0, ENOMEM, or EINVAL when
 * the name is already taken.
 *
 * Fixed: the allocation-failure path called free() on memory obtained with
 * enif_alloc(); mixing allocators is undefined with the VM allocator — it
 * must be enif_free(). */
int register_tree(char *name, state_t *state) {
    registry_t *entry, *found;
    entry = enif_alloc(sizeof(registry_t));
    if (!entry) return ENOMEM;
    entry->name = enif_alloc(strlen(name) + 1);
    if (!entry->name) {
        enif_free(entry);
        return ENOMEM;
    }
    entry->state = state;
    strcpy(entry->name, name);
    enif_rwlock_rwlock(registry_lock);
    HASH_FIND_STR(registry, name, found);
    if (found) {
        enif_rwlock_rwunlock(registry_lock);
        enif_free(entry->name);
        enif_free(entry);
        return EINVAL;
    } else {
        if (state->name) {
            /* Unregistering previously registered name */
            HASH_FIND_STR(registry, state->name, found);
            if (found)
                delete_registry_entry(found);
        }
        enif_keep_resource(state); /* registry holds a reference */
        HASH_ADD_STR(registry, name, entry);
        state->name = entry->name;
        enif_rwlock_rwunlock(registry_lock);
        return 0;
    }
}
int unregister_tree(char *name) { | |||
registry_t *entry; | |||
int ret; | |||
enif_rwlock_rwlock(registry_lock); | |||
HASH_FIND_STR(registry, name, entry); | |||
if (entry) { | |||
delete_registry_entry(entry); | |||
ret = 0; | |||
} else { | |||
ret = EINVAL; | |||
} | |||
enif_rwlock_rwunlock(registry_lock); | |||
return ret; | |||
} | |||
/**************************************************************** | |||
* NIF helpers * | |||
****************************************************************/ | |||
/* Prepends str (as a binary) to the list `tail`; returns tail unchanged when
 * str is NULL or the binary allocation fails. */
static ERL_NIF_TERM cons(ErlNifEnv *env, char *str, ERL_NIF_TERM tail)
{
    if (str) {
        size_t len = strlen(str);
        ERL_NIF_TERM head;
        unsigned char *buf = enif_make_new_binary(env, len, &head);
        if (buf) {
            memcpy(buf, str, len);
            return enif_make_list_cell(env, head, tail);
        }
    }
    return tail;
}

/* Collects into *acc every stored topic matching `path` (NUL-separated
 * segments, see prep_path), honoring MQTT wildcards: at each level the
 * literal segment, "+" (single level) and "#" (rest of the path) are tried.
 * "$"-prefixed topics are shielded from wildcards at the first level only,
 * per the MQTT specification. */
static void match(ErlNifEnv *env, tree_t *root,
                  char *path, size_t i, size_t size, ERL_NIF_TERM *acc)
{
    tree_t *found;
    size_t len = 0;
    if (i<=size) {
        HASH_FIND_STR(root->sub, path+i, found);
        if (found) {
            len = strlen(path+i) + 1;
            match(env, found, path, i+len, size, acc);
        };
        if (i || path[0] != '$') {
            HASH_FIND_STR(root->sub, "+", found);
            if (found) {
                len = strlen(path+i) + 1;
                match(env, found, path, i+len, size, acc);
            }
            HASH_FIND_STR(root->sub, "#", found);
            if (found) {
                *acc = cons(env, found->val, *acc);
            }
        }
    } else {
        /* Path exhausted: this node's topic matches, and so does a trailing
         * "#" child (which matches the empty remainder). */
        *acc = cons(env, root->val, *acc);
        HASH_FIND_STR(root->sub, "#", found);
        if (found)
            *acc = cons(env, found->val, *acc);
    }
}
/* Walks the whole tree and prepends {Topic :: binary(), Refc :: integer()}
 * to *acc for every node that stores a topic. */
static void to_list(ErlNifEnv *env, tree_t *root, ERL_NIF_TERM *acc)
{
    tree_t *found, *iter;
    HASH_ITER(hh, root->sub, found, iter) {
        if (found->val) {
            size_t len = strlen(found->val);
            ERL_NIF_TERM refc = enif_make_int(env, found->refc);
            ERL_NIF_TERM val;
            unsigned char *buf = enif_make_new_binary(env, len, &val);
            if (buf) {
                memcpy(buf, found->val, len);
                *acc = enif_make_list_cell(env, enif_make_tuple2(env, val, refc), *acc);
            }
        };
        to_list(env, found, acc);
    }
}

/* Debug dump: renders the subtree as nested {Key, Val|none, Refc, Children}
 * tuples (strings as charlists); the root node (no key) yields just the
 * children list. */
static ERL_NIF_TERM dump(ErlNifEnv *env, tree_t *tree)
{
    tree_t *found, *iter;
    ERL_NIF_TERM tail, head;
    tail = enif_make_list(env, 0);
    HASH_ITER(hh, tree->sub, found, iter) {
        head = dump(env, found);
        tail = enif_make_list_cell(env, head, tail);
    }
    if (tree->key) {
        ERL_NIF_TERM part, path;
        part = enif_make_string(env, tree->key, ERL_NIF_LATIN1);
        if (tree->val)
            path = enif_make_string(env, tree->val, ERL_NIF_LATIN1);
        else
            path = enif_make_atom(env, "none");
        return enif_make_tuple4(env, part, path, enif_make_int(env, tree->refc), tail);
    } else
        return tail;
}
/* Maps an errno-style code onto an Erlang exception: ENOMEM raises the
 * atom 'enomem', anything else becomes badarg. */
static ERL_NIF_TERM raise(ErlNifEnv *env, int err)
{
    if (err == ENOMEM)
        return enif_raise_exception(env, enif_make_atom(env, "enomem"));
    return enif_make_badarg(env);
}
/* Copies bin into `path`, NUL-terminating it and replacing every '/' with a
 * NUL so the tree code can treat each level as a separate C string. `path`
 * must have room for bin->size + 1 bytes.
 *
 * Fixed: the index was a signed int compared against the size_t bin->size
 * (signed/unsigned comparison, truncation for >2 GiB binaries); it is now a
 * size_t. */
void prep_path(char *path, ErlNifBinary *bin) {
    size_t i;
    unsigned char c;
    path[bin->size] = 0;
    for (i = 0; i < bin->size; i++) {
        c = bin->data[i];
        path[i] = (c == '/') ? 0 : c;
    }
}
/**************************************************************** | |||
* Constructors/Destructors * | |||
****************************************************************/ | |||
/* Allocates a tree resource with an empty root node and a fresh rwlock.
 * On partial failure the resource is released, which routes cleanup of any
 * allocated parts through destroy_tree_state. Returns NULL on failure. */
static state_t *init_tree_state(ErlNifEnv *env) {
    state_t *state = enif_alloc_resource(tree_state_t, sizeof(state_t));
    if (state) {
        memset(state, 0, sizeof(state_t));
        state->tree = tree_new(NULL, 0);
        state->lock = enif_rwlock_create("mqtree_lock");
        if (state->tree && state->lock)
            return state;
        else
            enif_release_resource(state);
    }
    return NULL;
}
/* Resource destructor: frees the tree and its lock.
 *
 * Fixed: the original memset sat outside the NULL check and would
 * dereference a NULL state; it now runs only when state is non-NULL. */
static void destroy_tree_state(ErlNifEnv *env, void *data) {
    state_t *state = (state_t *) data;
    if (state) {
        tree_free(state->tree);
        if (state->lock) enif_rwlock_destroy(state->lock);
        memset(state, 0, sizeof(state_t));
    }
}
/**************************************************************** | |||
* NIF definitions * | |||
****************************************************************/ | |||
/* NIF load callback: creates the registry lock and the resource type.
 * Returns non-zero on failure so the VM aborts loading.
 *
 * Fixed: the enif_open_resource_type result was never checked — a NULL
 * resource type would crash the first NIF call; the lock is also torn down
 * on that path instead of leaking. */
static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM max) {
    registry_lock = enif_rwlock_create("mqtree_registry");
    if (registry_lock) {
        ErlNifResourceFlags flags = ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER;
        tree_state_t = enif_open_resource_type(env, NULL, "mqtree_state",
                                               destroy_tree_state,
                                               flags, NULL);
        if (tree_state_t)
            return 0;
        enif_rwlock_destroy(registry_lock);
        registry_lock = NULL;
    }
    return ENOMEM;
}
/* NIF unload callback: releases the registry lock.
 * NOTE(review): registry entries themselves are not freed here — confirm
 * whether any can still exist at unload time. */
static void unload(ErlNifEnv* env, void* priv) {
    if (registry_lock) {
        enif_rwlock_destroy(registry_lock);
        registry_lock = NULL;
    }
}
/* new() -> tree(). Creates a fresh tree resource; raises enomem on
 * allocation failure. */
static ERL_NIF_TERM new_0(ErlNifEnv* env, int argc,
                          const ERL_NIF_TERM argv[])
{
    state_t *state = init_tree_state(env);
    if (!state)
        return raise(env, ENOMEM);
    ERL_NIF_TERM result = enif_make_resource(env, state);
    enif_release_resource(state); /* term now owns the resource */
    return result;
}
/* insert(Tree, Path) -> ok. Adds one reference of Path (an iolist) under a
 * write lock; the empty path is a no-op. */
static ERL_NIF_TERM insert_2(ErlNifEnv* env, int argc,
                             const ERL_NIF_TERM argv[])
{
    state_t *state;
    ErlNifBinary path_bin;
    if (!enif_get_resource(env, argv[0], tree_state_t, (void *) &state) ||
        !enif_inspect_iolist_as_binary(env, argv[1], &path_bin))
        return raise(env, EINVAL);
    if (!path_bin.size)
        return enif_make_atom(env, "ok");
    /* VLA scratch copy with '/' split into NULs (see prep_path). */
    char path[path_bin.size+1];
    prep_path(path, &path_bin);
    enif_rwlock_rwlock(state->lock);
    int ret = tree_add(state->tree, path, path_bin.size);
    enif_rwlock_rwunlock(state->lock);
    if (!ret)
        return enif_make_atom(env, "ok");
    else
        return raise(env, ret);
}

/* delete(Tree, Path) -> ok. Drops one reference of Path under a write lock;
 * unknown paths and the empty path are no-ops. */
static ERL_NIF_TERM delete_2(ErlNifEnv* env, int argc,
                             const ERL_NIF_TERM argv[])
{
    state_t *state;
    ErlNifBinary path_bin;
    if (!enif_get_resource(env, argv[0], tree_state_t, (void *) &state) ||
        !enif_inspect_iolist_as_binary(env, argv[1], &path_bin))
        return raise(env, EINVAL);
    if (!path_bin.size)
        return enif_make_atom(env, "ok");
    char path[path_bin.size+1];
    prep_path(path, &path_bin);
    enif_rwlock_rwlock(state->lock);
    tree_del(state->tree, path, 0, path_bin.size);
    enif_rwlock_rwunlock(state->lock);
    return enif_make_atom(env, "ok");
}

/* match(Tree, Path) -> [binary()]. Returns all stored topics matching Path
 * (MQTT wildcard semantics) under a read lock. */
static ERL_NIF_TERM match_2(ErlNifEnv* env, int argc,
                            const ERL_NIF_TERM argv[])
{
    state_t *state;
    ErlNifBinary path_bin;
    ERL_NIF_TERM result = enif_make_list(env, 0);
    if (!enif_get_resource(env, argv[0], tree_state_t, (void *) &state) ||
        !enif_inspect_iolist_as_binary(env, argv[1], &path_bin))
        return raise(env, EINVAL);
    if (!path_bin.size)
        return result;
    char path[path_bin.size+1];
    prep_path(path, &path_bin);
    enif_rwlock_rlock(state->lock);
    match(env, state->tree, path, 0, path_bin.size, &result);
    enif_rwlock_runlock(state->lock);
    return result;
}
/* refc(Tree, Path) -> integer(). Reference count of Path (0 if absent). */
static ERL_NIF_TERM refc_2(ErlNifEnv* env, int argc,
                           const ERL_NIF_TERM argv[])
{
    state_t *state;
    ErlNifBinary path_bin;
    if (!enif_get_resource(env, argv[0], tree_state_t, (void *) &state) ||
        !enif_inspect_iolist_as_binary(env, argv[1], &path_bin))
        return raise(env, EINVAL);
    if (!path_bin.size)
        return enif_make_int(env, 0);
    char path[path_bin.size+1];
    prep_path(path, &path_bin);
    enif_rwlock_rlock(state->lock);
    int refc = tree_refc(state->tree, path, 0, path_bin.size);
    enif_rwlock_runlock(state->lock);
    return enif_make_int(env, refc);
}

/* clear(Tree) -> ok. Removes every stored topic under a write lock. */
static ERL_NIF_TERM clear_1(ErlNifEnv* env, int argc,
                            const ERL_NIF_TERM argv[])
{
    state_t *state;
    if (!enif_get_resource(env, argv[0], tree_state_t, (void *) &state))
        return raise(env, EINVAL);
    enif_rwlock_rwlock(state->lock);
    tree_clear(state->tree);
    enif_rwlock_rwunlock(state->lock);
    return enif_make_atom(env, "ok");
}

/* size(Tree) -> non_neg_integer(). Number of distinct stored topics. */
static ERL_NIF_TERM size_1(ErlNifEnv* env, int argc,
                           const ERL_NIF_TERM argv[])
{
    state_t *state;
    size_t size = 0;
    if (!enif_get_resource(env, argv[0], tree_state_t, (void *) &state))
        return raise(env, EINVAL);
    enif_rwlock_rlock(state->lock);
    tree_size(state->tree, &size);
    enif_rwlock_runlock(state->lock);
    return enif_make_uint64(env, (ErlNifUInt64) size);
}

/* is_empty(Tree) -> boolean(). True when the root has no children. */
static ERL_NIF_TERM is_empty_1(ErlNifEnv* env, int argc,
                               const ERL_NIF_TERM argv[])
{
    state_t *state;
    if (!enif_get_resource(env, argv[0], tree_state_t, (void *) &state))
        return raise(env, EINVAL);
    enif_rwlock_rlock(state->lock);
    char *ret = state->tree->sub ? "false" : "true";
    enif_rwlock_runlock(state->lock);
    return enif_make_atom(env, ret);
}

/* to_list(Tree) -> [{binary(), integer()}]. All topics with refcounts. */
static ERL_NIF_TERM to_list_1(ErlNifEnv* env, int argc,
                              const ERL_NIF_TERM argv[])
{
    state_t *state;
    ERL_NIF_TERM result = enif_make_list(env, 0);
    if (!enif_get_resource(env, argv[0], tree_state_t, (void *) &state))
        return raise(env, EINVAL);
    enif_rwlock_rlock(state->lock);
    to_list(env, state->tree, &result);
    enif_rwlock_runlock(state->lock);
    return result;
}

/* dump(Tree) -> term(). Debug view of the raw tree structure. */
static ERL_NIF_TERM dump_1(ErlNifEnv* env, int argc,
                           const ERL_NIF_TERM argv[])
{
    state_t *state;
    if (!enif_get_resource(env, argv[0], tree_state_t, (void *) &state))
        return raise(env, EINVAL);
    enif_rwlock_rlock(state->lock);
    ERL_NIF_TERM result = dump(env, state->tree);
    enif_rwlock_runlock(state->lock);
    return result;
}
static ERL_NIF_TERM register_2(ErlNifEnv* env, int argc, | |||
const ERL_NIF_TERM argv[]) | |||
{ | |||
state_t *state; | |||
unsigned int len; | |||
int ret; | |||
if (!enif_get_atom_length(env, argv[0], &len, ERL_NIF_LATIN1) || | |||
!enif_get_resource(env, argv[1], tree_state_t, (void *) &state)) | |||
return raise(env, EINVAL); | |||
char name[len+1]; | |||
enif_get_atom(env, argv[0], name, len+1, ERL_NIF_LATIN1); | |||
if (!strcmp(name, "undefined")) | |||
return raise(env, EINVAL); | |||
ret = register_tree(name, state); | |||
if (ret) | |||
return raise(env, ret); | |||
else | |||
return enif_make_atom(env, "ok"); | |||
} | |||
static ERL_NIF_TERM unregister_1(ErlNifEnv* env, int argc, | |||
const ERL_NIF_TERM argv[]) | |||
{ | |||
unsigned int len; | |||
int ret; | |||
if (!enif_get_atom_length(env, argv[0], &len, ERL_NIF_LATIN1)) | |||
return raise(env, EINVAL); | |||
char name[len+1]; | |||
enif_get_atom(env, argv[0], name, len+1, ERL_NIF_LATIN1); | |||
ret = unregister_tree(name); | |||
if (ret) | |||
return raise(env, ret); | |||
else | |||
return enif_make_atom(env, "ok"); | |||
} | |||
/* NIF whereis/1: look up a registered tree by atom name.
 * Returns the resource term, or the atom 'undefined' when the name is
 * not registered. The global registry read lock guards the uthash map. */
static ERL_NIF_TERM whereis_1(ErlNifEnv* env, int argc,
                              const ERL_NIF_TERM argv[])
{
    unsigned int len;
    registry_t *entry;
    ERL_NIF_TERM result;
    if (!enif_get_atom_length(env, argv[0], &len, ERL_NIF_LATIN1))
        return raise(env, EINVAL);
    char name[len+1];
    /* Check the result: enif_get_atom() returns 0 on failure, and 'name'
     * would otherwise be used uninitialized. */
    if (!enif_get_atom(env, argv[0], name, len+1, ERL_NIF_LATIN1))
        return raise(env, EINVAL);
    enif_rwlock_rlock(registry_lock);
    HASH_FIND_STR(registry, name, entry);
    if (entry)
        result = enif_make_resource(env, entry->state);
    else
        result = enif_make_atom(env, "undefined");
    enif_rwlock_runlock(registry_lock);
    return result;
}
/* NIF registered/0: list all registered names as atoms.
 * Holds the global registry read lock while iterating the uthash map;
 * cells are prepended, so the order is the reverse of iteration order. */
static ERL_NIF_TERM registered_0(ErlNifEnv* env, int argc,
                                 const ERL_NIF_TERM argv[])
{
    registry_t *entry, *iter;
    ERL_NIF_TERM result = enif_make_list(env, 0);
    enif_rwlock_rlock(registry_lock);
    HASH_ITER(hh, registry, entry, iter) {
        result = enif_make_list_cell(env, enif_make_atom(env, entry->name), result);
    }
    enif_rwlock_runlock(registry_lock);
    return result;
}
/* Dispatch table: maps Erlang function name/arity pairs to C handlers. */
static ErlNifFunc nif_funcs[] =
{
    {"new", 0, new_0},
    {"insert", 2, insert_2},
    {"delete", 2, delete_2},
    {"match", 2, match_2},
    {"refc", 2, refc_2},
    {"clear", 1, clear_1},
    {"size", 1, size_1},
    {"is_empty", 1, is_empty_1},
    {"to_list", 1, to_list_1},
    {"dump", 1, dump_1},
    {"register", 2, register_2},
    {"unregister", 1, unregister_1},
    {"whereis", 1, whereis_1},
    {"registered", 0, registered_0}
};
ERL_NIF_INIT(mqtree, nif_funcs, load, NULL, NULL, unload)
@ -1,12 +0,0 @@ | |||
{port_specs, [ | |||
{"../../priv/mqtree.so", ["*.c"]} | |||
]}. | |||
{port_env, [ | |||
{"CFLAGS", "$CFLAGS -std=c99 -g -O2 -Wall"}, | |||
{"LDFLAGS", "$LDFLAGS -lpthread"} | |||
]}. | |||
@ -1,80 +0,0 @@ | |||
#include <erl_nif.h> | |||
/* | |||
This function expects a list of list of tuples of type {int, _}. | |||
It filters the tuples, using the first int field as a key, | |||
 and removing duplicate keys, with precedence given to the order
in which they were seen (first given precedence). | |||
*/ | |||
static ERL_NIF_TERM | |||
bitmap_filter(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
size_t seen_forklift_id[3000] = { 0 }; | |||
if(argc != 1) | |||
{ | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_is_list(env, argv[0])) | |||
{ | |||
return enif_make_badarg(env); | |||
} | |||
ERL_NIF_TERM ret = enif_make_list(env, 0); | |||
ERL_NIF_TERM outer_list = argv[0]; | |||
ERL_NIF_TERM inner_list; | |||
ERL_NIF_TERM inner_head; | |||
const ERL_NIF_TERM* tuple_elems; | |||
int num_elems; | |||
unsigned int key; | |||
while(enif_get_list_cell(env, outer_list, &inner_list, &outer_list)) | |||
{ | |||
if(!enif_is_list(env, inner_list)) | |||
{ | |||
return enif_make_badarg(env); | |||
} | |||
while(enif_get_list_cell(env, inner_list, &inner_head, &inner_list)) | |||
{ | |||
if(!enif_get_tuple(env, inner_head, &num_elems, &tuple_elems)) | |||
{ | |||
return enif_make_badarg(env); | |||
} | |||
if(num_elems != 2) | |||
{ | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_uint(env, tuple_elems[0], &key)) | |||
{ | |||
return enif_make_badarg(env); | |||
} | |||
if(key >= 3000) | |||
{ | |||
return enif_make_badarg(env); | |||
} | |||
if(!seen_forklift_id[key]) | |||
{ | |||
seen_forklift_id[key] = 1; | |||
ret = enif_make_list_cell(env, inner_head, ret); | |||
} | |||
} | |||
} | |||
return ret; | |||
} | |||
/* Dispatch table; the trailing 0 flags field marks a regular
 * (non-dirty-scheduler) NIF. */
static ErlNifFunc nif_funcs[] =
{
    {"filter", 1, bitmap_filter, 0}
};
ERL_NIF_INIT(bitmap_filter, nif_funcs, NULL, NULL, NULL, NULL)
@ -1,30 +0,0 @@ | |||
{port_specs, [ | |||
{"../../priv/bitmap_filter.so", [ | |||
"*.c" | |||
]} | |||
]}. | |||
%{port_specs, [{"../../priv/granderl.so", []}]}. | |||
%% {port_env, [ | |||
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)", | |||
%% "CFLAGS", "$CFLAGS -Ic_src/ -g -Wall -flto -Werror -O3"}, | |||
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)", | |||
%% "CXXFLAGS", "$CXXFLAGS -Ic_src/ -g -Wall -flto -Werror -O3"}, | |||
%% | |||
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)", | |||
%% "LDFLAGS", "$LDFLAGS -flto -lstdc++"}, | |||
%% | |||
%% %% OS X Leopard flags for 64-bit | |||
%% {"darwin9.*-64$", "CXXFLAGS", "-m64"}, | |||
%% {"darwin9.*-64$", "LDFLAGS", "-arch x86_64"}, | |||
%% | |||
%% %% OS X Snow Leopard flags for 32-bit | |||
%% {"darwin10.*-32$", "CXXFLAGS", "-m32"}, | |||
%% {"darwin10.*-32$", "LDFLAGS", "-arch i386"}, | |||
%% | |||
%% {"win32", "CXXFLAGS", "$CXXFLAGS /O2 /DNDEBUG"} | |||
%% ]}. | |||
@ -1,448 +0,0 @@ | |||
#include "erl_nif.h" | |||
/* Resource type for hash-set instances (see bsn_res); opened in on_load(). */
ErlNifResourceType* bsn_type;
/* Cached atoms, initialized once in on_load(). */
ERL_NIF_TERM ATOM_TRUE, ATOM_FALSE;
/* | |||
typedef struct { | |||
unsigned size; | |||
unsigned char* data; | |||
} ErlNifBinary; | |||
*/ | |||
/* Singly linked chain node holding one stored binary. */
struct bsn_elem_struct {
    ErlNifBinary bin;
    struct bsn_elem_struct* next;
};
typedef struct bsn_elem_struct bsn_elem;
/* A bucket is the head pointer of a chain (NULL when empty). */
typedef bsn_elem* bsn_list;
/* Separate-chaining hash set protected by a single mutex. */
typedef struct {
    unsigned int count; /* count of elements */
    unsigned int max; /* count of slots */
    ErlNifMutex *mutex;
    bsn_list* list;
} bsn_res;
inline static ERL_NIF_TERM bool_to_term(int value) { | |||
return value ? ATOM_TRUE : ATOM_FALSE; | |||
} | |||
/* Calculate the sum of chars. */ | |||
unsigned int | |||
private_hash(const ErlNifBinary* b, unsigned int max) | |||
{ | |||
unsigned char* ptr; | |||
unsigned int i, sum = 0; | |||
ptr = b->data; | |||
i = b->size; | |||
for (; i; i--, ptr++) | |||
sum += *ptr; | |||
return sum % max; | |||
} | |||
inline void | |||
private_clear_elem(bsn_elem* el) | |||
{ | |||
enif_release_binary(&(el->bin)); | |||
enif_free(el); | |||
} | |||
inline void | |||
private_chain_clear_all(bsn_elem* ptr) | |||
{ | |||
bsn_elem* next; | |||
while (ptr != NULL) { | |||
next = ptr->next; | |||
private_clear_elem(ptr); | |||
ptr = next; | |||
} | |||
} | |||
inline int | |||
private_compare(ErlNifBinary* b1, ErlNifBinary* b2) | |||
{ | |||
unsigned char* p1; | |||
unsigned char* p2; | |||
unsigned len; | |||
if (b1->size != b2->size) | |||
return 0; | |||
p1 = b1->data; | |||
p2 = b2->data; | |||
len = b1->size; | |||
while (len) { | |||
if ((*p1) != (*p2)) | |||
return 0; | |||
len--; p1++; p2++; | |||
} | |||
return 1; | |||
} | |||
/* Skip existing elements. If the element bin is not found, return last element.
 * If el.bin == bin, return el.
 *
 * Side channel: *num_ptr counts probed positions (1-based). When a match
 * is found it is negated, so a negative value after the call means
 * "already present" and its magnitude is the match position. */
bsn_elem*
private_chain_shift(bsn_elem* ptr, ErlNifBinary* bin, int* num_ptr)
{
    (*num_ptr)++;
    if ((ptr) == NULL)
        return ptr;
    while (1) {
        if (private_compare(&(ptr->bin), bin)) {
            /* found an equal binary. Invert num */
            (*num_ptr) *= -1;
            return ptr;
        }
        if ((ptr->next) == NULL)
            return ptr;
        ptr = ptr->next;
        (*num_ptr)++;
    }
}
/* Append the element `el' to the chain `chain'.
 * On return *num_ptr follows the private_chain_shift() convention:
 * a negative value means `el' was a duplicate and has ALREADY been freed
 * here — the caller must not use or release it afterwards. */
void
private_chain_append(bsn_elem** chain, bsn_elem* el, int* num_ptr)
{
    bsn_elem* last;
    if ((*chain) == NULL) {
        /* The new element is last */
        *chain = el;
    } else {
        last = private_chain_shift(*chain, &(el->bin), num_ptr);
        if ((*num_ptr) < 0) {
            /* Element was already added. */
            private_clear_elem(el);
        } else {
            last->next = el;
        }
    }
}
bsn_elem* | |||
private_chain_shift_clear(bsn_elem** ptr, ErlNifBinary* bin, int* num_ptr) | |||
{ | |||
bsn_elem** prev = NULL; | |||
bsn_elem* el; | |||
while ((*ptr) != NULL) { | |||
if (private_compare(&((*ptr)->bin), bin)) { | |||
(*num_ptr) *= -1; | |||
/* found an equal binary. Delete elem. Invert num */ | |||
if (prev == NULL) { | |||
el = *ptr; | |||
(*ptr) = (*ptr)->next; | |||
return el; | |||
} | |||
*prev = (*ptr)->next; | |||
return *ptr; | |||
} | |||
prev = ptr; | |||
el = *ptr; | |||
ptr = (bsn_elem**) &(el->next); | |||
(*num_ptr)++; | |||
} | |||
return NULL; | |||
} | |||
static ERL_NIF_TERM | |||
bsn_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
unsigned int max; | |||
bsn_list* ptr; | |||
bsn_res* r; | |||
if (!(enif_get_uint(env, argv[0], &max) && (max>0))) | |||
return enif_make_badarg(env); | |||
ptr = enif_alloc(sizeof(bsn_list) * max); | |||
if (ptr == NULL) | |||
return enif_make_badarg(env); | |||
r = (bsn_res*) enif_alloc_resource(bsn_type, sizeof(bsn_res)); | |||
r->mutex = enif_mutex_create("Mutex for the BSN writer"); | |||
r->count = 0; | |||
r->max = max; | |||
r->list = ptr; | |||
for (; max; max--, ptr++) | |||
*ptr = NULL; | |||
return enif_make_resource(env, r); | |||
} | |||
/* NIF add/2: insert binary argv[1] into the set. Returns the probe
 * count, negated when the binary was already present (not inserted). */
static ERL_NIF_TERM
bsn_add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    bsn_elem* elem_ptr;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    /* NOTE(review): same-size realloc on an inspected binary — presumably
     * intended to take a privately owned copy, but the ERTS docs define
     * enif_realloc_binary() only for enif_alloc_binary() binaries.
     * TODO confirm this is supported. */
    enif_realloc_binary(&bin, bin.size);
    pos = private_hash(&bin, r->max);
    elem_ptr = enif_alloc(sizeof(bsn_elem));
    if (elem_ptr == NULL)
        return enif_make_badarg(env);
    elem_ptr->next = NULL;
    elem_ptr->bin = bin;
    enif_mutex_lock(r->mutex);
    private_chain_append(&(r->list[pos]), elem_ptr, &num);
    if (num >= 0)
        (r->count)++;
    enif_mutex_unlock(r->mutex);
    /* Already added */
    if (num < 0)
        /* NOTE(review): on the duplicate path private_chain_append() has
         * already released this binary via private_clear_elem(); this
         * second release looks like a double release — verify ownership
         * before changing. */
        enif_release_binary(&(bin));
    return enif_make_int(env, num);
}
/* NIF in/2: membership probe. Returns the probe count, negated when the
 * binary was found — so a negative result means "present". */
static ERL_NIF_TERM
bsn_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    private_chain_shift(r->list[pos], &bin, &num);
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}
/* NIF clear/2: delete binary argv[1] from the set. Returns the probe
 * count, negated when the element was found and removed. */
static ERL_NIF_TERM
bsn_clear(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    bsn_elem* elem_ptr;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    elem_ptr = private_chain_shift_clear(&(r->list[pos]), &bin, &num);
    if (elem_ptr != NULL) {
        private_clear_elem(elem_ptr);
        (r->count)--;
    }
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}
/* Prepend one list cell per element of chain 'e' onto 'tail'.
 * NOTE(review): the same-size enif_realloc_binary() before
 * enif_make_binary() — presumably to give the emulator its own copy of
 * the stored bytes while the chain keeps the original. The ERTS docs
 * only define realloc for enif_alloc_binary() binaries; verify this
 * cannot double-own the data. */
static ERL_NIF_TERM
bsn_all_chain(ErlNifEnv* env, bsn_elem* e, ERL_NIF_TERM tail)
{
    ERL_NIF_TERM head;
    ErlNifBinary bin;
    while (e != NULL) {
        bin = e->bin;
        enif_realloc_binary(&bin, bin.size);
        head = enif_make_binary(env, &bin);
        tail = enif_make_list_cell(env, head, tail);
        e = e->next;
    }
    return tail;
}
/* NIF chains/1: a list of per-bucket lists, useful for inspecting the
 * hash distribution. Holds the mutex for the whole traversal. */
static ERL_NIF_TERM
bsn_chains(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max;
    bsn_list* ptr;
    ERL_NIF_TERM tail, head;
    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);
    tail = enif_make_list(env, 0);
    ptr = r->list;
    enif_mutex_lock(r->mutex);
    max = r->max;
    while (max) {
        head = enif_make_list(env, 0);
        head = bsn_all_chain(env, *ptr, head);
        tail = enif_make_list_cell(env, head, tail);
        ptr++;
        max--;
    }
    enif_mutex_unlock(r->mutex);
    return tail;
}
/* NIF all/1: a flat list of every stored binary, in no particular order.
 * Holds the mutex for the whole traversal. */
static ERL_NIF_TERM
bsn_all(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max;
    bsn_list* ptr;
    ERL_NIF_TERM list;
    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);
    list = enif_make_list(env, 0);
    ptr = r->list;
    enif_mutex_lock(r->mutex);
    max = r->max;
    while (max) {
        list = bsn_all_chain(env, *ptr, list);
        ptr++;
        max--;
    }
    enif_mutex_unlock(r->mutex);
    return list;
}
static ERL_NIF_TERM | |||
bsn_count(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
bsn_res* r; | |||
if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r)) | |||
return enif_make_badarg(env); | |||
return enif_make_int(env, r->count); | |||
} | |||
static ERL_NIF_TERM | |||
bsn_hash(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
ErlNifBinary bin; | |||
unsigned int max; | |||
if (!(enif_inspect_binary(env, argv[0], &bin) | |||
&& enif_get_uint(env, argv[1], &max) && (max>0))) | |||
return enif_make_badarg(env); | |||
return enif_make_uint(env, | |||
private_hash(&bin, max)); | |||
} | |||
static ERL_NIF_TERM | |||
bsn_compare(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
ErlNifBinary b1, b2; | |||
if (!(enif_inspect_binary(env, argv[0], &b1) | |||
&& enif_inspect_binary(env, argv[1], &b2))) | |||
return enif_make_badarg(env); | |||
return bool_to_term(private_compare(&b1, &b2)); | |||
} | |||
/* Release every chain in every bucket (destructor helper).
 * 'static': internal helper; a same-named function exists in sibling NIF
 * sources, so external linkage risks duplicate symbols. */
static void private_clear_all(bsn_res* r)
{
    unsigned int n = r->max;
    bsn_list* slot = r->list;
    for (; n; n--, slot++)
        private_chain_clear_all(*slot);
}
/* Resource destructor: free all chains, the mutex, and the bucket array.
 * Runs when the VM garbage-collects the last reference to the resource. */
void
bsn_type_dtor(ErlNifEnv* env, void* obj)
{
    bsn_res* r = (bsn_res*) obj;
    private_clear_all(r);
    enif_mutex_destroy(r->mutex);
    enif_free(r->list);
}
/* Load callback: cache the boolean atoms and open (or take over) the
 * resource type. Returns 0 on success, 1 to make loading fail. */
int
on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    ATOM_TRUE = enif_make_atom(env, "true");
    ATOM_FALSE = enif_make_atom(env, "false");
    ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE |
        ERL_NIF_RT_TAKEOVER);
    bsn_type = enif_open_resource_type(env, NULL, "bsn_type",
        bsn_type_dtor, flags, NULL);
    if (bsn_type == NULL) return 1;
    return 0;
}
/* Upgrade callback: no module-private state to migrate. */
int
on_upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
    return 0;
}
/* Dispatch table mapping Erlang name/arity to the C implementations. */
static ErlNifFunc nif_functions[] = {
    {"new", 1, bsn_new},
    {"add", 2, bsn_add},
    {"all", 1, bsn_all},
    {"chains", 1, bsn_chains},
    {"in", 2, bsn_search},
    {"clear", 2, bsn_clear},
    {"count", 1, bsn_count},
    {"hash", 2, bsn_hash},
    {"compare", 2, bsn_compare},
};
/* on_load doubles as the reload callback (second &on_load). */
ERL_NIF_INIT(bsn_ext, nif_functions, &on_load, &on_load, &on_upgrade, NULL);
@ -1,331 +0,0 @@ | |||
#include "erl_nif.h" | |||
/* Resource type and cached atoms, initialized once in on_load(). */
ErlNifResourceType* bsn_type;
ERL_NIF_TERM ATOM_TRUE, ATOM_FALSE, ATOM_NO_MORE;
/* One open-addressing slot; hash == max (the slot count) marks it free. */
struct bsn_elem_struct {
    ErlNifBinary bin;
    unsigned int hash;
};
typedef struct bsn_elem_struct bsn_elem;
/* Open-addressing hash set guarded by a single mutex. */
typedef struct {
    unsigned int count; /* count of elements */
    unsigned int max; /* count of slots */
    ErlNifMutex *mutex;
    bsn_elem* list;
    /* Probe-sequence strategy, chosen in bsn_new(); receives the set,
     * the home hash and the probe step. */
    unsigned int (*next_pos)
        (void*, unsigned int, unsigned int);
} bsn_res;
/* Map a C truth value onto the cached 'true'/'false' atoms. */
inline static ERL_NIF_TERM bool_to_term(int value) {
    return value ? ATOM_TRUE : ATOM_FALSE;
}
unsigned int next_pos_linear(bsn_res* r, unsigned int hash, unsigned int step) { | |||
return (hash + step) % (r->max); | |||
} | |||
unsigned int next_pos_quadric(bsn_res* r, unsigned int hash, unsigned int step) { | |||
return (hash + (step*step)) % (r->max); | |||
} | |||
/* Calculate the sum of chars. */ | |||
unsigned int | |||
private_hash(const ErlNifBinary* b, unsigned int max) | |||
{ | |||
unsigned char* ptr; | |||
unsigned int i, sum = 0; | |||
ptr = b->data; | |||
i = b->size; | |||
for (; i; i--, ptr++) | |||
sum += *ptr; | |||
return sum % max; | |||
} | |||
inline int | |||
private_compare(ErlNifBinary* b1, ErlNifBinary* b2) | |||
{ | |||
unsigned char* p1; | |||
unsigned char* p2; | |||
unsigned len; | |||
if (b1->size != b2->size) | |||
return 0; | |||
p1 = b1->data; | |||
p2 = b2->data; | |||
len = b1->size; | |||
while (len) { | |||
if ((*p1) != (*p2)) | |||
return 0; | |||
len--; p1++; p2++; | |||
} | |||
return 1; | |||
} | |||
static ERL_NIF_TERM | |||
bsn_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
int max; /* This value will be set by a client: | |||
if (max<0) -> use quadric algorithm */ | |||
bsn_elem* ptr; | |||
bsn_res* r; | |||
if (!enif_get_int(env, argv[0], &max) || (max == 0)) | |||
return enif_make_badarg(env); | |||
r = (bsn_res*) enif_alloc_resource(bsn_type, sizeof(bsn_res)); | |||
r->mutex = enif_mutex_create("Mutex for the BSN writer"); | |||
r->count = 0; | |||
/* Select an algorithm */ | |||
if (max>0) { | |||
r->next_pos = &next_pos_linear; | |||
} else if (max<0) { | |||
r->next_pos = &next_pos_quadric; | |||
max *= -1; | |||
} | |||
/* Now max is cells' count in the array. */ | |||
r->max = (unsigned int) max; | |||
ptr = enif_alloc(sizeof(bsn_elem) * max); | |||
if (ptr == NULL) | |||
return enif_make_badarg(env); | |||
r->list = ptr; | |||
for (; max; max--, ptr++) | |||
ptr->hash = r->max; | |||
return enif_make_resource(env, r); | |||
} | |||
/* NIF add/2: open-addressing insert. Returns the probe count (negated
 * when the binary is already present) or the atom 'no_more' when the
 * table is full.
 * NOTE(review): num starts at 0 here but at 1 in bsn_search(), so a
 * duplicate found at the very first probe yields num = 0 * -1 = 0, which
 * the checks below treat as a successful insert (count++ and no binary
 * release) — confirm whether that corner case can occur in practice. */
static ERL_NIF_TERM
bsn_add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos, hash, max;
    int num = 0;
    bsn_elem* elem_ptr;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    /* NOTE(review): same-size realloc on an inspected binary — presumably
     * to take a private copy; the ERTS docs define enif_realloc_binary()
     * only for enif_alloc_binary() binaries. TODO confirm. */
    enif_realloc_binary(&bin, bin.size);
    hash = pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    max = r->max;
    /* 'num' counts probes; comparing int against unsigned 'max' is safe
     * while num >= 0 (the only values reaching the comparison). */
    while (num < max) {
        elem_ptr = &(r->list[pos]);
        /* Found free space */
        if (elem_ptr->hash == max) {
            elem_ptr->bin = bin;
            elem_ptr->hash = hash;
            break;
        }
        /* Found elem */
        if ((elem_ptr->hash == hash)
            && private_compare(&bin, &(elem_ptr->bin))) {
            num *= -1;
            break;
        }
        pos = (r->next_pos)(r, hash, num);
        num++;
    }
    if ((num >= 0) && (num < max))
        (r->count)++;
    enif_mutex_unlock(r->mutex);
    /* Error: already added or overflow */
    if (!((num >= 0) && (num < max)))
        enif_release_binary(&bin);
    if (num >= max)
        return ATOM_NO_MORE;
    return enif_make_int(env, num);
}
/* NIF in/2: membership probe following the same slot sequence as
 * bsn_add(). Returns the probe count, negated when the binary is found
 * (so a negative result means "present").
 * NOTE(review): num starts at 1 here but at 0 in bsn_add(); the visited
 * slots coincide but the counts differ by one — confirm callers only
 * test the sign. */
static ERL_NIF_TERM
bsn_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos, max, hash;
    int num = 1;
    bsn_elem* elem_ptr;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    hash = pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    max = r->max;
    while (num < max) {
        elem_ptr = &(r->list[pos]);
        /* Found free space: the key cannot be further along the chain. */
        if (elem_ptr->hash == max) {
            break;
        }
        /* Found elem */
        if ((elem_ptr->hash == hash)
            && private_compare(&bin, &(elem_ptr->bin))) {
            num *= -1;
            break;
        }
        pos = (r->next_pos)(r, hash, num);
        num++;
    }
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}
/* NIF clear/2: not implemented for this open-addressing variant; always
 * fails with badarg. */
static ERL_NIF_TERM
bsn_clear(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_badarg(env);
}
/* NIF all/1: list every stored binary (slots whose hash != max).
 * The do/while relies on max >= 1, which bsn_new() guarantees by
 * rejecting a zero slot count. */
static ERL_NIF_TERM
bsn_all(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max, pos = 0;
    ERL_NIF_TERM head, tail;
    ErlNifBinary bin;
    bsn_elem* elem_ptr;
    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);
    tail = enif_make_list(env, 0);
    enif_mutex_lock(r->mutex);
    max = r->max;
    elem_ptr = r->list;
    do {
        if (elem_ptr->hash != max) {
            /* NOTE(review): same-size realloc on a stored binary before
             * handing it to the emulator; see bsn_add(). */
            bin = elem_ptr->bin;
            enif_realloc_binary(&bin, bin.size);
            head = enif_make_binary(env, &bin);
            tail = enif_make_list_cell(env, head, tail);
        }
        elem_ptr++;
        pos++;
    } while (pos < max);
    enif_mutex_unlock(r->mutex);
    return tail;
}
static ERL_NIF_TERM | |||
bsn_count(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
bsn_res* r; | |||
if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r)) | |||
return enif_make_badarg(env); | |||
return enif_make_int(env, r->count); | |||
} | |||
/* Release the binary of every occupied slot (destructor helper).
 * 'static': internal helper; a same-named function exists in sibling NIF
 * sources, so external linkage risks duplicate symbols. */
static void private_clear_all(bsn_res* r)
{
    unsigned int max = r->max;
    bsn_elem* ptr = r->list;
    for (unsigned int num = max; num; num--, ptr++) {
        /* hash == max marks a free slot with no binary to release. */
        if (ptr->hash != max)
            enif_release_binary(&(ptr->bin));
    }
}
/* Resource destructor: release stored binaries, the mutex, and the slot
 * array. Runs when the VM garbage-collects the last reference. */
void
bsn_type_dtor(ErlNifEnv* env, void* obj)
{
    bsn_res* r = (bsn_res*) obj;
    private_clear_all(r);
    enif_mutex_destroy(r->mutex);
    enif_free(r->list);
}
/* Load callback: cache atoms and open (or take over) the resource type.
 * Returns 0 on success, 1 to make loading fail. */
int
on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    ATOM_TRUE = enif_make_atom(env, "true");
    ATOM_FALSE = enif_make_atom(env, "false");
    ATOM_NO_MORE = enif_make_atom(env, "no_more");
    ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE |
        ERL_NIF_RT_TAKEOVER);
    bsn_type = enif_open_resource_type(env, NULL, "bsn_type",
        bsn_type_dtor, flags, NULL);
    if (bsn_type == NULL) return 1;
    return 0;
}
/* Upgrade callback: no module-private state to migrate. */
int
on_upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
    return 0;
}
/* Dispatch table; clear/2 is present but not implemented (badarg). */
static ErlNifFunc nif_functions[] = {
    {"new", 1, bsn_new},
    {"add", 2, bsn_add},
    {"all", 1, bsn_all},
    {"in", 2, bsn_search},
    {"clear", 2, bsn_clear},
    {"count", 1, bsn_count},
};
/* on_load doubles as the reload callback (second &on_load). */
ERL_NIF_INIT(bsn_int, nif_functions, &on_load, &on_load, &on_upgrade, NULL);
@ -1,448 +0,0 @@ | |||
#include "erl_nif.h" | |||
/* Resource type for hash-set instances (see bsn_res); opened in on_load(). */
ErlNifResourceType* bsn_type;
/* Cached atoms, initialized once in on_load(). */
ERL_NIF_TERM ATOM_TRUE, ATOM_FALSE;
/* | |||
typedef struct { | |||
unsigned size; | |||
unsigned char* data; | |||
} ErlNifBinary; | |||
*/ | |||
/* Singly linked chain node holding one stored binary. */
struct bsn_elem_struct {
    ErlNifBinary bin;
    struct bsn_elem_struct* next;
};
typedef struct bsn_elem_struct bsn_elem;
/* A bucket is the head pointer of a chain (NULL when empty). */
typedef bsn_elem* bsn_list;
/* Separate-chaining hash set protected by a single mutex. */
typedef struct {
    unsigned int count; /* count of elements */
    unsigned int max; /* count of slots */
    ErlNifMutex *mutex;
    bsn_list* list;
} bsn_res;
inline static ERL_NIF_TERM bool_to_term(int value) { | |||
return value ? ATOM_TRUE : ATOM_FALSE; | |||
} | |||
/* Calculate the sum of chars. */ | |||
unsigned int | |||
private_hash(const ErlNifBinary* b, unsigned int max) | |||
{ | |||
unsigned char* ptr; | |||
unsigned int i, sum = 0; | |||
ptr = b->data; | |||
i = b->size; | |||
for (; i; i--, ptr++) | |||
sum += *ptr; | |||
return sum % max; | |||
} | |||
inline void | |||
private_clear_elem(bsn_elem* el) | |||
{ | |||
enif_release_binary(&(el->bin)); | |||
enif_free(el); | |||
} | |||
inline void | |||
private_chain_clear_all(bsn_elem* ptr) | |||
{ | |||
bsn_elem* next; | |||
while (ptr != NULL) { | |||
next = ptr->next; | |||
private_clear_elem(ptr); | |||
ptr = next; | |||
} | |||
} | |||
inline int | |||
private_compare(ErlNifBinary* b1, ErlNifBinary* b2) | |||
{ | |||
unsigned char* p1; | |||
unsigned char* p2; | |||
unsigned len; | |||
if (b1->size != b2->size) | |||
return 0; | |||
p1 = b1->data; | |||
p2 = b2->data; | |||
len = b1->size; | |||
while (len) { | |||
if ((*p1) != (*p2)) | |||
return 0; | |||
len--; p1++; p2++; | |||
} | |||
return 1; | |||
} | |||
/* Skip existing elements. If the element bin is not found, return last element.
 * If el.bin == bin, return el.
 *
 * Side channel: *num_ptr counts probed positions (1-based). When a match
 * is found it is negated, so a negative value after the call means
 * "already present" and its magnitude is the match position. */
bsn_elem*
private_chain_shift(bsn_elem* ptr, ErlNifBinary* bin, int* num_ptr)
{
    (*num_ptr)++;
    if ((ptr) == NULL)
        return ptr;
    while (1) {
        if (private_compare(&(ptr->bin), bin)) {
            /* found an equal binary. Invert num */
            (*num_ptr) *= -1;
            return ptr;
        }
        if ((ptr->next) == NULL)
            return ptr;
        ptr = ptr->next;
        (*num_ptr)++;
    }
}
/* Append the element `el' to the chain `chain'.
 * On return *num_ptr follows the private_chain_shift() convention:
 * a negative value means `el' was a duplicate and has ALREADY been freed
 * here — the caller must not use or release it afterwards. */
void
private_chain_append(bsn_elem** chain, bsn_elem* el, int* num_ptr)
{
    bsn_elem* last;
    if ((*chain) == NULL) {
        /* The new element is last */
        *chain = el;
    } else {
        last = private_chain_shift(*chain, &(el->bin), num_ptr);
        if ((*num_ptr) < 0) {
            /* Element was already added. */
            private_clear_elem(el);
        } else {
            last->next = el;
        }
    }
}
bsn_elem* | |||
private_chain_shift_clear(bsn_elem** ptr, ErlNifBinary* bin, int* num_ptr) | |||
{ | |||
bsn_elem** prev = NULL; | |||
bsn_elem* el; | |||
while ((*ptr) != NULL) { | |||
if (private_compare(&((*ptr)->bin), bin)) { | |||
(*num_ptr) *= -1; | |||
/* found an equal binary. Delete elem. Invert num */ | |||
if (prev == NULL) { | |||
el = *ptr; | |||
(*ptr) = (*ptr)->next; | |||
return el; | |||
} | |||
*prev = (*ptr)->next; | |||
return *ptr; | |||
} | |||
prev = ptr; | |||
el = *ptr; | |||
ptr = (bsn_elem**) &(el->next); | |||
(*num_ptr)++; | |||
} | |||
return NULL; | |||
} | |||
static ERL_NIF_TERM | |||
bsn_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
unsigned int max; | |||
bsn_list* ptr; | |||
bsn_res* r; | |||
if (!(enif_get_uint(env, argv[0], &max) && (max>0))) | |||
return enif_make_badarg(env); | |||
ptr = enif_alloc(sizeof(bsn_list) * max); | |||
if (ptr == NULL) | |||
return enif_make_badarg(env); | |||
r = (bsn_res*) enif_alloc_resource(bsn_type, sizeof(bsn_res)); | |||
r->mutex = enif_mutex_create("Mutex for the BSN writer"); | |||
r->count = 0; | |||
r->max = max; | |||
r->list = ptr; | |||
for (; max; max--, ptr++) | |||
*ptr = NULL; | |||
return enif_make_resource(env, r); | |||
} | |||
/* NIF add/2: insert binary argv[1] into the set. Returns the probe
 * count, negated when the binary was already present (not inserted). */
static ERL_NIF_TERM
bsn_add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    bsn_elem* elem_ptr;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    /* NOTE(review): same-size realloc on an inspected binary — presumably
     * intended to take a privately owned copy, but the ERTS docs define
     * enif_realloc_binary() only for enif_alloc_binary() binaries.
     * TODO confirm this is supported. */
    enif_realloc_binary(&bin, bin.size);
    pos = private_hash(&bin, r->max);
    elem_ptr = enif_alloc(sizeof(bsn_elem));
    if (elem_ptr == NULL)
        return enif_make_badarg(env);
    elem_ptr->next = NULL;
    elem_ptr->bin = bin;
    enif_mutex_lock(r->mutex);
    private_chain_append(&(r->list[pos]), elem_ptr, &num);
    if (num >= 0)
        (r->count)++;
    enif_mutex_unlock(r->mutex);
    /* Already added */
    if (num < 0)
        /* NOTE(review): on the duplicate path private_chain_append() has
         * already released this binary via private_clear_elem(); this
         * second release looks like a double release — verify ownership
         * before changing. */
        enif_release_binary(&(bin));
    return enif_make_int(env, num);
}
/* NIF in/2: membership probe. Returns the probe count, negated when the
 * binary was found — so a negative result means "present". */
static ERL_NIF_TERM
bsn_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    private_chain_shift(r->list[pos], &bin, &num);
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}
/* NIF clear/2: delete binary argv[1] from the set. Returns the probe
 * count, negated when the element was found and removed. */
static ERL_NIF_TERM
bsn_clear(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    bsn_elem* elem_ptr;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    elem_ptr = private_chain_shift_clear(&(r->list[pos]), &bin, &num);
    if (elem_ptr != NULL) {
        private_clear_elem(elem_ptr);
        (r->count)--;
    }
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}
/* Prepend one list cell per element of chain 'e' onto 'tail'.
 * NOTE(review): the same-size enif_realloc_binary() before
 * enif_make_binary() — presumably to give the emulator its own copy of
 * the stored bytes while the chain keeps the original. The ERTS docs
 * only define realloc for enif_alloc_binary() binaries; verify this
 * cannot double-own the data. */
static ERL_NIF_TERM
bsn_all_chain(ErlNifEnv* env, bsn_elem* e, ERL_NIF_TERM tail)
{
    ERL_NIF_TERM head;
    ErlNifBinary bin;
    while (e != NULL) {
        bin = e->bin;
        enif_realloc_binary(&bin, bin.size);
        head = enif_make_binary(env, &bin);
        tail = enif_make_list_cell(env, head, tail);
        e = e->next;
    }
    return tail;
}
/* NIF chains/1: a list of per-bucket lists, useful for inspecting the
 * hash distribution. Holds the mutex for the whole traversal. */
static ERL_NIF_TERM
bsn_chains(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max;
    bsn_list* ptr;
    ERL_NIF_TERM tail, head;
    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);
    tail = enif_make_list(env, 0);
    ptr = r->list;
    enif_mutex_lock(r->mutex);
    max = r->max;
    while (max) {
        head = enif_make_list(env, 0);
        head = bsn_all_chain(env, *ptr, head);
        tail = enif_make_list_cell(env, head, tail);
        ptr++;
        max--;
    }
    enif_mutex_unlock(r->mutex);
    return tail;
}
/* NIF all/1: a flat list of every stored binary, in no particular order.
 * Holds the mutex for the whole traversal. */
static ERL_NIF_TERM
bsn_all(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max;
    bsn_list* ptr;
    ERL_NIF_TERM list;
    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);
    list = enif_make_list(env, 0);
    ptr = r->list;
    enif_mutex_lock(r->mutex);
    max = r->max;
    while (max) {
        list = bsn_all_chain(env, *ptr, list);
        ptr++;
        max--;
    }
    enif_mutex_unlock(r->mutex);
    return list;
}
static ERL_NIF_TERM | |||
bsn_count(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
bsn_res* r; | |||
if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r)) | |||
return enif_make_badarg(env); | |||
return enif_make_int(env, r->count); | |||
} | |||
static ERL_NIF_TERM | |||
bsn_hash(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
ErlNifBinary bin; | |||
unsigned int max; | |||
if (!(enif_inspect_binary(env, argv[0], &bin) | |||
&& enif_get_uint(env, argv[1], &max) && (max>0))) | |||
return enif_make_badarg(env); | |||
return enif_make_uint(env, | |||
private_hash(&bin, max)); | |||
} | |||
static ERL_NIF_TERM | |||
bsn_compare(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
ErlNifBinary b1, b2; | |||
if (!(enif_inspect_binary(env, argv[0], &b1) | |||
&& enif_inspect_binary(env, argv[1], &b2))) | |||
return enif_make_badarg(env); | |||
return bool_to_term(private_compare(&b1, &b2)); | |||
} | |||
/* Release every chain in every bucket (destructor helper).
 * 'static': internal helper; a same-named function exists in sibling NIF
 * sources, so external linkage risks duplicate symbols. */
static void private_clear_all(bsn_res* r)
{
    unsigned int n = r->max;
    bsn_list* slot = r->list;
    for (; n; n--, slot++)
        private_chain_clear_all(*slot);
}
/* Resource destructor: free all chains, the mutex, and the bucket array.
 * Runs when the VM garbage-collects the last reference to the resource. */
void
bsn_type_dtor(ErlNifEnv* env, void* obj)
{
    bsn_res* r = (bsn_res*) obj;
    private_clear_all(r);
    enif_mutex_destroy(r->mutex);
    enif_free(r->list);
}
/* Load callback: cache the boolean atoms and open (or take over) the
 * resource type. Returns 0 on success, 1 to make loading fail. */
int
on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    ATOM_TRUE = enif_make_atom(env, "true");
    ATOM_FALSE = enif_make_atom(env, "false");
    ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE |
        ERL_NIF_RT_TAKEOVER);
    bsn_type = enif_open_resource_type(env, NULL, "bsn_type",
        bsn_type_dtor, flags, NULL);
    if (bsn_type == NULL) return 1;
    return 0;
}
/* Upgrade callback: no module-private state to migrate. */
int
on_upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
    return 0;
}
/* NIF dispatch table: {erlang_name, arity, c_function}. */
static ErlNifFunc nif_functions[] = {
    {"new", 1, bsn_new},
    {"add", 2, bsn_add},
    {"all", 1, bsn_all},
    {"chains", 1, bsn_chains},
    {"in", 2, bsn_search},
    {"clear", 2, bsn_clear},
    {"count", 1, bsn_count},
    {"hash", 2, bsn_hash},
    {"compare", 2, bsn_compare},
};

/* NOTE(review): on_load is registered as both the load and reload
 * callback — intentional for pre-OTP-20 reload support, presumably. */
ERL_NIF_INIT(bsn_ext, nif_functions, &on_load, &on_load, &on_upgrade, NULL);
@ -1,331 +0,0 @@ | |||
#include <string.h>

#include "erl_nif.h"
/* Resource type handle and interned atoms, filled in by on_load(). */
ErlNifResourceType* bsn_type;
ERL_NIF_TERM ATOM_TRUE, ATOM_FALSE, ATOM_NO_MORE;

/* One slot of the open-addressed hash table. */
struct bsn_elem_struct {
    ErlNifBinary bin;   /* stored key; owned by the table while occupied */
    unsigned int hash;  /* slot's hash value; equals bsn_res.max when free */
};
typedef struct bsn_elem_struct bsn_elem;

/* Whole-table state, held in an Erlang resource object. */
typedef struct {
    unsigned int count; /* count of elements */
    unsigned int max; /* count of slots */
    ErlNifMutex *mutex; /* serializes writers (and some readers) */
    bsn_elem* list;     /* array of max slots */
    /* Probe-sequence function (linear or quadratic).
     * NOTE(review): declared to take void* but assigned functions taking
     * bsn_res* (see bsn_new) — incompatible pointer types; calling through
     * it is technically undefined behavior even though it works on common
     * ABIs. Consider changing the first parameter to bsn_res*. */
    unsigned int (*next_pos)
        (void*, unsigned int, unsigned int);
} bsn_res;
inline static ERL_NIF_TERM bool_to_term(int value) { | |||
return value ? ATOM_TRUE : ATOM_FALSE; | |||
} | |||
/* Linear probing: slot index advances by one per step, wrapping at max. */
unsigned int next_pos_linear(bsn_res* r, unsigned int hash, unsigned int step) {
    unsigned int probe = hash + step;
    return probe % r->max;
}
/* Quadratic probing: slot index advances by step^2, wrapping at max. */
unsigned int next_pos_quadric(bsn_res* r, unsigned int hash, unsigned int step) {
    unsigned int probe = hash + step * step;
    return probe % r->max;
}
/* Calculate the sum of chars. */ | |||
unsigned int | |||
private_hash(const ErlNifBinary* b, unsigned int max) | |||
{ | |||
unsigned char* ptr; | |||
unsigned int i, sum = 0; | |||
ptr = b->data; | |||
i = b->size; | |||
for (; i; i--, ptr++) | |||
sum += *ptr; | |||
return sum % max; | |||
} | |||
inline int | |||
private_compare(ErlNifBinary* b1, ErlNifBinary* b2) | |||
{ | |||
unsigned char* p1; | |||
unsigned char* p2; | |||
unsigned len; | |||
if (b1->size != b2->size) | |||
return 0; | |||
p1 = b1->data; | |||
p2 = b2->data; | |||
len = b1->size; | |||
while (len) { | |||
if ((*p1) != (*p2)) | |||
return 0; | |||
len--; p1++; p2++; | |||
} | |||
return 1; | |||
} | |||
static ERL_NIF_TERM | |||
bsn_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
int max; /* This value will be set by a client: | |||
if (max<0) -> use quadric algorithm */ | |||
bsn_elem* ptr; | |||
bsn_res* r; | |||
if (!enif_get_int(env, argv[0], &max) || (max == 0)) | |||
return enif_make_badarg(env); | |||
r = (bsn_res*) enif_alloc_resource(bsn_type, sizeof(bsn_res)); | |||
r->mutex = enif_mutex_create("Mutex for the BSN writer"); | |||
r->count = 0; | |||
/* Select an algorithm */ | |||
if (max>0) { | |||
r->next_pos = &next_pos_linear; | |||
} else if (max<0) { | |||
r->next_pos = &next_pos_quadric; | |||
max *= -1; | |||
} | |||
/* Now max is cells' count in the array. */ | |||
r->max = (unsigned int) max; | |||
ptr = enif_alloc(sizeof(bsn_elem) * max); | |||
if (ptr == NULL) | |||
return enif_make_badarg(env); | |||
r->list = ptr; | |||
for (; max; max--, ptr++) | |||
ptr->hash = r->max; | |||
return enif_make_resource(env, r); | |||
} | |||
/* add(Resource, Bin) -> integer() | 'no_more'
 * Inserts Bin into the table. Returns the probe count (>= 0) on success,
 * a negative probe count when the element was already present, or
 * 'no_more' when the table is full.
 *
 * NOTE(review): num starts at 0, so a duplicate found on the very first
 * probe yields num * -1 == 0, which is then counted as a successful
 * insert (count++ and the binary is not released) — suspected bug; the
 * sibling bsn_search starts num at 1, presumably for this reason. Also,
 * num (int) is compared against max (unsigned); a negative num converts
 * to a huge unsigned value so the comparisons happen to do the right
 * thing, but this deserves an explicit cast. Confirm both with callers. */
static ERL_NIF_TERM
bsn_add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos, hash, max;
    int num = 0;
    bsn_elem* elem_ptr;

    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
            && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);

    /* Reallocate so the table owns its own copy of the bytes, independent
     * of the caller's term (NOTE(review): presumably — confirm against the
     * enif_realloc_binary documentation). */
    enif_realloc_binary(&bin, bin.size);

    hash = pos = private_hash(&bin, r->max);

    enif_mutex_lock(r->mutex);
    max = r->max;
    while (num < max) {
        elem_ptr = &(r->list[pos]);

        /* Found free space */
        if (elem_ptr->hash == max) {
            elem_ptr->bin = bin;
            elem_ptr->hash = hash;
            break;
        }

        /* Found elem */
        if ((elem_ptr->hash == hash)
                && private_compare(&bin, &(elem_ptr->bin))) {
            num *= -1;          /* negate probe count to signal "duplicate" */
            break;
        }

        pos = (r->next_pos)(r, hash, num);
        num++;
    }
    if ((num >= 0) && (num < max))
        (r->count)++;
    enif_mutex_unlock(r->mutex);

    /* Error: already added or overflow — drop our copy of the binary. */
    if (!((num >= 0) && (num < max)))
        enif_release_binary(&bin);

    if (num >= max)
        return ATOM_NO_MORE;

    return enif_make_int(env, num);
}
/* in(Resource, Bin) -> integer()
 * Probes for Bin. Returns a negative probe count when the element is
 * found, and a positive count when the probe sequence hits a free slot or
 * the table is exhausted (element absent).
 *
 * NOTE(review): num starts at 1 here but at 0 in bsn_add — the asymmetry
 * appears deliberate (it makes "found at first probe" return -1 rather
 * than 0), but confirm against the Erlang-side callers. As in bsn_add,
 * int num is compared against unsigned max. */
static ERL_NIF_TERM
bsn_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos, max, hash;
    int num = 1;
    bsn_elem* elem_ptr;

    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
            && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);

    hash = pos = private_hash(&bin, r->max);

    enif_mutex_lock(r->mutex);
    max = r->max;
    while (num < max) {
        elem_ptr = &(r->list[pos]);

        /* Found free space */
        if (elem_ptr->hash == max) {
            break;
        }

        /* Found elem */
        if ((elem_ptr->hash == hash)
                && private_compare(&bin, &(elem_ptr->bin))) {
            num *= -1;          /* negative = present */
            break;
        }

        pos = (r->next_pos)(r, hash, num);
        num++;
    }
    enif_mutex_unlock(r->mutex);

    return enif_make_int(env, num);
}
/* clear(Resource, Bin) — not implemented in this backend; always raises
 * badarg. (Deleting from an open-addressed table would require tombstones
 * or rehashing the probe chain.) */
static ERL_NIF_TERM
bsn_clear(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_badarg(env);
}
/* all(Resource) -> [binary()]
 * Returns a fresh copy of every stored binary, in slot order (reversed by
 * list construction). */
static ERL_NIF_TERM
bsn_all(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max, pos = 0;
    ERL_NIF_TERM head, tail;
    ErlNifBinary bin;
    bsn_elem* elem_ptr;

    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);

    tail = enif_make_list(env, 0);

    enif_mutex_lock(r->mutex);
    max = r->max;
    elem_ptr = r->list;
    /* NOTE(review): do/while assumes max >= 1; bsn_new rejects max == 0 so
     * this holds, but a plain while would be safer. */
    do {
        if (elem_ptr->hash != max) {    /* slot occupied */
            /* Copy the struct, then realloc the copy so the term built
             * below owns its own bytes rather than aliasing the table's
             * allocation (NOTE(review): presumably — confirm against the
             * enif_realloc_binary / enif_make_binary documentation). */
            bin = elem_ptr->bin;
            enif_realloc_binary(&bin, bin.size);
            head = enif_make_binary(env, &bin);
            tail = enif_make_list_cell(env, head, tail);
        }
        elem_ptr++;
        pos++;
    } while (pos < max);
    enif_mutex_unlock(r->mutex);

    return tail;
}
static ERL_NIF_TERM | |||
bsn_count(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
bsn_res* r; | |||
if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r)) | |||
return enif_make_badarg(env); | |||
return enif_make_int(env, r->count); | |||
} | |||
/* Release the binary held in every occupied slot.
 * A slot is occupied iff its hash differs from r->max (the free marker).
 * Caller must have exclusive access to r (runs from the destructor). */
void private_clear_all(bsn_res* r)
{
    bsn_elem* slot = r->list;
    unsigned int remaining;

    for (remaining = r->max; remaining != 0; remaining--, slot++) {
        if (slot->hash != r->max)
            enif_release_binary(&(slot->bin));
    }
}
void | |||
bsn_type_dtor(ErlNifEnv* env, void* obj) | |||
{ | |||
bsn_res* r = (bsn_res*) obj; | |||
private_clear_all(r); | |||
enif_mutex_destroy(r->mutex); | |||
enif_free(r->list); | |||
} | |||
int | |||
on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info) | |||
{ | |||
ATOM_TRUE = enif_make_atom(env, "true"); | |||
ATOM_FALSE = enif_make_atom(env, "false"); | |||
ATOM_NO_MORE = enif_make_atom(env, "no_more"); | |||
ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE | | |||
ERL_NIF_RT_TAKEOVER); | |||
bsn_type = enif_open_resource_type(env, NULL, "bsn_type", | |||
bsn_type_dtor, flags, NULL); | |||
if (bsn_type == NULL) return 1; | |||
return 0; | |||
} | |||
/* NIF upgrade callback: no state to migrate between code versions;
 * always succeeds. */
int
on_upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
    return 0;
}
/* NIF dispatch table: {erlang_name, arity, c_function}. */
static ErlNifFunc nif_functions[] = {
    {"new", 1, bsn_new},
    {"add", 2, bsn_add},
    {"all", 1, bsn_all},
    {"in", 2, bsn_search},
    {"clear", 2, bsn_clear},
    {"count", 1, bsn_count},
};

/* NOTE(review): on_load is registered as both the load and reload
 * callback — intentional for pre-OTP-20 reload support, presumably. */
ERL_NIF_INIT(bsn_int, nif_functions, &on_load, &on_load, &on_upgrade, NULL);
@ -1,29 +0,0 @@ | |||
%% Build each NIF backend from its single C source into priv/.
{port_specs, [
    {"../../priv/bsn_ext.so", ["bsn_ext.c"]},
    {"../../priv/bsn_int.so", ["bsn_int.c"]}
]}.
%{port_specs, [{"../../priv/granderl.so", []}]}. | |||
%% {port_env, [ | |||
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)", | |||
%% "CFLAGS", "$CFLAGS -Ic_src/ -g -Wall -flto -Werror -O3"}, | |||
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)", | |||
%% "CXXFLAGS", "$CXXFLAGS -Ic_src/ -g -Wall -flto -Werror -O3"}, | |||
%% | |||
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)", | |||
%% "LDFLAGS", "$LDFLAGS -flto -lstdc++"}, | |||
%% | |||
%% %% OS X Leopard flags for 64-bit | |||
%% {"darwin9.*-64$", "CXXFLAGS", "-m64"}, | |||
%% {"darwin9.*-64$", "LDFLAGS", "-arch x86_64"}, | |||
%% | |||
%% %% OS X Snow Leopard flags for 32-bit | |||
%% {"darwin10.*-32$", "CXXFLAGS", "-m32"}, | |||
%% {"darwin10.*-32$", "LDFLAGS", "-arch i386"}, | |||
%% | |||
%% {"win32", "CXXFLAGS", "$CXXFLAGS /O2 /DNDEBUG"} | |||
%% ]}. | |||
@ -1,843 +0,0 @@ | |||
/* | |||
* Hash Table Data Type | |||
* Copyright (C) 1997 Kaz Kylheku <kaz@ashi.footprints.net> | |||
* | |||
* Free Software License: | |||
* | |||
* All rights are reserved by the author, with the following exceptions: | |||
* Permission is granted to freely reproduce and distribute this software, | |||
* possibly in exchange for a fee, provided that this copyright notice appears | |||
* intact. Permission is also granted to adapt this software to produce | |||
* derivative works, as long as the modified versions carry this copyright | |||
* notice and additional notices stating that the work has been modified. | |||
* This source code may be translated into executable form and incorporated | |||
* into proprietary software; there is no requirement for such software to | |||
* contain a copyright notice related to this source. | |||
* | |||
* $Id: hash.c,v 1.36.2.11 2000/11/13 01:36:45 kaz Exp $ | |||
* $Name: kazlib_1_20 $ | |||
*/ | |||
#include <stdlib.h> | |||
#include <stddef.h> | |||
#include <assert.h> | |||
#include <string.h> | |||
#define HASH_IMPLEMENTATION
#include "hash.h"

#ifdef KAZLIB_RCSID
static const char rcsid[] = "$Id: hash.c,v 1.36.2.11 2000/11/13 01:36:45 kaz Exp $";
#endif

/* Initial table geometry: 2^INIT_BITS chains. */
#define INIT_BITS 6
#define INIT_SIZE (1UL << (INIT_BITS)) /* must be power of two */
#define INIT_MASK ((INIT_SIZE) - 1)

/* Shorthand aliases for the hash_-prefixed struct members declared in
 * hash.h, so the implementation can use the short historical names. */
#define next hash_next
#define key hash_key
#define data hash_data
#define hkey hash_hkey
#define table hash_table
#define nchains hash_nchains
#define nodecount hash_nodecount
#define maxcount hash_maxcount
#define highmark hash_highmark
#define lowmark hash_lowmark
#define compare hash_compare
#define function hash_function
#define allocnode hash_allocnode
#define freenode hash_freenode
#define context hash_context
#define mask hash_mask
#define dynamic hash_dynamic
/* NOTE(review): 'table' is defined twice (see above) — the duplicate is
 * harmless because the expansion is identical, but it should be removed. */
#define table hash_table
#define chain hash_chain
static hnode_t *kl_hnode_alloc(void *context); | |||
static void kl_hnode_free(hnode_t *node, void *context); | |||
static hash_val_t hash_fun_default(const void *key); | |||
static int hash_comp_default(const void *key1, const void *key2); | |||
int hash_val_t_bit; | |||
/* | |||
* Compute the number of bits in the hash_val_t type. We know that hash_val_t | |||
* is an unsigned integral type. Thus the highest value it can hold is a | |||
* Mersenne number (power of two, less one). We initialize a hash_val_t | |||
* object with this value and then shift bits out one by one while counting. | |||
* Notes: | |||
* 1. HASH_VAL_T_MAX is a Mersenne number---one that is one less than a power | |||
* of two. This means that its binary representation consists of all one | |||
* bits, and hence ``val'' is initialized to all one bits. | |||
* 2. While bits remain in val, we increment the bit count and shift it to the | |||
* right, replacing the topmost bit by zero. | |||
*/ | |||
static void compute_bits(void) | |||
{ | |||
hash_val_t val = HASH_VAL_T_MAX; /* 1 */ | |||
int bits = 0; | |||
while (val) { /* 2 */ | |||
bits++; | |||
val >>= 1; | |||
} | |||
hash_val_t_bit = bits; | |||
} | |||
/* | |||
* Verify whether the given argument is a power of two. | |||
*/ | |||
/*
 * Verify whether the given argument is a power of two.
 * A power of two has exactly one bit set, so clearing its lowest set bit
 * (arg & (arg - 1)) must yield zero; zero itself is excluded.
 */
static int is_power_of_two(hash_val_t arg)
{
    return arg != 0 && (arg & (arg - 1)) == 0;
}
/* | |||
* Compute a shift amount from a given table size | |||
*/ | |||
/*
 * Derive the chain-index mask from a table size, which must be a power of
 * two no smaller than 2; the mask is simply size - 1 (all low bits set).
 */
static hash_val_t compute_mask(hashcount_t size)
{
    assert (size >= 2);
    assert (is_power_of_two(size));

    return size - 1;
}
/* | |||
* Initialize the table of pointers to null. | |||
*/ | |||
/*
 * Reset every chain-head pointer in the table to null.
 */
static void clear_table(hash_t *hash)
{
    hash_val_t slot;

    for (slot = 0; slot < hash->nchains; slot++)
        hash->table[slot] = NULL;
}
/* | |||
* Double the size of a dynamic table. This works as follows. Each chain splits | |||
* into two adjacent chains. The shift amount increases by one, exposing an | |||
* additional bit of each hashed key. For each node in the original chain, the | |||
* value of this newly exposed bit will decide which of the two new chains will | |||
* receive the node: if the bit is 1, the chain with the higher index will have | |||
* the node, otherwise the lower chain will receive the node. In this manner, | |||
* the hash table will continue to function exactly as before without having to | |||
* rehash any of the keys. | |||
* Notes: | |||
* 1. Overflow check. | |||
* 2. The new number of chains is twice the old number of chains. | |||
* 3. The new mask is one bit wider than the previous, revealing a | |||
* new bit in all hashed keys. | |||
* 4. Allocate a new table of chain pointers that is twice as large as the | |||
* previous one. | |||
* 5. If the reallocation was successful, we perform the rest of the growth | |||
* algorithm, otherwise we do nothing. | |||
* 6. The exposed_bit variable holds a mask with which each hashed key can be | |||
* AND-ed to test the value of its newly exposed bit. | |||
* 7. Now loop over each chain in the table and sort its nodes into two | |||
* chains based on the value of each node's newly exposed hash bit. | |||
* 8. The low chain replaces the current chain. The high chain goes | |||
* into the corresponding sister chain in the upper half of the table. | |||
* 9. We have finished dealing with the chains and nodes. We now update | |||
* the various bookeeping fields of the hash structure. | |||
*/ | |||
/* Double a dynamic table in place (see the numbered notes above). Nodes
 * are redistributed between each chain and its new sister chain by the
 * newly exposed hash bit, with no rehashing. If realloc fails the table
 * is left unchanged; growth will simply be retried on a later insert. */
static void grow_table(hash_t *hash)
{
    hnode_t **newtable;

    assert (2 * hash->nchains > hash->nchains);    /* 1 */

    /* realloc preserves the old chain pointers, so newtable[chain] below
     * reads the original chains even after the block has moved. */
    newtable = realloc(hash->table,
        sizeof *newtable * hash->nchains * 2);    /* 4 */

    if (newtable) {    /* 5 */
        hash_val_t mask = (hash->mask << 1) | 1;    /* 3 */
        hash_val_t exposed_bit = mask ^ hash->mask;    /* 6 */
        hash_val_t chain;

        assert (mask != hash->mask);

        for (chain = 0; chain < hash->nchains; chain++) {    /* 7 */
            hnode_t *low_chain = 0, *high_chain = 0, *hptr, *next;

            for (hptr = newtable[chain]; hptr != 0; hptr = next) {
                next = hptr->next;
                if (hptr->hkey & exposed_bit) {
                    hptr->next = high_chain;
                    high_chain = hptr;
                } else {
                    hptr->next = low_chain;
                    low_chain = hptr;
                }
            }
            newtable[chain] = low_chain;    /* 8 */
            newtable[chain + hash->nchains] = high_chain;
        }
        hash->table = newtable;    /* 9 */
        hash->mask = mask;
        hash->nchains *= 2;
        hash->lowmark *= 2;
        hash->highmark *= 2;
    }
    assert (kl_hash_verify(hash));
}
/* | |||
* Cut a table size in half. This is done by folding together adjacent chains | |||
* and populating the lower half of the table with these chains. The chains are | |||
* simply spliced together. Once this is done, the whole table is reallocated | |||
* to a smaller object. | |||
* Notes: | |||
* 1. It is illegal to have a hash table with one slot. This would mean that | |||
* hash->shift is equal to hash_val_t_bit, an illegal shift value. | |||
* Also, other things could go wrong, such as hash->lowmark becoming zero. | |||
* 2. Looping over each pair of sister chains, the low_chain is set to | |||
* point to the head node of the chain in the lower half of the table, | |||
* and high_chain points to the head node of the sister in the upper half. | |||
* 3. The intent here is to compute a pointer to the last node of the | |||
* lower chain into the low_tail variable. If this chain is empty, | |||
* low_tail ends up with a null value. | |||
* 4. If the lower chain is not empty, we simply tack the upper chain onto it. | |||
* If the upper chain is a null pointer, nothing happens. | |||
* 5. Otherwise if the lower chain is empty but the upper one is not, | |||
* If the low chain is empty, but the high chain is not, then the | |||
* high chain is simply transferred to the lower half of the table. | |||
* 6. Otherwise if both chains are empty, there is nothing to do. | |||
* 7. All the chain pointers are in the lower half of the table now, so | |||
* we reallocate it to a smaller object. This, of course, invalidates | |||
* all pointer-to-pointers which reference into the table from the | |||
* first node of each chain. | |||
* 8. Though it's unlikely, the reallocation may fail. In this case we | |||
* pretend that the table _was_ reallocated to a smaller object. | |||
* 9. Finally, update the various table parameters to reflect the new size. | |||
*/ | |||
/* Halve a dynamic table (see the numbered notes above): each upper-half
 * chain is spliced onto the end of its lower-half sister, then the
 * pointer array is shrunk with realloc. A failed realloc is tolerated —
 * the table keeps the larger allocation but behaves as the smaller size. */
static void shrink_table(hash_t *hash)
{
    hash_val_t chain, nchains;
    hnode_t **newtable, *low_tail, *low_chain, *high_chain;

    assert (hash->nchains >= 2);    /* 1 */

    nchains = hash->nchains / 2;

    for (chain = 0; chain < nchains; chain++) {
        low_chain = hash->table[chain];    /* 2 */
        high_chain = hash->table[chain + nchains];
        /* Walk to the last node of the low chain (null if chain empty). */
        for (low_tail = low_chain; low_tail && low_tail->next; low_tail = low_tail->next)
            ;    /* 3 */
        if (low_chain != 0)    /* 4 */
            low_tail->next = high_chain;
        else if (high_chain != 0)    /* 5 */
            hash->table[chain] = high_chain;
        else
            assert (hash->table[chain] == NULL);    /* 6 */
    }

    newtable = realloc(hash->table,
        sizeof *newtable * nchains);    /* 7 */
    if (newtable)    /* 8 */
        hash->table = newtable;

    hash->mask >>= 1;    /* 9 */
    hash->nchains = nchains;
    hash->lowmark /= 2;
    hash->highmark /= 2;

    assert (kl_hash_verify(hash));
}
/* | |||
* Create a dynamic hash table. Both the hash table structure and the table | |||
* itself are dynamically allocated. Furthermore, the table is extendible in | |||
* that it will automatically grow as its load factor increases beyond a | |||
* certain threshold. | |||
* Notes: | |||
* 1. If the number of bits in the hash_val_t type has not been computed yet, | |||
* we do so here, because this is likely to be the first function that the | |||
* user calls. | |||
* 2. Allocate a hash table control structure. | |||
* 3. If a hash table control structure is successfully allocated, we | |||
* proceed to initialize it. Otherwise we return a null pointer. | |||
* 4. We try to allocate the table of hash chains. | |||
* 5. If we were able to allocate the hash chain table, we can finish | |||
* initializing the hash structure and the table. Otherwise, we must | |||
* backtrack by freeing the hash structure. | |||
* 6. INIT_SIZE should be a power of two. The high and low marks are always set | |||
* to be twice the table size and half the table size respectively. When the | |||
* number of nodes in the table grows beyond the high size (beyond load | |||
* factor 2), it will double in size to cut the load factor down to about | |||
* about 1. If the table shrinks down to or beneath load factor 0.5, | |||
* it will shrink, bringing the load up to about 1. However, the table | |||
* will never shrink beneath INIT_SIZE even if it's emptied. | |||
* 7. This indicates that the table is dynamically allocated and dynamically | |||
* resized on the fly. A table that has this value set to zero is | |||
* assumed to be statically allocated and will not be resized. | |||
* 8. The table of chains must be properly reset to all null pointers. | |||
*/ | |||
hash_t *kl_hash_create(hashcount_t maxcount, hash_comp_t compfun, | |||
hash_fun_t hashfun) | |||
{ | |||
hash_t *hash; | |||
if (hash_val_t_bit == 0) /* 1 */ | |||
compute_bits(); | |||
hash = malloc(sizeof *hash); /* 2 */ | |||
if (hash) { /* 3 */ | |||
hash->table = malloc(sizeof *hash->table * INIT_SIZE); /* 4 */ | |||
if (hash->table) { /* 5 */ | |||
hash->nchains = INIT_SIZE; /* 6 */ | |||
hash->highmark = INIT_SIZE * 2; | |||
hash->lowmark = INIT_SIZE / 2; | |||
hash->nodecount = 0; | |||
hash->maxcount = maxcount; | |||
hash->compare = compfun ? compfun : hash_comp_default; | |||
hash->function = hashfun ? hashfun : hash_fun_default; | |||
hash->allocnode = kl_hnode_alloc; | |||
hash->freenode = kl_hnode_free; | |||
hash->context = NULL; | |||
hash->mask = INIT_MASK; | |||
hash->dynamic = 1; /* 7 */ | |||
clear_table(hash); /* 8 */ | |||
assert (kl_hash_verify(hash)); | |||
return hash; | |||
} | |||
free(hash); | |||
} | |||
return NULL; | |||
} | |||
/* | |||
* Select a different set of node allocator routines. | |||
*/ | |||
void kl_hash_set_allocator(hash_t *hash, hnode_alloc_t al, | |||
hnode_free_t fr, void *context) | |||
{ | |||
assert (kl_hash_count(hash) == 0); | |||
assert ((al == 0 && fr == 0) || (al != 0 && fr != 0)); | |||
hash->allocnode = al ? al : kl_hnode_alloc; | |||
hash->freenode = fr ? fr : kl_hnode_free; | |||
hash->context = context; | |||
} | |||
/* | |||
* Free every node in the hash using the hash->freenode() function pointer, and | |||
* cause the hash to become empty. | |||
*/ | |||
/*
 * Remove and free every node via the table's freenode() callback,
 * leaving the table empty but still usable.
 */
void kl_hash_free_nodes(hash_t *hash)
{
    hscan_t scanner;
    hnode_t *victim;

    kl_hash_scan_begin(&scanner, hash);
    for (victim = kl_hash_scan_next(&scanner); victim != NULL;
            victim = kl_hash_scan_next(&scanner)) {
        kl_hash_scan_delete(hash, victim);
        hash->freenode(victim, hash->context);
    }
    hash->nodecount = 0;
    clear_table(hash);
}
/* | |||
* Obsolescent function for removing all nodes from a table, | |||
* freeing them and then freeing the table all in one step. | |||
*/ | |||
/*
 * Obsolescent convenience: free all nodes, then destroy the table itself.
 * Compiling with KAZLIB_OBSOLESCENT_DEBUG turns any call into an assert
 * failure so lingering callers can be found.
 */
void kl_hash_free(hash_t *hash)
{
#ifdef KAZLIB_OBSOLESCENT_DEBUG
    assert ("call to obsolescent function hash_free()" && 0);
#endif
    kl_hash_free_nodes(hash);
    kl_hash_destroy(hash);
}
/* | |||
* Free a dynamic hash table structure. | |||
*/ | |||
/*
 * Free a dynamically created, already-emptied hash table: releases the
 * chain-pointer array and the control structure. The caller must have
 * removed/freed all nodes first (see kl_hash_free_nodes).
 */
void kl_hash_destroy(hash_t *hash)
{
    assert (hash_val_t_bit != 0);   /* module must have been initialized */
    assert (kl_hash_isempty(hash));
    free(hash->table);
    free(hash);
}
/* | |||
* Initialize a user supplied hash structure. The user also supplies a table of | |||
* chains which is assigned to the hash structure. The table is static---it | |||
* will not grow or shrink. | |||
* 1. See note 1. in hash_create(). | |||
* 2. The user supplied array of pointers hopefully contains nchains nodes. | |||
* 3. See note 7. in hash_create(). | |||
* 4. We must dynamically compute the mask from the given power of two table | |||
* size. | |||
* 5. The user supplied table can't be assumed to contain null pointers, | |||
* so we reset it here. | |||
*/ | |||
hash_t *kl_hash_init(hash_t *hash, hashcount_t maxcount, | |||
hash_comp_t compfun, hash_fun_t hashfun, hnode_t **table, | |||
hashcount_t nchains) | |||
{ | |||
if (hash_val_t_bit == 0) /* 1 */ | |||
compute_bits(); | |||
assert (is_power_of_two(nchains)); | |||
hash->table = table; /* 2 */ | |||
hash->nchains = nchains; | |||
hash->nodecount = 0; | |||
hash->maxcount = maxcount; | |||
hash->compare = compfun ? compfun : hash_comp_default; | |||
hash->function = hashfun ? hashfun : hash_fun_default; | |||
hash->dynamic = 0; /* 3 */ | |||
hash->mask = compute_mask(nchains); /* 4 */ | |||
clear_table(hash); /* 5 */ | |||
assert (kl_hash_verify(hash)); | |||
return hash; | |||
} | |||
/* | |||
* Reset the hash scanner so that the next element retrieved by | |||
* hash_scan_next() shall be the first element on the first non-empty chain. | |||
* Notes: | |||
* 1. Locate the first non empty chain. | |||
* 2. If an empty chain is found, remember which one it is and set the next | |||
* pointer to refer to its first element. | |||
* 3. Otherwise if a chain is not found, set the next pointer to NULL | |||
* so that hash_scan_next() shall indicate failure. | |||
*/ | |||
/*
 * Position the scanner at the first node of the first non-empty chain,
 * so the next kl_hash_scan_next() returns it (or null if the table is
 * empty).
 */
void kl_hash_scan_begin(hscan_t *scan, hash_t *hash)
{
    hash_val_t slot = 0;
    hash_val_t nslots = hash->nchains;

    scan->table = hash;

    /* Skip leading empty chains. */
    while (slot < nslots && hash->table[slot] == NULL)
        slot++;

    if (slot == nslots) {
        scan->next = NULL;          /* empty table */
    } else {
        scan->chain = slot;
        scan->next = hash->table[slot];
    }
}
/* | |||
* Retrieve the next node from the hash table, and update the pointer | |||
* for the next invocation of hash_scan_next(). | |||
* Notes: | |||
* 1. Remember the next pointer in a temporary value so that it can be | |||
* returned. | |||
* 2. This assertion essentially checks whether the module has been properly | |||
* initialized. The first point of interaction with the module should be | |||
* either hash_create() or hash_init(), both of which set hash_val_t_bit to | |||
* a non zero value. | |||
* 3. If the next pointer we are returning is not NULL, then the user is | |||
* allowed to call hash_scan_next() again. We prepare the new next pointer | |||
* for that call right now. That way the user is allowed to delete the node | |||
* we are about to return, since we will no longer be needing it to locate | |||
* the next node. | |||
* 4. If there is a next node in the chain (next->next), then that becomes the | |||
* new next node, otherwise ... | |||
* 5. We have exhausted the current chain, and must locate the next subsequent | |||
* non-empty chain in the table. | |||
* 6. If a non-empty chain is found, the first element of that chain becomes | |||
* the new next node. Otherwise there is no new next node and we set the | |||
* pointer to NULL so that the next time hash_scan_next() is called, a null | |||
* pointer shall be immediately returned. | |||
*/ | |||
/*
 * Return the scanner's current node (or null when exhausted) and advance
 * the scanner. The returned node may safely be deleted by the caller,
 * since the successor is computed before returning.
 */
hnode_t *kl_hash_scan_next(hscan_t *scan)
{
    hash_t *hash = scan->table;
    hnode_t *current = scan->next;

    assert (hash_val_t_bit != 0);   /* module must have been initialized */

    if (current == NULL)
        return NULL;

    if (current->next != NULL) {
        /* More nodes on the same chain. */
        scan->next = current->next;
        return current;
    }

    /* Chain exhausted: find the next non-empty chain, if any. */
    {
        hash_val_t slot = scan->chain + 1;
        hash_val_t nslots = hash->nchains;

        while (slot < nslots && hash->table[slot] == 0)
            slot++;

        if (slot < nslots) {
            scan->chain = slot;
            scan->next = hash->table[slot];
        } else {
            scan->next = NULL;      /* scan finished */
        }
    }
    return current;
}
/* | |||
* Insert a node into the hash table. | |||
* Notes: | |||
* 1. It's illegal to insert more than the maximum number of nodes. The client | |||
* should verify that the hash table is not full before attempting an | |||
* insertion. | |||
* 2. The same key may not be inserted into a table twice. | |||
* 3. If the table is dynamic and the load factor is already at >= 2, | |||
* grow the table. | |||
* 4. We take the bottom N bits of the hash value to derive the chain index, | |||
* where N is the base 2 logarithm of the size of the hash table. | |||
*/ | |||
/* Insert node under key at the head of its chain (see the numbered notes
 * above). Preconditions (asserted): the table is not full, the key is not
 * already present, and node is not currently linked into any table. May
 * trigger growth of a dynamic table before linking. */
void kl_hash_insert(hash_t *hash, hnode_t *node, const void *key)
{
    hash_val_t hkey, chain;

    assert (hash_val_t_bit != 0);
    assert (node->next == NULL);
    assert (hash->nodecount < hash->maxcount);    /* 1 */
    assert (kl_hash_lookup(hash, key) == NULL);    /* 2 */

    if (hash->dynamic && hash->nodecount >= hash->highmark)    /* 3 */
        grow_table(hash);

    hkey = hash->function(key);
    chain = hkey & hash->mask;    /* 4 */

    /* Link at the chain head; the full hash is cached in the node so
     * lookup can skip key comparisons on mismatched hashes. */
    node->key = key;
    node->hkey = hkey;
    node->next = hash->table[chain];
    hash->table[chain] = node;

    hash->nodecount++;
    assert (kl_hash_verify(hash));
}
/* | |||
* Find a node in the hash table and return a pointer to it. | |||
* Notes: | |||
* 1. We hash the key and keep the entire hash value. As an optimization, when | |||
* we descend down the chain, we can compare hash values first and only if | |||
* hash values match do we perform a full key comparison. | |||
* 2. To locate the chain from among 2^N chains, we look at the lower N bits of | |||
* the hash value by anding them with the current mask. | |||
* 3. Looping through the chain, we compare the stored hash value inside each | |||
* node against our computed hash. If they match, then we do a full | |||
* comparison between the unhashed keys. If these match, we have located the | |||
* entry. | |||
*/ | |||
/*
 * Find the node stored under key, or return null. The cached full hash in
 * each node is compared first so the (potentially expensive) key
 * comparison only runs on hash matches.
 */
hnode_t *kl_hash_lookup(hash_t *hash, const void *key)
{
    hash_val_t full_hash = hash->function(key);
    hnode_t *cursor = hash->table[full_hash & hash->mask];

    while (cursor != NULL) {
        if (cursor->hkey == full_hash
                && hash->compare(cursor->key, key) == 0)
            return cursor;
        cursor = cursor->next;
    }
    return NULL;
}
/* | |||
* Delete the given node from the hash table. Since the chains | |||
* are singly linked, we must locate the start of the node's chain | |||
* and traverse. | |||
* Notes: | |||
* 1. The node must belong to this hash table, and its key must not have | |||
* been tampered with. | |||
* 2. If this deletion will take the node count below the low mark, we | |||
* shrink the table now. | |||
* 3. Determine which chain the node belongs to, and fetch the pointer | |||
* to the first node in this chain. | |||
* 4. If the node being deleted is the first node in the chain, then | |||
* simply update the chain head pointer. | |||
* 5. Otherwise advance to the node's predecessor, and splice out | |||
* by updating the predecessor's next pointer. | |||
* 6. Indicate that the node is no longer in a hash table. | |||
*/ | |||
/*
 * Unlink the given node from the hash table and return it (the caller
 * owns it afterwards). Since chains are singly linked, the node's
 * predecessor is found by walking its chain. May shrink a dynamic table
 * first (see the numbered notes above).
 *
 * Fix: the predecessor walk used to do `assert(hptr != 0)` *after* the
 * loop condition had already dereferenced hptr, so the assertion could
 * never fire. It now checks the forward link before following it, which
 * actually catches a node that is not on its own chain (corruption or a
 * tampered key).
 */
hnode_t *kl_hash_delete(hash_t *hash, hnode_t *node)
{
    hash_val_t chain;
    hnode_t *hptr;

    assert (kl_hash_lookup(hash, node->key) == node);    /* 1 */
    assert (hash_val_t_bit != 0);

    if (hash->dynamic && hash->nodecount <= hash->lowmark
            && hash->nodecount > INIT_SIZE)
        shrink_table(hash);    /* 2 */

    chain = node->hkey & hash->mask;    /* 3 */
    hptr = hash->table[chain];

    if (hptr == node) {    /* 4 */
        hash->table[chain] = node->next;
    } else {
        /* 5: advance to the predecessor, then splice the node out. */
        while (hptr->next != node) {
            assert (hptr->next != NULL);    /* node must be on this chain */
            hptr = hptr->next;
        }
        hptr->next = node->next;
    }

    hash->nodecount--;
    assert (kl_hash_verify(hash));

    node->next = NULL;    /* 6: mark as unlinked */
    return node;
}
/*
 * Allocate a node with the table's allocnode() callback, attach data, and
 * insert it under key. Returns 1 on success, 0 if allocation failed.
 */
int kl_hash_alloc_insert(hash_t *hash, const void *key, void *data)
{
    hnode_t *fresh = hash->allocnode(hash->context);

    if (fresh == NULL)
        return 0;

    kl_hnode_init(fresh, data);
    kl_hash_insert(hash, fresh, key);
    return 1;
}
/*
 * Unlink a node, then release it through the table's freenode() callback.
 */
void kl_hash_delete_free(hash_t *hash, hnode_t *node)
{
    hnode_t *removed = kl_hash_delete(hash, node);
    hash->freenode(removed, hash->context);
}
/* | |||
* Exactly like hash_delete, except does not trigger table shrinkage. This is to be | |||
* used from within a hash table scan operation. See notes for hash_delete. | |||
*/ | |||
/*
 * Exactly like hash_delete, except does not trigger table shrinkage. This is to be
 * used from within a hash table scan operation (shrinking would invalidate
 * the scanner's chain index). See notes for hash_delete.
 */
hnode_t *kl_hash_scan_delete(hash_t *hash, hnode_t *node)
{
    hash_val_t chain;
    hnode_t *hptr;

    assert (kl_hash_lookup(hash, node->key) == node);
    assert (hash_val_t_bit != 0);

    chain = node->hkey & hash->mask;
    hptr = hash->table[chain];

    if (hptr == node) {
        /* Node is the chain head: relink the head. */
        hash->table[chain] = node->next;
    } else {
        /* Walk to the predecessor and splice the node out. */
        while (hptr->next != node)
            hptr = hptr->next;
        hptr->next = node->next;
    }

    hash->nodecount--;
    assert (kl_hash_verify(hash));

    node->next = NULL;  /* mark as unlinked */
    return node;
}
/* | |||
* Like hash_delete_free but based on hash_scan_delete. | |||
*/ | |||
/*
 * Scan-safe counterpart of kl_hash_delete_free(): remove the node
 * without shrinking the table, then release it via the deallocator.
 */
void kl_hash_scan_delfree(hash_t *hash, hnode_t *node)
{
    hash->freenode(kl_hash_scan_delete(hash, node), hash->context);
}
/* | |||
 * Verify whether the given object is a valid hash table.
 * Notes:
* 1. If the hash table is dynamic, verify whether the high and | |||
* low expansion/shrinkage thresholds are powers of two. | |||
* 2. Count all nodes in the table, and test each hash value | |||
* to see whether it is correct for the node's chain. | |||
*/ | |||
/*
 * Sanity-check the table: validate the dynamic thresholds and confirm
 * that every node sits in the chain its hash value selects and that the
 * node count matches.  Returns 1 if consistent, 0 otherwise.
 */
int kl_hash_verify(hash_t *hash)
{
    hashcount_t seen = 0;
    hash_val_t chain;
    hnode_t *walk;

    if (hash->dynamic) {	/* 1 */
	if (hash->lowmark >= hash->highmark
		|| !is_power_of_two(hash->highmark)
		|| !is_power_of_two(hash->lowmark))
	    return 0;
    }

    for (chain = 0; chain < hash->nchains; chain++) {	/* 2 */
	for (walk = hash->table[chain]; walk != 0; walk = walk->next) {
	    if ((walk->hkey & hash->mask) != chain)
		return 0;
	    seen++;
	}
    }

    return seen == hash->nodecount;
}
/* | |||
* Test whether the hash table is full and return 1 if this is true, | |||
* 0 if it is false. | |||
*/ | |||
#undef kl_hash_isfull
/* Out-of-line version of the kl_hash_isfull() macro. */
int kl_hash_isfull(hash_t *hash)
{
    return (hash->nodecount == hash->maxcount) ? 1 : 0;
}
/* | |||
* Test whether the hash table is empty and return 1 if this is true, | |||
* 0 if it is false. | |||
*/ | |||
#undef kl_hash_isempty
/* Out-of-line version of the kl_hash_isempty() macro. */
int kl_hash_isempty(hash_t *hash)
{
    return (hash->nodecount == 0) ? 1 : 0;
}
static hnode_t *kl_hnode_alloc(void *context) | |||
{ | |||
return malloc(sizeof *kl_hnode_alloc(NULL)); | |||
} | |||
/* Default node deallocator, paired with kl_hnode_alloc(). */
static void kl_hnode_free(hnode_t *node, void *context)
{
    (void) context;	/* unused */
    free(node);
}
/* | |||
* Create a hash table node dynamically and assign it the given data. | |||
*/ | |||
hnode_t *kl_hnode_create(void *data) | |||
{ | |||
hnode_t *node = malloc(sizeof *node); | |||
if (node) { | |||
node->data = data; | |||
node->next = NULL; | |||
} | |||
return node; | |||
} | |||
/* | |||
* Initialize a client-supplied node | |||
*/ | |||
/*
 * Prepare a caller-supplied node for insertion, attaching its payload.
 * Returns the same node for call chaining.
 */
hnode_t *kl_hnode_init(hnode_t *node, void *data)
{
    node->next = NULL;
    node->data = data;
    return node;
}
/* | |||
* Destroy a dynamically allocated node. | |||
*/ | |||
void kl_hnode_destroy(hnode_t *hnode)
{
    /* free(NULL) is a no-op, so a NULL argument is harmless. */
    free(hnode);
}
#undef kl_hnode_put
/* Out-of-line version of the kl_hnode_put() macro: replace the payload. */
void kl_hnode_put(hnode_t *node, void *data)
{
    node->data = data;
}
#undef kl_hnode_get
/* Out-of-line version of the kl_hnode_get() macro: fetch the payload. */
void *kl_hnode_get(hnode_t *node)
{
    return node->data;
}
#undef kl_hnode_getkey
/* Out-of-line version of the kl_hnode_getkey() macro: fetch the key. */
const void *kl_hnode_getkey(hnode_t *node)
{
    return node->key;
}
#undef kl_hash_count
/* Out-of-line version of the kl_hash_count() macro: number of nodes stored. */
hashcount_t kl_hash_count(hash_t *hash)
{
    return hash->nodecount;
}
#undef kl_hash_size
/* Out-of-line version of the kl_hash_size() macro: number of chains (buckets). */
hashcount_t kl_hash_size(hash_t *hash)
{
    return hash->nchains;
}
/*
 * Default hashing function: mixes the key — treated as a NUL-terminated
 * string — one byte (two nibbles) at a time through a fixed table of 16
 * random 32-bit constants.
 */
static hash_val_t hash_fun_default(const void *key)
{
    static unsigned long randbox[] = {
	0x49848f1bU, 0xe6255dbaU, 0x36da5bdcU, 0x47bf94e9U,
	0x8cbcce22U, 0x559fc06aU, 0xd268f536U, 0xe10af79aU,
	0xc1af4d69U, 0x1d2917b5U, 0xec4c304dU, 0x9ee5016cU,
	0x69232f74U, 0xfead7bb3U, 0xe9089ab6U, 0xf012f6aeU,
    };

    const unsigned char *str = key;
    hash_val_t acc = 0;

    while (*str) {
	acc ^= randbox[(*str + acc) & 0xf];
	/* Together with the mask below this is a 32-bit rotate-left by 1,
	 * even when hash_val_t is wider than 32 bits. */
	acc = (acc << 1) | (acc >> 31);
	acc &= 0xffffffffU;
	acc ^= randbox[((*str++ >> 4) + acc) & 0xf];
	/* 32-bit rotate-left by 2 (with the mask below). */
	acc = (acc << 2) | (acc >> 30);
	acc &= 0xffffffffU;
    }
    return acc;
}
/*
 * Default comparison function: keys are NUL-terminated strings, ordered
 * lexicographically.  The sign of the result follows strcmp().
 */
static int hash_comp_default(const void *key1, const void *key2)
{
    const char *left = key1;
    const char *right = key2;

    return strcmp(left, right);
}
@ -1,240 +0,0 @@ | |||
/* | |||
* Hash Table Data Type | |||
* Copyright (C) 1997 Kaz Kylheku <kaz@ashi.footprints.net> | |||
* | |||
* Free Software License: | |||
* | |||
* All rights are reserved by the author, with the following exceptions: | |||
* Permission is granted to freely reproduce and distribute this software, | |||
* possibly in exchange for a fee, provided that this copyright notice appears | |||
* intact. Permission is also granted to adapt this software to produce | |||
* derivative works, as long as the modified versions carry this copyright | |||
* notice and additional notices stating that the work has been modified. | |||
* This source code may be translated into executable form and incorporated | |||
* into proprietary software; there is no requirement for such software to | |||
* contain a copyright notice related to this source. | |||
* | |||
* $Id: hash.h,v 1.22.2.7 2000/11/13 01:36:45 kaz Exp $ | |||
* $Name: kazlib_1_20 $ | |||
*/ | |||
#ifndef HASH_H | |||
#define HASH_H | |||
#include <limits.h> | |||
#ifdef KAZLIB_SIDEEFFECT_DEBUG | |||
#include "sfx.h" | |||
#endif | |||
/* | |||
* Blurb for inclusion into C++ translation units | |||
*/ | |||
#ifdef __cplusplus | |||
extern "C" { | |||
#endif | |||
typedef unsigned long hashcount_t; | |||
#define HASHCOUNT_T_MAX ULONG_MAX | |||
typedef unsigned long hash_val_t; | |||
#define HASH_VAL_T_MAX ULONG_MAX | |||
extern int hash_val_t_bit; | |||
#ifndef HASH_VAL_T_BIT | |||
#define HASH_VAL_T_BIT ((int) hash_val_t_bit) | |||
#endif | |||
/* | |||
* Hash chain node structure. | |||
* Notes: | |||
* 1. This preprocessing directive is for debugging purposes. The effect is | |||
* that if the preprocessor symbol KAZLIB_OPAQUE_DEBUG is defined prior to the | |||
* inclusion of this header, then the structure shall be declared as having | |||
* the single member int __OPAQUE__. This way, any attempts by the | |||
* client code to violate the principles of information hiding (by accessing | |||
* the structure directly) can be diagnosed at translation time. However, | |||
* note the resulting compiled unit is not suitable for linking. | |||
* 2. This is a pointer to the next node in the chain. In the last node of a | |||
* chain, this pointer is null. | |||
* 3. The key is a pointer to some user supplied data that contains a unique | |||
* identifier for each hash node in a given table. The interpretation of | |||
* the data is up to the user. When creating or initializing a hash table, | |||
* the user must supply a pointer to a function for comparing two keys, | |||
* and a pointer to a function for hashing a key into a numeric value. | |||
* 4. The value is a user-supplied pointer to void which may refer to | |||
* any data object. It is not interpreted in any way by the hashing | |||
* module. | |||
* 5. The hashed key is stored in each node so that we don't have to rehash | |||
* each key when the table must grow or shrink. | |||
*/ | |||
/* A single hash-chain node; see the numbered notes above. */
typedef struct hnode_t {
    #if defined(HASH_IMPLEMENTATION) || !defined(KAZLIB_OPAQUE_DEBUG) /* 1 */
    struct hnode_t *hash_next;		/* 2 */
    const void *hash_key;		/* 3 */
    void *hash_data;			/* 4 */
    hash_val_t hash_hkey;		/* 5 */
    #else
    int hash_dummy;
    #endif
} hnode_t;
/* | |||
* The comparison function pointer type. A comparison function takes two keys | |||
* and produces a value of -1 if the left key is less than the right key, a | |||
* value of 0 if the keys are equal, and a value of 1 if the left key is | |||
* greater than the right key. | |||
*/ | |||
typedef int (*hash_comp_t)(const void *, const void *); | |||
/* | |||
* The hashing function performs some computation on a key and produces an | |||
* integral value of type hash_val_t based on that key. For best results, the | |||
 * function should have good randomness properties in *all* significant bits
* over the set of keys that are being inserted into a given hash table. In | |||
* particular, the most significant bits of hash_val_t are most significant to | |||
* the hash module. Only as the hash table expands are less significant bits | |||
* examined. Thus a function that has good distribution in its upper bits but | |||
 * not lower is preferable to one that has poor distribution in the upper bits
* but not the lower ones. | |||
*/ | |||
typedef hash_val_t (*hash_fun_t)(const void *); | |||
/* | |||
* allocator functions | |||
*/ | |||
typedef hnode_t *(*hnode_alloc_t)(void *); | |||
typedef void (*hnode_free_t)(hnode_t *, void *); | |||
/* | |||
* This is the hash table control structure. It keeps track of information | |||
* about a hash table, as well as the hash table itself. | |||
* Notes: | |||
* 1. Pointer to the hash table proper. The table is an array of pointers to | |||
* hash nodes (of type hnode_t). If the table is empty, every element of | |||
* this table is a null pointer. A non-null entry points to the first | |||
* element of a chain of nodes. | |||
* 2. This member keeps track of the size of the hash table---that is, the | |||
* number of chain pointers. | |||
* 3. The count member maintains the number of elements that are presently | |||
* in the hash table. | |||
* 4. The maximum count is the greatest number of nodes that can populate this | |||
* table. If the table contains this many nodes, no more can be inserted, | |||
* and the hash_isfull() function returns true. | |||
* 5. The high mark is a population threshold, measured as a number of nodes, | |||
* which, if exceeded, will trigger a table expansion. Only dynamic hash | |||
* tables are subject to this expansion. | |||
* 6. The low mark is a minimum population threshold, measured as a number of | |||
* nodes. If the table population drops below this value, a table shrinkage | |||
* will occur. Only dynamic tables are subject to this reduction. No table | |||
* will shrink beneath a certain absolute minimum number of nodes. | |||
 * 7. This is a pointer to the hash table's comparison function. The
* function is set once at initialization or creation time. | |||
* 8. Pointer to the table's hashing function, set once at creation or | |||
* initialization time. | |||
* 9. The current hash table mask. If the size of the hash table is 2^N, | |||
* this value has its low N bits set to 1, and the others clear. It is used | |||
* to select bits from the result of the hashing function to compute an | |||
* index into the table. | |||
* 10. A flag which indicates whether the table is to be dynamically resized. It | |||
* is set to 1 in dynamically allocated tables, 0 in tables that are | |||
* statically allocated. | |||
*/ | |||
/* Hash table control structure; see the numbered notes above. */
typedef struct hash_t {
    #if defined(HASH_IMPLEMENTATION) || !defined(KAZLIB_OPAQUE_DEBUG)
    struct hnode_t **hash_table;		/* 1 */
    hashcount_t hash_nchains;			/* 2 */
    hashcount_t hash_nodecount;			/* 3 */
    hashcount_t hash_maxcount;			/* 4 */
    hashcount_t hash_highmark;			/* 5 */
    hashcount_t hash_lowmark;			/* 6 */
    hash_comp_t hash_compare;			/* 7 */
    hash_fun_t hash_function;			/* 8 */
    hnode_alloc_t hash_allocnode;
    hnode_free_t hash_freenode;
    void *hash_context;
    hash_val_t hash_mask;			/* 9 */
    int hash_dynamic;				/* 10 */
    #else
    int hash_dummy;
    #endif
} hash_t;
/* | |||
* Hash scanner structure, used for traversals of the data structure. | |||
* Notes: | |||
* 1. Pointer to the hash table that is being traversed. | |||
* 2. Reference to the current chain in the table being traversed (the chain | |||
* that contains the next node that shall be retrieved). | |||
* 3. Pointer to the node that will be retrieved by the subsequent call to | |||
* hash_scan_next(). | |||
*/ | |||
/* Traversal (scan) state; see the numbered notes above. */
typedef struct hscan_t {
    #if defined(HASH_IMPLEMENTATION) || !defined(KAZLIB_OPAQUE_DEBUG)
    hash_t *hash_table;		/* 1 */
    hash_val_t hash_chain;	/* 2 */
    hnode_t *hash_next;		/* 3 */
    #else
    int hash_dummy;
    #endif
} hscan_t;
extern hash_t *kl_hash_create(hashcount_t, hash_comp_t, hash_fun_t); | |||
extern void kl_hash_set_allocator(hash_t *, hnode_alloc_t, hnode_free_t, void *); | |||
extern void kl_hash_destroy(hash_t *); | |||
extern void kl_hash_free_nodes(hash_t *); | |||
extern void kl_hash_free(hash_t *); | |||
extern hash_t *kl_hash_init(hash_t *, hashcount_t, hash_comp_t, | |||
hash_fun_t, hnode_t **, hashcount_t); | |||
extern void kl_hash_insert(hash_t *, hnode_t *, const void *); | |||
extern hnode_t *kl_hash_lookup(hash_t *, const void *); | |||
extern hnode_t *kl_hash_delete(hash_t *, hnode_t *); | |||
extern int kl_hash_alloc_insert(hash_t *, const void *, void *); | |||
extern void kl_hash_delete_free(hash_t *, hnode_t *); | |||
extern void kl_hnode_put(hnode_t *, void *); | |||
extern void *kl_hnode_get(hnode_t *); | |||
extern const void *kl_hnode_getkey(hnode_t *); | |||
extern hashcount_t kl_hash_count(hash_t *); | |||
extern hashcount_t kl_hash_size(hash_t *); | |||
extern int kl_hash_isfull(hash_t *); | |||
extern int kl_hash_isempty(hash_t *); | |||
extern void kl_hash_scan_begin(hscan_t *, hash_t *); | |||
extern hnode_t *kl_hash_scan_next(hscan_t *); | |||
extern hnode_t *kl_hash_scan_delete(hash_t *, hnode_t *); | |||
extern void kl_hash_scan_delfree(hash_t *, hnode_t *); | |||
extern int kl_hash_verify(hash_t *); | |||
extern hnode_t *kl_hnode_create(void *); | |||
extern hnode_t *kl_hnode_init(hnode_t *, void *); | |||
extern void kl_hnode_destroy(hnode_t *); | |||
#if defined(HASH_IMPLEMENTATION) || !defined(KAZLIB_OPAQUE_DEBUG) | |||
#ifdef KAZLIB_SIDEEFFECT_DEBUG | |||
#define kl_hash_isfull(H) (SFX_CHECK(H)->hash_nodecount == (H)->hash_maxcount) | |||
#else | |||
#define kl_hash_isfull(H) ((H)->hash_nodecount == (H)->hash_maxcount) | |||
#endif | |||
#define kl_hash_isempty(H) ((H)->hash_nodecount == 0) | |||
#define kl_hash_count(H) ((H)->hash_nodecount) | |||
#define kl_hash_size(H) ((H)->hash_nchains) | |||
#define kl_hnode_get(N) ((N)->hash_data) | |||
#define kl_hnode_getkey(N) ((N)->hash_key) | |||
#define kl_hnode_put(N, V) ((N)->hash_data = (V)) | |||
#endif | |||
#ifdef __cplusplus | |||
} | |||
#endif | |||
#endif |
@ -1,658 +0,0 @@ | |||
// This file is part of khash released under the MIT license. | |||
// See the LICENSE file for more information. | |||
// Copyright 2013 Cloudant, Inc <support@cloudant.com> | |||
#include <assert.h> | |||
#include <string.h> | |||
#include <stdint.h> | |||
#include "erl_nif.h" | |||
#include "hash.h" | |||
#ifdef _WIN32 | |||
#define INLINE __inline | |||
#else | |||
#define INLINE inline | |||
#endif | |||
#define KHASH_VERSION 0 | |||
typedef struct | |||
{ | |||
ERL_NIF_TERM atom_ok; | |||
ERL_NIF_TERM atom_error; | |||
ERL_NIF_TERM atom_value; | |||
ERL_NIF_TERM atom_not_found; | |||
ERL_NIF_TERM atom_end_of_table; | |||
ERL_NIF_TERM atom_expired_iterator; | |||
ErlNifResourceType* res_hash; | |||
ErlNifResourceType* res_iter; | |||
} khash_priv; | |||
typedef struct | |||
{ | |||
unsigned int hval; | |||
ErlNifEnv* env; | |||
ERL_NIF_TERM key; | |||
ERL_NIF_TERM val; | |||
} khnode_t; | |||
typedef struct | |||
{ | |||
int version; | |||
unsigned int gen; | |||
hash_t* h; | |||
ErlNifPid p; | |||
} khash_t; | |||
typedef struct | |||
{ | |||
int version; | |||
unsigned int gen; | |||
khash_t* khash; | |||
hscan_t scan; | |||
} khash_iter_t; | |||
static INLINE ERL_NIF_TERM | |||
make_atom(ErlNifEnv* env, const char* name) | |||
{ | |||
ERL_NIF_TERM ret; | |||
if(enif_make_existing_atom(env, name, &ret, ERL_NIF_LATIN1)) { | |||
return ret; | |||
} | |||
return enif_make_atom(env, name); | |||
} | |||
/* Build the conventional {ok, Value} result tuple. */
static INLINE ERL_NIF_TERM
make_ok(ErlNifEnv* env, khash_priv* priv, ERL_NIF_TERM value)
{
    return enif_make_tuple2(env, priv->atom_ok, value);
}
/* Build the conventional {error, Reason} result tuple. */
static INLINE ERL_NIF_TERM
make_error(ErlNifEnv* env, khash_priv* priv, ERL_NIF_TERM reason)
{
    return enif_make_tuple2(env, priv->atom_error, reason);
}
static INLINE int | |||
check_pid(ErlNifEnv* env, khash_t* khash) | |||
{ | |||
ErlNifPid pid; | |||
enif_self(env, &pid); | |||
if(enif_compare(pid.pid, khash->p.pid) == 0) { | |||
return 1; | |||
} | |||
return 0; | |||
} | |||
hnode_t* | |||
khnode_alloc(void* ctx) | |||
{ | |||
hnode_t* ret = (hnode_t*) enif_alloc(sizeof(hnode_t)); | |||
khnode_t* node = (khnode_t*) enif_alloc(sizeof(khnode_t)); | |||
memset(ret, '\0', sizeof(hnode_t)); | |||
memset(node, '\0', sizeof(khnode_t)); | |||
node->env = enif_alloc_env(); | |||
ret->hash_key = node; | |||
return ret; | |||
} | |||
void | |||
khnode_free(hnode_t* obj, void* ctx) | |||
{ | |||
khnode_t* node = (khnode_t*) kl_hnode_getkey(obj); | |||
enif_free_env(node->env); | |||
enif_free(node); | |||
enif_free(obj); | |||
return; | |||
} | |||
int | |||
khash_cmp_fun(const void* l, const void* r) | |||
{ | |||
khnode_t* left = (khnode_t*) l; | |||
khnode_t* right = (khnode_t*) r; | |||
int cmp = enif_compare(left->key, right->key); | |||
if(cmp < 0) { | |||
return -1; | |||
} else if(cmp == 0) { | |||
return 0; | |||
} else { | |||
return 1; | |||
} | |||
} | |||
hash_val_t | |||
khash_hash_fun(const void* obj) | |||
{ | |||
khnode_t* node = (khnode_t*) obj; | |||
return (hash_val_t) node->hval; | |||
} | |||
/*
 * Allocate and initialize a khash resource owned by the calling
 * process.  Returns NULL if the underlying kazlib table cannot be
 * created.  The opts argument is currently unused.
 */
static INLINE khash_t*
khash_create_int(ErlNifEnv* env, khash_priv* priv, ERL_NIF_TERM opts)
{
    khash_t* khash;

    assert(priv != NULL && "missing private data member");

    khash = (khash_t*) enif_alloc_resource(priv->res_hash, sizeof(khash_t));
    memset(khash, '\0', sizeof(khash_t));

    khash->version = KHASH_VERSION;
    khash->gen = 0;
    khash->h = kl_hash_create(HASHCOUNT_T_MAX, khash_cmp_fun, khash_hash_fun);

    if(khash->h == NULL) {
        enif_release_resource(khash);
        return NULL;
    }

    kl_hash_set_allocator(khash->h, khnode_alloc, khnode_free, NULL);

    /* Record the owner; all other NIFs reject foreign processes. */
    enif_self(env, &(khash->p));

    return khash;
}
static ERL_NIF_TERM | |||
khash_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
khash_priv* priv = enif_priv_data(env); | |||
khash_t* khash; | |||
ERL_NIF_TERM ret; | |||
if(argc != 1) { | |||
return enif_make_badarg(env); | |||
} | |||
khash = khash_create_int(env, priv, argv[0]); | |||
if(khash == NULL) { | |||
return enif_make_badarg(env); | |||
} | |||
ret = enif_make_resource(env, khash); | |||
enif_release_resource(khash); | |||
return make_ok(env, priv, ret); | |||
} | |||
static void | |||
khash_free(ErlNifEnv* env, void* obj) | |||
{ | |||
khash_t* khash = (khash_t*) obj; | |||
if(khash->h != NULL) { | |||
kl_hash_free_nodes(khash->h); | |||
kl_hash_destroy(khash->h); | |||
} | |||
return; | |||
} | |||
static ERL_NIF_TERM | |||
khash_to_list(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
khash_priv* priv = (khash_priv*) enif_priv_data(env); | |||
ERL_NIF_TERM ret = enif_make_list(env, 0); | |||
khash_t* khash = NULL; | |||
void* res = NULL; | |||
hscan_t scan; | |||
hnode_t* entry; | |||
khnode_t* node; | |||
ERL_NIF_TERM key; | |||
ERL_NIF_TERM val; | |||
ERL_NIF_TERM tuple; | |||
if(argc != 1) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_resource(env, argv[0], priv->res_hash, &res)) { | |||
return enif_make_badarg(env); | |||
} | |||
khash = (khash_t*) res; | |||
if(!check_pid(env, khash)) { | |||
return enif_make_badarg(env); | |||
} | |||
kl_hash_scan_begin(&scan, khash->h); | |||
while((entry = kl_hash_scan_next(&scan)) != NULL) { | |||
node = (khnode_t*) kl_hnode_getkey(entry); | |||
key = enif_make_copy(env, node->key); | |||
val = enif_make_copy(env, node->val); | |||
tuple = enif_make_tuple2(env, key, val); | |||
ret = enif_make_list_cell(env, tuple, ret); | |||
} | |||
return ret; | |||
} | |||
static ERL_NIF_TERM | |||
khash_clear(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
khash_priv* priv = enif_priv_data(env); | |||
khash_t* khash = NULL; | |||
void* res = NULL; | |||
if(argc != 1) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_resource(env, argv[0], priv->res_hash, &res)) { | |||
return enif_make_badarg(env); | |||
} | |||
khash = (khash_t*) res; | |||
if(!check_pid(env, khash)) { | |||
return enif_make_badarg(env); | |||
} | |||
kl_hash_free_nodes(khash->h); | |||
khash->gen += 1; | |||
return priv->atom_ok; | |||
} | |||
/* Build a stack-local probe node and look the key up in the kazlib table. */
static INLINE hnode_t*
khash_lookup_int(ErlNifEnv* env, uint32_t hv, ERL_NIF_TERM key, khash_t* khash)
{
    khnode_t probe;

    probe.hval = hv;
    probe.env = env;
    probe.key = key;

    return kl_hash_lookup(khash->h, &probe);
}
static ERL_NIF_TERM | |||
khash_lookup(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
khash_priv* priv = enif_priv_data(env); | |||
khash_t* khash = NULL; | |||
void* res = NULL; | |||
uint32_t hval; | |||
hnode_t* entry; | |||
khnode_t* node; | |||
ERL_NIF_TERM ret; | |||
if(argc != 3) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_resource(env, argv[0], priv->res_hash, &res)) { | |||
return enif_make_badarg(env); | |||
} | |||
khash = (khash_t*) res; | |||
if(!check_pid(env, khash)) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_uint(env, argv[1], &hval)) { | |||
return enif_make_badarg(env); | |||
} | |||
entry = khash_lookup_int(env, hval, argv[2], khash); | |||
if(entry == NULL) { | |||
ret = priv->atom_not_found; | |||
} else { | |||
node = (khnode_t*) kl_hnode_getkey(entry); | |||
ret = enif_make_copy(env, node->val); | |||
ret = enif_make_tuple2(env, priv->atom_value, ret); | |||
} | |||
return ret; | |||
} | |||
static ERL_NIF_TERM | |||
khash_get(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
khash_priv* priv = enif_priv_data(env); | |||
khash_t* khash = NULL; | |||
void* res = NULL; | |||
uint32_t hval; | |||
hnode_t* entry; | |||
khnode_t* node; | |||
ERL_NIF_TERM ret; | |||
if(argc != 4) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_resource(env, argv[0], priv->res_hash, &res)) { | |||
return enif_make_badarg(env); | |||
} | |||
khash = (khash_t*) res; | |||
if(!check_pid(env, khash)) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_uint(env, argv[1], &hval)) { | |||
return enif_make_badarg(env); | |||
} | |||
entry = khash_lookup_int(env, hval, argv[2], khash); | |||
if(entry == NULL) { | |||
ret = argv[3]; | |||
} else { | |||
node = (khnode_t*) kl_hnode_getkey(entry); | |||
ret = enif_make_copy(env, node->val); | |||
} | |||
return ret; | |||
} | |||
static ERL_NIF_TERM | |||
khash_put(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
khash_priv* priv = enif_priv_data(env); | |||
khash_t* khash = NULL; | |||
void* res = NULL; | |||
uint32_t hval; | |||
hnode_t* entry; | |||
khnode_t* node; | |||
if(argc != 4) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_resource(env, argv[0], priv->res_hash, &res)) { | |||
return enif_make_badarg(env); | |||
} | |||
khash = (khash_t*) res; | |||
if(!check_pid(env, khash)) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_uint(env, argv[1], &hval)) { | |||
return enif_make_badarg(env); | |||
} | |||
entry = khash_lookup_int(env, hval, argv[2], khash); | |||
if(entry == NULL) { | |||
entry = khnode_alloc(NULL); | |||
node = (khnode_t*) kl_hnode_getkey(entry); | |||
node->hval = hval; | |||
node->key = enif_make_copy(node->env, argv[2]); | |||
node->val = enif_make_copy(node->env, argv[3]); | |||
kl_hash_insert(khash->h, entry, node); | |||
} else { | |||
node = (khnode_t*) kl_hnode_getkey(entry); | |||
enif_clear_env(node->env); | |||
node->key = enif_make_copy(node->env, argv[2]); | |||
node->val = enif_make_copy(node->env, argv[3]); | |||
} | |||
khash->gen += 1; | |||
return priv->atom_ok; | |||
} | |||
static ERL_NIF_TERM | |||
khash_del(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
khash_priv* priv = enif_priv_data(env); | |||
khash_t* khash = NULL; | |||
void* res = NULL; | |||
uint32_t hval; | |||
hnode_t* entry; | |||
ERL_NIF_TERM ret; | |||
if(argc != 3) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_resource(env, argv[0], priv->res_hash, &res)) { | |||
return enif_make_badarg(env); | |||
} | |||
khash = (khash_t*) res; | |||
if(!check_pid(env, khash)) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_uint(env, argv[1], &hval)) { | |||
return enif_make_badarg(env); | |||
} | |||
entry = khash_lookup_int(env, hval, argv[2], khash); | |||
if(entry == NULL) { | |||
ret = priv->atom_not_found; | |||
} else { | |||
kl_hash_delete_free(khash->h, entry); | |||
ret = priv->atom_ok; | |||
} | |||
khash->gen += 1; | |||
return ret; | |||
} | |||
static ERL_NIF_TERM | |||
khash_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
khash_priv* priv = enif_priv_data(env); | |||
khash_t* khash; | |||
if(argc != 1) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_resource(env, argv[0], priv->res_hash, (void*) &khash)) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!check_pid(env, khash)) { | |||
return enif_make_badarg(env); | |||
} | |||
return enif_make_uint64(env, kl_hash_count(khash->h)); | |||
} | |||
static ERL_NIF_TERM | |||
khash_iter(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
khash_priv* priv = enif_priv_data(env); | |||
khash_t* khash = NULL; | |||
void* res = NULL; | |||
khash_iter_t* iter; | |||
ERL_NIF_TERM ret; | |||
if(argc != 1) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_resource(env, argv[0], priv->res_hash, &res)) { | |||
return enif_make_badarg(env); | |||
} | |||
khash = (khash_t*) res; | |||
if(!check_pid(env, khash)) { | |||
return enif_make_badarg(env); | |||
} | |||
iter = (khash_iter_t*) enif_alloc_resource( | |||
priv->res_iter, sizeof(khash_iter_t)); | |||
memset(iter, '\0', sizeof(khash_iter_t)); | |||
iter->version = KHASH_VERSION; | |||
iter->gen = khash->gen; | |||
iter->khash = khash; | |||
kl_hash_scan_begin(&(iter->scan), iter->khash->h); | |||
// The iterator needs to guarantee that the khash | |||
// remains alive for the life of the iterator. | |||
enif_keep_resource(khash); | |||
ret = enif_make_resource(env, iter); | |||
enif_release_resource(iter); | |||
return make_ok(env, priv, ret); | |||
} | |||
/* Iterator resource destructor: drop the keep-alive reference taken on
 * the parent khash in khash_iter(). */
static void
khash_iter_free(ErlNifEnv* env, void* obj)
{
    khash_iter_t* iter = (khash_iter_t*) obj;
    enif_release_resource(iter->khash);
}
static ERL_NIF_TERM | |||
khash_iter_next(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) | |||
{ | |||
khash_priv* priv = enif_priv_data(env); | |||
khash_iter_t* iter = NULL; | |||
void* res = NULL; | |||
hnode_t* entry; | |||
khnode_t* node; | |||
ERL_NIF_TERM key; | |||
ERL_NIF_TERM val; | |||
if(argc != 1) { | |||
return enif_make_badarg(env); | |||
} | |||
if(!enif_get_resource(env, argv[0], priv->res_iter, &res)) { | |||
return enif_make_badarg(env); | |||
} | |||
iter = (khash_iter_t*) res; | |||
if(!check_pid(env, iter->khash)) { | |||
return enif_make_badarg(env); | |||
} | |||
if(iter->gen != iter->khash->gen) { | |||
return make_error(env, priv, priv->atom_expired_iterator); | |||
} | |||
entry = kl_hash_scan_next(&(iter->scan)); | |||
if(entry == NULL) { | |||
return priv->atom_end_of_table; | |||
} | |||
node = (khnode_t*) kl_hnode_getkey(entry); | |||
key = enif_make_copy(env, node->key); | |||
val = enif_make_copy(env, node->val); | |||
return enif_make_tuple2(env, key, val); | |||
} | |||
static int | |||
load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info) | |||
{ | |||
int flags = ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER; | |||
ErlNifResourceType* res; | |||
khash_priv* new_priv = (khash_priv*) enif_alloc(sizeof(khash_priv)); | |||
if(new_priv == NULL) { | |||
return 1; | |||
} | |||
res = enif_open_resource_type( | |||
env, NULL, "khash", khash_free, flags, NULL); | |||
if(res == NULL) { | |||
return 1; | |||
} | |||
new_priv->res_hash = res; | |||
res = enif_open_resource_type( | |||
env, NULL, "khash_iter", khash_iter_free, flags, NULL); | |||
if(res == NULL) { | |||
return 1; | |||
} | |||
new_priv->res_iter = res; | |||
new_priv->atom_ok = make_atom(env, "ok"); | |||
new_priv->atom_error = make_atom(env, "error"); | |||
new_priv->atom_value = make_atom(env, "value"); | |||
new_priv->atom_not_found = make_atom(env, "not_found"); | |||
new_priv->atom_end_of_table = make_atom(env, "end_of_table"); | |||
new_priv->atom_expired_iterator = make_atom(env, "expired_iterator"); | |||
*priv = (void*) new_priv; | |||
return 0; | |||
} | |||
/* Reload callback: nothing to migrate, always succeeds.
 * NOTE(review): recent ERTS releases deprecate/ignore reload — kept to
 * match the ERL_NIF_INIT signature used by this module; confirm against
 * the targeted OTP version. */
static int
reload(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    return 0;
}
/* Upgrade callback: rebuild private state exactly as on first load. */
static int
upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
    return load(env, priv, info);
}
static void | |||
unload(ErlNifEnv* env, void* priv) | |||
{ | |||
enif_free(priv); | |||
return; | |||
} | |||
/* NIF dispatch table: {erlang_name, arity, c_implementation}. */
static ErlNifFunc funcs[] = {
    {"new", 1, khash_new},
    {"to_list", 1, khash_to_list},
    {"clear", 1, khash_clear},
    {"lookup_int", 3, khash_lookup},
    {"get_int", 4, khash_get},
    {"put_int", 4, khash_put},
    {"del_int", 3, khash_del},
    {"size", 1, khash_size},
    {"iter", 1, khash_iter},
    {"iter_next", 1, khash_iter_next}
};
ERL_NIF_INIT(khash, funcs, &load, &reload, &upgrade, &unload);
@ -1,12 +0,0 @@ | |||
{port_specs, [ | |||
{"../../priv/khash2.so", ["*.c"]} | |||
]}. | |||
{port_env, [ | |||
% Development compilation | |||
% {".*", "CFLAGS", "$CFLAGS -g -Wall -Werror -fPIC"} | |||
% Production compilation | |||
{"(linux|solaris|darwin|freebsd)", "CFLAGS", "$CFLAGS -Wall -Werror -DNDEBUG -O3"}, | |||
{"win32", "CFLAGS", "$CFLAGS /O2 /DNDEBUG /Wall"} | |||
]}. |
@ -1,80 +0,0 @@ | |||
PROJECT = enlfq | |||
CXXFLAGS = -std=c++11 -O2 -Wextra -Werror -Wno-missing-field-initializers -fno-rtti -fno-exceptions | |||
LDLIBS = -lstdc++ | |||
# Based on c_src.mk from erlang.mk by Loic Hoguin <essen@ninenines.eu> | |||
CURDIR := $(shell pwd) | |||
BASEDIR := $(abspath $(CURDIR)/..) | |||
PROJECT ?= $(notdir $(BASEDIR)) | |||
PROJECT := $(strip $(PROJECT)) | |||
# erlang.mk-style c_src build fragment: discovers the Erlang/OTP headers and
# libraries via the local `erl`, compiles every C/C++ source under C_SRC_DIR,
# and links a single NIF shared object at C_SRC_OUTPUT.
ERTS_INCLUDE_DIR ?= $(shell erl -noshell -s init stop -eval "io:format(\"~ts/erts-~ts/include/\", [code:root_dir(), erlang:system_info(version)]).")
ERL_INTERFACE_INCLUDE_DIR ?= $(shell erl -noshell -s init stop -eval "io:format(\"~ts\", [code:lib_dir(erl_interface, include)]).")
ERL_INTERFACE_LIB_DIR ?= $(shell erl -noshell -s init stop -eval "io:format(\"~ts\", [code:lib_dir(erl_interface, lib)]).")
C_SRC_DIR = $(CURDIR)
# NOTE(review): PROJECT and BASEDIR are presumably defined by the including
# erlang.mk — confirm before using this fragment standalone.
C_SRC_OUTPUT ?= $(CURDIR)/../priv/$(PROJECT).so
# System type and C compiler/flags.
UNAME_SYS := $(shell uname -s)
ifeq ($(UNAME_SYS), Darwin)
	CC ?= cc
	CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
	CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
	LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
else ifeq ($(UNAME_SYS), FreeBSD)
	CC ?= cc
	CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
	CXXFLAGS ?= -O3 -finline-functions -Wall
else ifeq ($(UNAME_SYS), Linux)
	CC ?= gcc
	CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
	CXXFLAGS ?= -O3 -finline-functions -Wall
endif
CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
# NOTE(review): erl_interface (-lerl_interface) was removed in OTP 23; on
# modern OTP only -lei exists, and plain NIFs usually need neither.
LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
LDFLAGS += -shared
# Verbosity: V=0 (default) prints short "  C/CPP/LD <file>" lines; any other
# value of V echoes the full command. $(?F) is the basename of the
# prerequisites that triggered the rebuild, $(@F) the basename of the target.
c_verbose_0 = @echo " C " $(?F);
c_verbose = $(c_verbose_$(V))
cpp_verbose_0 = @echo " CPP " $(?F);
cpp_verbose = $(cpp_verbose_$(V))
link_verbose_0 = @echo " LD " $(@F);
link_verbose = $(link_verbose_$(V))
SOURCES := $(shell find $(C_SRC_DIR) -type f \( -name "*.c" -o -name "*.C" -o -name "*.cc" -o -name "*.cpp" \))
OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
$(C_SRC_OUTPUT): $(OBJECTS)
	@mkdir -p $(BASEDIR)/priv/
	$(link_verbose) $(CC) $(OBJECTS) $(LDFLAGS) $(LDLIBS) -o $(C_SRC_OUTPUT)
%.o: %.c
	$(COMPILE_C) $(OUTPUT_OPTION) $<
%.o: %.cc
	$(COMPILE_CPP) $(OUTPUT_OPTION) $<
%.o: %.C
	$(COMPILE_CPP) $(OUTPUT_OPTION) $<
%.o: %.cpp
	$(COMPILE_CPP) $(OUTPUT_OPTION) $<
clean:
	@rm -f $(C_SRC_OUTPUT) $(OBJECTS)
@ -1,84 +0,0 @@ | |||
#include "enlfq.h"
#include "enlfq_nif.h"
#include "nif_utils.h"
#include "concurrentqueue.h"

// One queued element: the pushed term is copied into its own process
// independent environment so it stays valid after the producer returns.
struct q_item {
    ErlNifEnv *env;
    ERL_NIF_TERM term;
};

// NIF resource payload: a heap-allocated moodycamel lock-free MPMC queue.
struct squeue {
    moodycamel::ConcurrentQueue<q_item> *queue;
};
// Resource destructor: drain every still-queued item (each owns a private
// env) and then delete the underlying queue.
void nif_enlfq_free(ErlNifEnv *, void *obj) {
    squeue *q = static_cast<squeue *>(obj);
    if (q == nullptr)
        return;

    q_item pending;
    while (q->queue->try_dequeue(pending))
        enif_free_env(pending.env);

    delete q->queue;
}
// new/0: allocate a queue resource and hand it to the caller as {ok, Ref}.
// Ownership of the resource passes to the VM via enif_release_resource.
ERL_NIF_TERM nif_enlfq_new(ErlNifEnv *env, int, const ERL_NIF_TERM *) {
    shared_data *data = static_cast<shared_data *>(enif_priv_data(env));

    squeue *qinst = static_cast<squeue *>(enif_alloc_resource(data->resQueueInstance, sizeof(squeue)));
    // BUG FIX: the NULL check must happen BEFORE the first dereference of
    // qinst (the original assigned qinst->queue first).
    if (qinst == NULL)
        return make_error(env, "enif_alloc_resource failed");

    qinst->queue = new moodycamel::ConcurrentQueue<q_item>;

    ERL_NIF_TERM term = enif_make_resource(env, qinst);
    enif_release_resource(qinst);

    return enif_make_tuple2(env, ATOMS.atomOk, term);
}
// push/2: copy the term into a private env and enqueue it. Always returns
// the atom 'true' on success; badarg if argv[0] is not a queue resource.
ERL_NIF_TERM nif_enlfq_push(ErlNifEnv *env, int, const ERL_NIF_TERM argv[]) {
    shared_data *priv = static_cast<shared_data *>(enif_priv_data(env));

    squeue *q;
    if (!enif_get_resource(env, argv[0], priv->resQueueInstance, (void **) &q))
        return enif_make_badarg(env);

    q_item it;
    it.env = enif_alloc_env();
    it.term = enif_make_copy(it.env, argv[1]);
    q->queue->enqueue(it);

    return ATOMS.atomTrue;
}
// pop/1: dequeue one item, copy its term back into the caller's env and
// release the item's private env. Returns {ok, Term} or the atom 'empty'.
ERL_NIF_TERM nif_enlfq_pop(ErlNifEnv *env, int, const ERL_NIF_TERM argv[]) {
    shared_data *priv = static_cast<shared_data *>(enif_priv_data(env));

    squeue *q = NULL;
    if (!enif_get_resource(env, argv[0], priv->resQueueInstance, (void **) &q))
        return enif_make_badarg(env);

    q_item it;
    if (!q->queue->try_dequeue(it))
        return ATOMS.atomEmpty;

    ERL_NIF_TERM out = enif_make_copy(env, it.term);
    enif_free_env(it.env);
    return enif_make_tuple2(env, ATOMS.atomOk, out);
}
@ -1,10 +0,0 @@ | |||
#pragma once

#include "erl_nif.h"

// C linkage so the ERL_NIF_INIT function table can reference these symbols.
extern "C" {
// Resource destructor: drains the queue and frees each item's env.
void nif_enlfq_free(ErlNifEnv *env, void *obj);
// new/0 -> {ok, QueueRef} | {error, Reason}
ERL_NIF_TERM nif_enlfq_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
// push(QueueRef, Term) -> true
ERL_NIF_TERM nif_enlfq_push(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
// pop(QueueRef) -> {ok, Term} | empty
ERL_NIF_TERM nif_enlfq_pop(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
}
@ -1,57 +0,0 @@ | |||
#include <string.h> | |||
#include "enlfq_nif.h" | |||
#include "enlfq.h" | |||
#include "nif_utils.h" | |||
// Names of the atoms interned once at load time (see on_nif_load).
const char kAtomOk[] = "ok";
const char kAtomError[] = "error";
const char kAtomTrue[] = "true";
//const char kAtomFalse[] = "false";
//const char kAtomUndefined[] = "undefined";
const char kAtomEmpty[] = "empty";

// Global atom cache shared by all NIF calls (atoms are immutable, so
// unsynchronized reads after load are safe).
atoms ATOMS;
// Register (or take over on upgrade) the queue resource type, storing the
// handle in the NIF's private state.
void open_resources(ErlNifEnv *env, shared_data *data) {
    const ErlNifResourceFlags flags =
        static_cast<ErlNifResourceFlags>(ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER);
    data->resQueueInstance =
        enif_open_resource_type(env, NULL, "enlfq_instance", nif_enlfq_free, flags, NULL);
}
// load callback: intern the atom cache, allocate private state and register
// the queue resource type. Returns 0 on success; non-zero aborts loading.
int on_nif_load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM) {
    ATOMS.atomOk = make_atom(env, kAtomOk);
    ATOMS.atomError = make_atom(env, kAtomError);
    ATOMS.atomTrue = make_atom(env, kAtomTrue);
    ATOMS.atomEmpty = make_atom(env, kAtomEmpty);

    shared_data *data = static_cast<shared_data *>(enif_alloc(sizeof(shared_data)));
    // BUG FIX: the original dereferenced data without checking the allocation.
    if (data == NULL)
        return -1;

    open_resources(env, data);
    if (data->resQueueInstance == NULL) {
        // Resource type registration failed; don't leak the private state.
        enif_free(data);
        return -1;
    }

    *priv_data = data;
    return 0;
}
// unload callback: release the private state allocated in on_nif_load /
// on_nif_upgrade. Live resources are freed separately via nif_enlfq_free.
void on_nif_unload(ErlNifEnv *, void *priv_data) {
    shared_data *data = static_cast<shared_data *>(priv_data);
    enif_free(data);
}
// upgrade callback: allocate fresh private state and re-open (take over) the
// resource type for the new module instance. Mirrors on_nif_load's checks.
int on_nif_upgrade(ErlNifEnv *env, void **priv, void **, ERL_NIF_TERM) {
    shared_data *data = static_cast<shared_data *>(enif_alloc(sizeof(shared_data)));
    // Consistency with on_nif_load: fail the upgrade instead of crashing on
    // a NULL allocation or a failed resource takeover.
    if (data == NULL)
        return -1;

    open_resources(env, data);
    if (data->resQueueInstance == NULL) {
        enif_free(data);
        return -1;
    }

    *priv = data;
    return 0;
}
// Exported NIFs for module 'enlfq': new/0, push/2, pop/1.
static ErlNifFunc nif_funcs[] =
{
    {"new", 0, nif_enlfq_new},
    {"push", 2, nif_enlfq_push},
    {"pop", 1, nif_enlfq_pop}
};

// No reload callback (NULL); load/upgrade/unload manage the private state.
ERL_NIF_INIT(enlfq, nif_funcs, on_nif_load, NULL, on_nif_upgrade, on_nif_unload)
@ -1,19 +0,0 @@ | |||
#pragma once | |||
#include "erl_nif.h" | |||
struct atoms | |||
{ | |||
ERL_NIF_TERM atomOk; | |||
ERL_NIF_TERM atomError; | |||
ERL_NIF_TERM atomTrue; | |||
// ERL_NIF_TERM atomFalse; | |||
// ERL_NIF_TERM atomUndefined; | |||
ERL_NIF_TERM atomEmpty; | |||
}; | |||
struct shared_data | |||
{ | |||
ErlNifResourceType* resQueueInstance; | |||
}; | |||
extern atoms ATOMS; |
@ -1,27 +0,0 @@ | |||
#include "nif_utils.h" | |||
#include "enlfq_nif.h" | |||
#include <string.h> | |||
// Return the atom named `name`, reusing an already interned atom when
// possible so the global atom table is not grown needlessly.
ERL_NIF_TERM make_atom(ErlNifEnv* env, const char* name)
{
    ERL_NIF_TERM atom;
    if (!enif_make_existing_atom(env, name, &atom, ERL_NIF_LATIN1))
        atom = enif_make_atom(env, name);
    return atom;
}
// Build a binary term of `length` bytes copied from `buff`.
ERL_NIF_TERM make_binary(ErlNifEnv* env, const char* buff, size_t length)
{
    ERL_NIF_TERM bin_term;
    unsigned char *data = enif_make_new_binary(env, length, &bin_term);
    memcpy(data, buff, length);
    return bin_term;
}
ERL_NIF_TERM make_error(ErlNifEnv* env, const char* error) | |||
{ | |||
return enif_make_tuple2(env, ATOMS.atomError, make_binary(env, error, strlen(error))); | |||
} |
@ -1,6 +0,0 @@ | |||
#pragma once

#include "erl_nif.h"

// Intern-or-create an atom term for `name`.
ERL_NIF_TERM make_atom(ErlNifEnv* env, const char* name);
// Build {error, ReasonBinary} from a NUL-terminated reason string.
ERL_NIF_TERM make_error(ErlNifEnv* env, const char* error);
// Build a binary term copying `length` bytes from `buff`.
ERL_NIF_TERM make_binary(ErlNifEnv* env, const char* buff, size_t length);
@ -1,7 +0,0 @@ | |||
%% rebar port-compiler spec: compile every .cc in this directory into the
%% shared NIF library priv/enlfq.so.
{port_specs, [
  {"../../priv/enlfq.so", ["*.cc"]}
]}.
@ -1,172 +0,0 @@ | |||
#include "etsq.h"

// Guards the name->queue map itself; each queue serializes its own
// operations with a per-queue mutex (see QueueInfo).
ErlNifRWLock *qinfo_map_rwlock;
// Registry of named queues, keyed by the C-string pointer handed to new_q
// (compared by content via cmp_str).
QInfoMap qinfo_map;
// Look `name` up in the registry and return its QueueInfo, or NULL when no
// such queue exists. Not thread safe: callers must hold qinfo_map_rwlock.
QueueInfo* get_q_info(char* name)
{
    QInfoMap::iterator it = qinfo_map.find(name);
    return (it == qinfo_map.end()) ? NULL : it->second;
}
void new_q(char* name) | |||
{ | |||
//std::cout<<"Create: " << name<<std::endl; | |||
WriteLock write_lock(qinfo_map_rwlock); | |||
QueueInfo *queue_info = new QueueInfo(name); | |||
qinfo_map.insert(QInfoMapPair(name, queue_info)); | |||
//std::cout<<"Created: " << name<<std::endl; | |||
} | |||
// Append `erl_term` to the queue named `name`.
// Lock order: map read lock first (the map is only read), then the queue's
// own mutex for the actual push. Returns true on success; false when no
// such queue exists, in which case the CALLER keeps ownership of `erl_term`.
bool push(char* name, ErlTerm *erl_term)
{
    QueueInfo *pqueue_info = NULL;
    ReadLock read_lock(qinfo_map_rwlock);
    if (NULL != (pqueue_info = get_q_info(name)))
    {
        Mutex mutex(pqueue_info->pmutex);
        pqueue_info->queue.push(erl_term);
        return true;  // queue now owns erl_term
    }
    return false;
}
// Returns new ErlTerm. Caller should delete it.
// Semantics:
//   - queue missing            -> NULL
//   - queue empty              -> new ErlTerm("empty") (an {error, empty} term)
//   - read_only == true        -> deep copy of the head, head stays queued
//   - read_only == false       -> the head itself, removed from the queue
// Lock order matches push(): map read lock, then the per-queue mutex.
ErlTerm* pop(char* name, bool read_only)
{
    QueueInfo *pqueue_info = NULL;
    ReadLock read_lock(qinfo_map_rwlock);
    if (NULL != (pqueue_info = get_q_info(name)))
    {
        Mutex mutex(pqueue_info->pmutex);
        if (!pqueue_info->queue.empty())
        {
            ErlTerm *erl_term = pqueue_info->queue.front();
            if(read_only)
            {
                // Peek: hand back a copy so the queue keeps its element.
                return new ErlTerm(erl_term);
            }
            pqueue_info->queue.pop();
            return erl_term;  // ownership transfers to the caller
        }
        return new ErlTerm("empty");
    }
    return NULL;
}
// new/1: create a queue named by the atom argv[0].
// Returns ok, {error, already_exists}, or badarg for a non-atom argument.
static ERL_NIF_TERM new_queue(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int size = 100;
    // BUG FIX: the original `new char(size)` allocates a SINGLE char
    // initialized to 100; enif_get_atom then overflowed it. Allocate a
    // 100-byte array instead. On success the map keeps `name` as its key.
    char *name = new char[size];
    if (!enif_get_atom(env, argv[0], name, size, ERL_NIF_LATIN1))
    {
        delete[] name;
        return enif_make_badarg(env);
    }
    {
        QueueInfo *pqueue_info = NULL;
        ReadLock read_lock(qinfo_map_rwlock);
        if (NULL != (pqueue_info = get_q_info(name)))
        {
            delete[] name;  // BUG FIX: don't leak the name on this path
            return enif_make_error(env, "already_exists");
        }
    }
    // NOTE: a racing creator can still win between the check above and
    // new_q's write lock; new_q handles the duplicate without corruption.
    new_q(name);
    return enif_make_atom(env, "ok");
}
// info/1: return [{name, Name}, {size, Size}] for the queue named by the
// atom argv[0]; badarg for a non-atom argument or an unknown queue.
static ERL_NIF_TERM info(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int size = 100;
    char name[100];
    // BUG FIX: the original ignored this result and read an uninitialized
    // buffer when argv[0] was not an atom (or was too long).
    if (!enif_get_atom(env, argv[0], name, size, ERL_NIF_LATIN1))
        return enif_make_badarg(env);

    int queue_size = 0;
    {
        QueueInfo *pqueue_info = NULL;
        ReadLock read_lock(qinfo_map_rwlock);
        if (NULL == (pqueue_info = get_q_info(name)))
            return enif_make_badarg(env);
        queue_size = pqueue_info->queue.size();
    }
    return enif_make_list2(env,
        enif_make_tuple2(env, enif_make_atom(env, "name"), enif_make_atom(env, name)),
        enif_make_tuple2(env, enif_make_atom(env, "size"), enif_make_int(env, queue_size)));
}
// push_back/2: append a copy of argv[1] to the queue named by the atom
// argv[0]. Returns ok, or badarg for a bad name / unknown queue.
static ERL_NIF_TERM push_back(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int size = 100;
    char name[100];
    // BUG FIX: validate the atom instead of using an uninitialized buffer.
    if (!enif_get_atom(env, argv[0], name, size, ERL_NIF_LATIN1))
        return enif_make_badarg(env);

    ErlTerm *erl_term = new ErlTerm(argv[1]);  // ownership moves to push() on success
    if (push(name, erl_term))
        return enif_make_atom(env, "ok");
    delete erl_term;
    return enif_make_badarg(env);
}
// pop_front/1: remove and return the head term of the named queue (pop()
// yields an {error, empty} term when the queue has no elements); badarg for
// a bad name / unknown queue.
static ERL_NIF_TERM pop_front(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int size = 100;
    char name[100];
    // BUG FIX: validate the atom instead of using an uninitialized buffer.
    if (!enif_get_atom(env, argv[0], name, size, ERL_NIF_LATIN1))
        return enif_make_badarg(env);

    ErlTerm *erl_term = pop(name, false);
    if (erl_term == NULL)
        return enif_make_badarg(env);

    // Copy into the caller's env before freeing the term's private env.
    ERL_NIF_TERM return_term = enif_make_copy(env, erl_term->term);
    delete erl_term;
    return return_term;
}
// get_front/1: peek at the head term of the named queue without removing it
// (pop() with read_only=true returns a copy); badarg for a bad name /
// unknown queue.
static ERL_NIF_TERM get_front(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int size = 100;
    char name[100];
    // BUG FIX: validate the atom instead of using an uninitialized buffer.
    if (!enif_get_atom(env, argv[0], name, size, ERL_NIF_LATIN1))
        return enif_make_badarg(env);

    ErlTerm *erl_term = pop(name, true);
    if (erl_term == NULL)
        return enif_make_badarg(env);

    ERL_NIF_TERM return_term = enif_make_copy(env, erl_term->term);
    delete erl_term;
    return return_term;
}
// Accept only load_info == 1 (the NIF ABI version this module expects).
static int is_ok_load_info(ErlNifEnv* env, ERL_NIF_TERM load_info)
{
    int version;
    if (!enif_get_int(env, load_info, &version))
        return 0;
    return version == 1;
}
// load callback: validate load_info and create the global map rwlock.
// Returns 0 on success, -1 to abort loading.
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    if (!is_ok_load_info(env, load_info))
        return -1;
    qinfo_map_rwlock = enif_rwlock_create((char*)"qinfo");
    // Fail the load instead of crashing later on a NULL lock.
    if (qinfo_map_rwlock == NULL)
        return -1;
    return 0;
}
// upgrade callback: only validates load_info; the rwlock and map from the
// old instance are global and carried over unchanged.
static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
{
    return is_ok_load_info(env, load_info) ? 0 : -1;
}
// unload callback: tear down the global map lock.
// NOTE(review): queues still registered in qinfo_map (and their ErlTerms)
// are not freed here — confirm whether that leak is acceptable at unload.
static void unload(ErlNifEnv* env, void* priv_data)
{
    enif_rwlock_destroy(qinfo_map_rwlock);
}
// Exported NIFs for module 'etsq'.
static ErlNifFunc nif_funcs[] = {
    {"new", 1, new_queue},
    {"info", 1, info},
    {"push_back", 2, push_back},
    {"pop_front", 1, pop_front},
    {"get_front", 1, get_front}
};

// No reload callback (NULL); state is global, so upgrade is a no-op check.
ERL_NIF_INIT(etsq, nif_funcs, load, NULL, upgrade, unload)
@ -1,130 +0,0 @@ | |||
/* | |||
* etsq.h | |||
* | |||
* Created on: Mar 21, 2016 | |||
* Author: Vinod | |||
*/ | |||
#ifndef ETSQ_H_ | |||
#define ETSQ_H_ | |||
#include <iostream> // std::cin, std::cout | |||
#include <map> // std::map | |||
#include <queue> // std::queue | |||
#include <string.h> | |||
#include "erl_nif.h" | |||
// Build the tuple {error, ErrorAtom} in `env`.
#define enif_make_error(env, error) enif_make_tuple2(env, \
		enif_make_atom(env, "error"), enif_make_atom(env, error))

// strcmp-based ordering so std::map can key on raw C strings by content
// (default pointer comparison would be meaningless).
struct cmp_str
{
	bool operator()(char *a, char *b) const
	{
		return strcmp(a, b) < 0;
	}
};
// Owning wrapper around an ERL_NIF_TERM: each instance allocates its own
// process independent environment and copies the term into it, so the term
// stays valid for the wrapper's lifetime regardless of the caller's env.
// NOTE(review): the compiler-generated copy constructor/assignment would
// shallow-copy term_env and double-free it in ~ErlTerm; this code base only
// ever handles ErlTerm through pointers, so the hazard is latent — confirm
// before passing ErlTerm by value anywhere.
class ErlTerm
{
public:
	ErlNifEnv *term_env;
	ERL_NIF_TERM term;
public:
	// Deep-copy an existing term into a fresh private env.
	ErlTerm(ERL_NIF_TERM erl_nif_term)
	{
		term_env = enif_alloc_env();
		this->term = enif_make_copy(term_env, erl_nif_term);
	}
	// Deep-copy another wrapper's term (used by the read-only pop path).
	ErlTerm(ErlTerm *erl_term)
	{
		term_env = enif_alloc_env();
		this->term = enif_make_copy(term_env, erl_term->term);
	}
	// Wrap an integer.
	ErlTerm(int value)
	{
		term_env = enif_alloc_env();
		this->term = enif_make_int(term_env, value);
	}
	// Wrap {error, ErrorAtom} built from `error` (see enif_make_error macro).
	ErlTerm(const char *error)
	{
		term_env = enif_alloc_env();
		this->term = enif_make_error(term_env, error);
	}
	~ErlTerm()
	{
		enif_free_env(term_env);
		term_env = NULL;
	}
};
typedef std::queue<ErlTerm*> ErlQueue; | |||
class QueueInfo | |||
{ | |||
public: | |||
ErlNifMutex* pmutex; | |||
ErlQueue queue; | |||
public: | |||
QueueInfo(char* name) | |||
{ | |||
pmutex = enif_mutex_create(name); | |||
} | |||
~QueueInfo() | |||
{ | |||
enif_mutex_destroy(pmutex); | |||
} | |||
}; | |||
typedef std::map<char *, QueueInfo*, cmp_str> QInfoMap; | |||
typedef std::pair<char *, QueueInfo*> QInfoMapPair; | |||
// Class to handle Read lock | |||
class ReadLock | |||
{ | |||
ErlNifRWLock *pread_lock; | |||
public: | |||
ReadLock(ErlNifRWLock *pread_lock) | |||
{ | |||
this->pread_lock = pread_lock; | |||
enif_rwlock_rlock(this->pread_lock); | |||
}; | |||
~ReadLock() | |||
{ | |||
enif_rwlock_runlock(pread_lock); | |||
}; | |||
}; | |||
// Class to handle Write lock | |||
class WriteLock | |||
{ | |||
ErlNifRWLock *pwrite_lock; | |||
public: | |||
WriteLock(ErlNifRWLock *pwrite_lock) | |||
{ | |||
this->pwrite_lock = pwrite_lock; | |||
enif_rwlock_rwlock(this->pwrite_lock); | |||
}; | |||
~WriteLock() | |||
{ | |||
enif_rwlock_rwunlock(pwrite_lock); | |||
}; | |||
}; | |||
// Class to handle Mutex lock and unlock | |||
class Mutex | |||
{ | |||
ErlNifMutex *pmtx; | |||
public: | |||
Mutex(ErlNifMutex *pmtx) | |||
{ | |||
this->pmtx = pmtx; | |||
enif_mutex_lock(this->pmtx); | |||
}; | |||
~Mutex() | |||
{ | |||
enif_mutex_unlock(pmtx); | |||
}; | |||
}; | |||
#endif /* ETSQ_H_ */ |
@ -1,7 +0,0 @@ | |||
%% rebar port-compiler spec: compile every .cpp in this directory into the
%% shared NIF library priv/etsq.so.
{port_specs, [
  {"../../priv/etsq.so", ["*.cpp"]}
]}.
@ -1,103 +0,0 @@ | |||
#include <iostream> | |||
#include <algorithm> | |||
#include <string.h> | |||
// Lightweight byte-span wrapper. By default an instance only *views*
// caller-owned memory (allocated == false); copy() switches it to owning an
// internal buffer that the destructor releases. Ordering compares by length
// first, then bytewise content.
class Binary {
public:
    unsigned char *bin;
    size_t size;
    bool allocated;

    Binary() : bin(NULL), size(0), allocated(false) { }

    // Non-owning view of a NUL-terminated string (terminator excluded).
    Binary(const char *data) {
        bin = (unsigned char *) data;
        size = strlen(data);
        allocated = false;
    }

    // Copies share the pointer but never the ownership: a copy is always a
    // view, so only the original frees an owned buffer (the copy dangles if
    // it outlives the original — unchanged from the existing contract).
    Binary(const Binary &b) {
        bin = b.bin;
        size = b.size;
        allocated = false;
    }

    ~Binary() {
        if (allocated) {
            // BUG FIX: was `delete bin`, mismatched with the raw
            // `operator new` allocation in copy(); now new[]/delete[].
            delete[] bin;
        }
    }

    // NOTE: assumes bin is NUL-terminated; only safe for views built from
    // C strings, not for copy()'d raw byte ranges.
    operator std::string() {
        return (const char *) bin;
    }

    friend std::ostream & operator<<(std::ostream & str, Binary const &b) {
        return str << b.bin;
    }

    // Shorter spans sort first; equal lengths fall back to memcmp order.
    bool operator<(const Binary &b) {
        if (size != b.size) {
            return size < b.size;
        }
        return memcmp(bin, b.bin, size) < 0;
    }

    bool operator<(Binary &b) {
        return operator<(static_cast<const Binary &>(b));
    }

    bool operator>(const Binary &b) {
        if (size != b.size) {
            return size > b.size;
        }
        return memcmp(bin, b.bin, size) > 0;
    }

    bool operator== (const Binary &b) {
        return size == b.size && memcmp(bin, b.bin, size) == 0;
    }

    operator std::string() const {
        return (const char*) bin;
    }

    // Rebind the view to another NUL-terminated string; never allocates.
    Binary& set_data(const char *data) {
        bin = (unsigned char *) data;
        size = strlen(data);
        return *this;
    }

    // Take an owning copy of insize bytes (need not be NUL-terminated).
    void copy(char *inbin, size_t insize) {
        if (allocated) {
            delete[] bin;   // BUG FIX: don't leak a previously owned buffer
        }
        bin = new unsigned char[insize];
        allocated = true;
        size = insize;
        memcpy(bin, inbin, size);
    }
};
// Free-function ordering for const operands (mirrors the member operator<):
// shorter spans sort first, equal lengths compare bytewise.
inline bool operator < (const Binary &a, const Binary &b) {
    if (a.size != b.size) {
        return a.size < b.size;
    }
    return memcmp(a.bin, b.bin, std::min(a.size, b.size)) < 0;
}
@ -1,349 +0,0 @@ | |||
// Copyright 2013 Google Inc. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
#ifndef UTIL_BTREE_BTREE_CONTAINER_H__ | |||
#define UTIL_BTREE_BTREE_CONTAINER_H__ | |||
#include <iosfwd> | |||
#include <utility> | |||
#include "btree.h" | |||
namespace btree { | |||
// A common base class for btree_set, btree_map, btree_multiset and
// btree_multimap. Thin forwarding layer: every operation delegates to the
// wrapped btree<> instance in tree_.
template <typename Tree>
class btree_container {
  typedef btree_container<Tree> self_type;

 public:
  typedef typename Tree::params_type params_type;
  typedef typename Tree::key_type key_type;
  typedef typename Tree::value_type value_type;
  typedef typename Tree::key_compare key_compare;
  typedef typename Tree::allocator_type allocator_type;
  typedef typename Tree::pointer pointer;
  typedef typename Tree::const_pointer const_pointer;
  typedef typename Tree::reference reference;
  typedef typename Tree::const_reference const_reference;
  typedef typename Tree::size_type size_type;
  typedef typename Tree::difference_type difference_type;
  typedef typename Tree::iterator iterator;
  typedef typename Tree::const_iterator const_iterator;
  typedef typename Tree::reverse_iterator reverse_iterator;
  typedef typename Tree::const_reverse_iterator const_reverse_iterator;

 public:
  // Default constructor.
  btree_container(const key_compare &comp, const allocator_type &alloc)
      : tree_(comp, alloc) {
  }

  // Copy constructor.
  btree_container(const self_type &x)
      : tree_(x.tree_) {
  }

  // Iterator routines.
  iterator begin() { return tree_.begin(); }
  const_iterator begin() const { return tree_.begin(); }
  iterator end() { return tree_.end(); }
  const_iterator end() const { return tree_.end(); }
  reverse_iterator rbegin() { return tree_.rbegin(); }
  const_reverse_iterator rbegin() const { return tree_.rbegin(); }
  reverse_iterator rend() { return tree_.rend(); }
  const_reverse_iterator rend() const { return tree_.rend(); }

  // Lookup routines.
  iterator lower_bound(const key_type &key) {
    return tree_.lower_bound(key);
  }
  const_iterator lower_bound(const key_type &key) const {
    return tree_.lower_bound(key);
  }
  iterator upper_bound(const key_type &key) {
    return tree_.upper_bound(key);
  }
  const_iterator upper_bound(const key_type &key) const {
    return tree_.upper_bound(key);
  }
  std::pair<iterator,iterator> equal_range(const key_type &key) {
    return tree_.equal_range(key);
  }
  std::pair<const_iterator,const_iterator> equal_range(const key_type &key) const {
    return tree_.equal_range(key);
  }

  // Utility routines.
  void clear() {
    tree_.clear();
  }
  void swap(self_type &x) {
    tree_.swap(x.tree_);
  }
  // Debugging aids: textual dump and internal-invariant check.
  void dump(std::ostream &os) const {
    tree_.dump(os);
  }
  void verify() const {
    tree_.verify();
  }

  // Size routines.
  size_type size() const { return tree_.size(); }
  size_type max_size() const { return tree_.max_size(); }
  bool empty() const { return tree_.empty(); }
  // btree-specific statistics (not part of the STL container interface).
  size_type height() const { return tree_.height(); }
  size_type internal_nodes() const { return tree_.internal_nodes(); }
  size_type leaf_nodes() const { return tree_.leaf_nodes(); }
  size_type nodes() const { return tree_.nodes(); }
  size_type bytes_used() const { return tree_.bytes_used(); }
  static double average_bytes_per_value() {
    return Tree::average_bytes_per_value();
  }
  double fullness() const { return tree_.fullness(); }
  double overhead() const { return tree_.overhead(); }

  // Element-wise equality: O(n) walk over both containers in sorted order.
  bool operator==(const self_type& x) const {
    if (size() != x.size()) {
      return false;
    }
    for (const_iterator i = begin(), xi = x.begin(); i != end(); ++i, ++xi) {
      if (*i != *xi) {
        return false;
      }
    }
    return true;
  }

  bool operator!=(const self_type& other) const {
    return !operator==(other);
  }

 protected:
  Tree tree_;
};
// Stream a textual dump of the tree's structure (debugging aid).
template <typename T>
inline std::ostream& operator<<(std::ostream &os, const btree_container<T> &b) {
  b.dump(os);
  return os;
}
// A common base class for btree_set and safe_btree_set: adds the
// unique-key lookup/insert/erase operations on top of btree_container.
template <typename Tree>
class btree_unique_container : public btree_container<Tree> {
  typedef btree_unique_container<Tree> self_type;
  typedef btree_container<Tree> super_type;

 public:
  typedef typename Tree::key_type key_type;
  typedef typename Tree::value_type value_type;
  typedef typename Tree::size_type size_type;
  typedef typename Tree::key_compare key_compare;
  typedef typename Tree::allocator_type allocator_type;
  typedef typename Tree::iterator iterator;
  typedef typename Tree::const_iterator const_iterator;

 public:
  // Default constructor.
  btree_unique_container(const key_compare &comp = key_compare(),
                         const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
  }

  // Copy constructor.
  btree_unique_container(const self_type &x)
      : super_type(x) {
  }

  // Range constructor.
  template <class InputIterator>
  btree_unique_container(InputIterator b, InputIterator e,
                         const key_compare &comp = key_compare(),
                         const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
    insert(b, e);
  }

  // Lookup routines.
  iterator find(const key_type &key) {
    return this->tree_.find_unique(key);
  }
  const_iterator find(const key_type &key) const {
    return this->tree_.find_unique(key);
  }
  // Returns 0 or 1 (keys are unique).
  size_type count(const key_type &key) const {
    return this->tree_.count_unique(key);
  }

  // Insertion routines. The bool in the pair is false when the key was
  // already present (and nothing was inserted).
  std::pair<iterator,bool> insert(const value_type &x) {
    return this->tree_.insert_unique(x);
  }
  iterator insert(iterator position, const value_type &x) {
    return this->tree_.insert_unique(position, x);
  }
  template <typename InputIterator>
  void insert(InputIterator b, InputIterator e) {
    this->tree_.insert_unique(b, e);
  }

  // Deletion routines. Returns the number of erased elements (0 or 1).
  int erase(const key_type &key) {
    return this->tree_.erase_unique(key);
  }
  // Erase the specified iterator from the btree. The iterator must be valid
  // (i.e. not equal to end()). Return an iterator pointing to the node after
  // the one that was erased (or end() if none exists).
  iterator erase(const iterator &iter) {
    return this->tree_.erase(iter);
  }
  void erase(const iterator &first, const iterator &last) {
    this->tree_.erase(first, last);
  }
};
// A common base class for btree_map and safe_btree_map: adds operator[]
// on top of the unique-key container.
template <typename Tree>
class btree_map_container : public btree_unique_container<Tree> {
  typedef btree_map_container<Tree> self_type;
  typedef btree_unique_container<Tree> super_type;

 public:
  typedef typename Tree::key_type key_type;
  typedef typename Tree::data_type data_type;
  typedef typename Tree::value_type value_type;
  typedef typename Tree::mapped_type mapped_type;
  typedef typename Tree::key_compare key_compare;
  typedef typename Tree::allocator_type allocator_type;

 private:
  // A pointer-like object which only generates its value when
  // dereferenced. Used by operator[] to avoid constructing an empty data_type
  // if the key already exists in the map.
  struct generate_value {
    generate_value(const key_type &k)
        : key(k) {
    }
    value_type operator*() const {
      return std::make_pair(key, data_type());
    }
    const key_type &key;
  };

 public:
  // Default constructor.
  btree_map_container(const key_compare &comp = key_compare(),
                      const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
  }

  // Copy constructor.
  btree_map_container(const self_type &x)
      : super_type(x) {
  }

  // Range constructor.
  template <class InputIterator>
  btree_map_container(InputIterator b, InputIterator e,
                      const key_compare &comp = key_compare(),
                      const allocator_type &alloc = allocator_type())
      : super_type(b, e, comp, alloc) {
  }

  // Insertion routines: default-constructs the mapped value only when the
  // key is absent (see generate_value above).
  data_type& operator[](const key_type &key) {
    return this->tree_.insert_unique(key, generate_value(key)).first->second;
  }
};
// A common base class for btree_multiset and btree_multimap: duplicate keys
// are allowed, so lookups/inserts/erases use the *_multi tree operations.
template <typename Tree>
class btree_multi_container : public btree_container<Tree> {
  typedef btree_multi_container<Tree> self_type;
  typedef btree_container<Tree> super_type;

 public:
  typedef typename Tree::key_type key_type;
  typedef typename Tree::value_type value_type;
  typedef typename Tree::size_type size_type;
  typedef typename Tree::key_compare key_compare;
  typedef typename Tree::allocator_type allocator_type;
  typedef typename Tree::iterator iterator;
  typedef typename Tree::const_iterator const_iterator;

 public:
  // Default constructor.
  btree_multi_container(const key_compare &comp = key_compare(),
                        const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
  }

  // Copy constructor.
  btree_multi_container(const self_type &x)
      : super_type(x) {
  }

  // Range constructor.
  template <class InputIterator>
  btree_multi_container(InputIterator b, InputIterator e,
                        const key_compare &comp = key_compare(),
                        const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
    insert(b, e);
  }

  // Lookup routines.
  iterator find(const key_type &key) {
    return this->tree_.find_multi(key);
  }
  const_iterator find(const key_type &key) const {
    return this->tree_.find_multi(key);
  }
  // May return any count >= 0 (duplicates allowed).
  size_type count(const key_type &key) const {
    return this->tree_.count_multi(key);
  }

  // Insertion routines: always succeed, duplicates are kept.
  iterator insert(const value_type &x) {
    return this->tree_.insert_multi(x);
  }
  iterator insert(iterator position, const value_type &x) {
    return this->tree_.insert_multi(position, x);
  }
  template <typename InputIterator>
  void insert(InputIterator b, InputIterator e) {
    this->tree_.insert_multi(b, e);
  }

  // Deletion routines. Returns the number of erased elements.
  int erase(const key_type &key) {
    return this->tree_.erase_multi(key);
  }
  // Erase the specified iterator from the btree. The iterator must be valid
  // (i.e. not equal to end()). Return an iterator pointing to the node after
  // the one that was erased (or end() if none exists).
  iterator erase(const iterator &iter) {
    return this->tree_.erase(iter);
  }
  void erase(const iterator &first, const iterator &last) {
    this->tree_.erase(first, last);
  }
};
} // namespace btree | |||
#endif // UTIL_BTREE_BTREE_CONTAINER_H__ |
@ -1,130 +0,0 @@ | |||
// Copyright 2013 Google Inc. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// | |||
// A btree_map<> implements the STL unique sorted associative container | |||
// interface and the pair associative container interface (a.k.a map<>) using a | |||
// btree. A btree_multimap<> implements the STL multiple sorted associative | |||
// container interface and the pair associtive container interface (a.k.a | |||
// multimap<>) using a btree. See btree.h for details of the btree | |||
// implementation and caveats. | |||
#ifndef UTIL_BTREE_BTREE_MAP_H__ | |||
#define UTIL_BTREE_BTREE_MAP_H__ | |||
#include <algorithm> | |||
#include <functional> | |||
#include <memory> | |||
#include <string> | |||
#include <utility> | |||
#include "btree.h" | |||
#include "btree_container.h" | |||
namespace btree { | |||
// The btree_map class is needed mainly for its constructors; all behavior
// comes from btree_map_container over a btree with map parameters.
// TargetNodeSize trades memory overhead for cache efficiency per node.
template <typename Key, typename Value,
          typename Compare = std::less<Key>,
          typename Alloc = std::allocator<std::pair<const Key, Value> >,
          int TargetNodeSize = 256>
class btree_map : public btree_map_container<
  btree<btree_map_params<Key, Value, Compare, Alloc, TargetNodeSize> > > {

  typedef btree_map<Key, Value, Compare, Alloc, TargetNodeSize> self_type;
  typedef btree_map_params<
    Key, Value, Compare, Alloc, TargetNodeSize> params_type;
  typedef btree<params_type> btree_type;
  typedef btree_map_container<btree_type> super_type;

 public:
  typedef typename btree_type::key_compare key_compare;
  typedef typename btree_type::allocator_type allocator_type;

 public:
  // Default constructor.
  btree_map(const key_compare &comp = key_compare(),
            const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
  }

  // Copy constructor.
  btree_map(const self_type &x)
      : super_type(x) {
  }

  // Range constructor.
  template <class InputIterator>
  btree_map(InputIterator b, InputIterator e,
            const key_compare &comp = key_compare(),
            const allocator_type &alloc = allocator_type())
      : super_type(b, e, comp, alloc) {
  }
};
// ADL-found swap overload; delegates to the container's O(1) member swap.
template <typename K, typename V, typename C, typename A, int N>
inline void swap(btree_map<K, V, C, A, N> &x,
                 btree_map<K, V, C, A, N> &y) {
  x.swap(y);
}
// The btree_multimap class is needed mainly for its constructors; all
// behavior comes from btree_multi_container (duplicate keys allowed).
template <typename Key, typename Value,
          typename Compare = std::less<Key>,
          typename Alloc = std::allocator<std::pair<const Key, Value> >,
          int TargetNodeSize = 256>
class btree_multimap : public btree_multi_container<
  btree<btree_map_params<Key, Value, Compare, Alloc, TargetNodeSize> > > {

  typedef btree_multimap<Key, Value, Compare, Alloc, TargetNodeSize> self_type;
  typedef btree_map_params<
    Key, Value, Compare, Alloc, TargetNodeSize> params_type;
  typedef btree<params_type> btree_type;
  typedef btree_multi_container<btree_type> super_type;

 public:
  typedef typename btree_type::key_compare key_compare;
  typedef typename btree_type::allocator_type allocator_type;
  typedef typename btree_type::data_type data_type;
  typedef typename btree_type::mapped_type mapped_type;

 public:
  // Default constructor.
  btree_multimap(const key_compare &comp = key_compare(),
                 const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
  }

  // Copy constructor.
  btree_multimap(const self_type &x)
      : super_type(x) {
  }

  // Range constructor.
  template <class InputIterator>
  btree_multimap(InputIterator b, InputIterator e,
                 const key_compare &comp = key_compare(),
                 const allocator_type &alloc = allocator_type())
      : super_type(b, e, comp, alloc) {
  }
};
template <typename K, typename V, typename C, typename A, int N> | |||
inline void swap(btree_multimap<K, V, C, A, N> &x, | |||
btree_multimap<K, V, C, A, N> &y) { | |||
x.swap(y); | |||
} | |||
} // namespace btree | |||
#endif // UTIL_BTREE_BTREE_MAP_H__ |
@ -1,619 +0,0 @@ | |||
#include <iostream>
#include <new>
#include <string>
#include <vector>

#include "erl_nif.h"
#include "erlterm.h"
#include "lru.h"
using namespace std; | |||
namespace { /* anonymous namespace starts */ | |||
/* Generic handle stored in every resource term created by this NIF.
 * `object` points at the real C++ object (an LRUBtree or an iterator);
 * `allocated` records whether the handle still owns that object —
 * close/1 and iterator exhaustion clear it so the resource destructor
 * does not delete twice. */
typedef struct _obj_resource {
bool allocated;
void *object;
ErlNifMutex *emtx; /* NOTE(review): declared but never created or locked in this file — confirm intent */
} object_resource;

ErlNifResourceFlags resource_flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER);

/* Resource types registered in load(): one for tables, one for iterators. */
ErlNifResourceType* lruResource;
ErlNifResourceType* iteratorResource;

/* atoms — interned once at load time */
ERL_NIF_TERM atom_ok;
ERL_NIF_TERM atom_key;
ERL_NIF_TERM atom_error;
ERL_NIF_TERM atom_invalid;
ERL_NIF_TERM atom_value;
ERL_NIF_TERM atom_max_size;
ERL_NIF_TERM atom_tab;
ERL_NIF_TERM atom_lru_old;

/* forward declarations of the resource destructors */
void lru_dtor(ErlNifEnv* env, void *lru);
void iterator_dtor(ErlNifEnv* env, void *it);
/* Module load hook: registers both resource types and interns the atoms
 * used throughout this NIF.  Returns 0 on success; nonzero makes the
 * emulator refuse to load the module. */
int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info){
  lruResource = enif_open_resource_type(env,
                                        "btreelru_nif",
                                        "lru",
                                        lru_dtor,
                                        resource_flags,
                                        NULL);
  iteratorResource = enif_open_resource_type(env,
                                             "btreelru_nif",
                                             "iterator",
                                             iterator_dtor,
                                             resource_flags,
                                             NULL);

  /* enif_open_resource_type can return NULL (e.g. a disallowed takeover);
   * fail the load here rather than crash later on a NULL resource type. */
  if (lruResource == NULL || iteratorResource == NULL)
    return -1;

  atom_ok = enif_make_atom(env, "ok");
  atom_key = enif_make_atom(env, "key");
  atom_error = enif_make_atom(env, "error");
  atom_invalid = enif_make_atom(env, "invalid");
  atom_value = enif_make_atom(env, "value");
  atom_max_size = enif_make_atom(env, "max_size");
  atom_tab = enif_make_atom(env, "tab");
  atom_lru_old = enif_make_atom(env, "lru_old");

  return 0;
}
/* Hot-code-upgrade hooks: nothing to migrate — resource types and atoms
 * survive, so both report success unconditionally. */
int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info){
return 0;
}

int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data,ERL_NIF_TERM load_info){
return 0;
}
/* Resource destructor for tables, run by the Erlang GC.  Tables already
 * closed explicitly via close/1 have allocated == false and are skipped. */
void lru_dtor(ErlNifEnv* env, void* _lru_btree) {
object_resource *lru_btree = (object_resource*) _lru_btree;
if (lru_btree->allocated)
delete (LRUBtree<ErlTerm,ErlTerm>*) lru_btree->object;
}

/* Resource destructor for iterators.  iterate_next/2 frees an exhausted
 * iterator eagerly and clears `allocated`, so this only fires for
 * iterators dropped before reaching end(). */
void iterator_dtor(ErlNifEnv* env, void* _lru_iterator) {
object_resource *lru_iterator = (object_resource*) _lru_iterator;
if (lru_iterator->allocated)
delete (LRUBtree<ErlTerm,ErlTerm>::iterator*) lru_iterator->object;
}

/* LRUBtree node-release callback: frees the per-entry NIF environment
 * that owns the copied key/value terms (allocated in write/3). */
void node_free(LRUBtree<ErlTerm,ErlTerm> *bt_lru, LRUNode<ErlTerm,ErlTerm> *node) {
enif_free_env((ErlNifEnv*)node->kvenv);
return;
}
/* LRUBtree eviction callback: if a pid was registered via register_pid/2,
 * notify it with {lru_old, Key, Value} for the entry being evicted.
 * currenv is the calling process's env (passed through LRUBtree::put). */
void node_kickout(LRUBtree<ErlTerm,ErlTerm> *bt_lru, LRUNode<ErlTerm,ErlTerm> *node, void *currenv) {
ErlNifEnv *env = (ErlNifEnv *) currenv;

if (bt_lru->pid_set) {
enif_send(env, &bt_lru->pid, NULL, enif_make_tuple3(env, atom_lru_old, node->key.t, node->data.t));
}

return;
}
// next(Lru, Key) -> {NextKey, NextValue} | {error, invalid}
// Looks up Key's node and returns the entry one step along the recency
// chain (toward the oldest end); {error, invalid} when Key is absent or
// its node is the last in the chain.
ERL_NIF_TERM next(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 2)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  LRUBtree<ErlTerm,ErlTerm> *tree = (LRUBtree<ErlTerm,ErlTerm> *) res->object;

  ErlTerm lookup;
  lookup.t = argv[1];

  LRUNode<ErlTerm,ErlTerm> *node = tree->get(lookup);
  if (node == NULL || node->next == NULL)
    return enif_make_tuple2(env, atom_error, atom_invalid);

  LRUNode<ErlTerm,ErlTerm> *succ = node->next;
  return enif_make_tuple2(env,
                          enif_make_copy(env, succ->key.t),
                          enif_make_copy(env, succ->data.t));
}
// prev(Lru, Key) -> {PrevKey, PrevValue} | {error, invalid}
// Mirror of next/2: one step toward the most-recent end of the chain.
ERL_NIF_TERM prev(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 2)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  LRUBtree<ErlTerm,ErlTerm> *tree = (LRUBtree<ErlTerm,ErlTerm> *) res->object;

  ErlTerm lookup;
  lookup.t = argv[1];

  LRUNode<ErlTerm,ErlTerm> *node = tree->get(lookup);
  if (node == NULL || node->prev == NULL)
    return enif_make_tuple2(env, atom_error, atom_invalid);

  LRUNode<ErlTerm,ErlTerm> *pred = node->prev;
  return enif_make_tuple2(env,
                          enif_make_copy(env, pred->key.t),
                          enif_make_copy(env, pred->data.t));
}
/* create(MaxSize) -> {ok, Lru} | {error, max_size} | {error, alloction}
 * Allocates a new LRU table resource; MaxSize is the entry-count budget. */
ERL_NIF_TERM create(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  unsigned long max_size;
  object_resource *lru;
  LRUBtree<ErlTerm,ErlTerm> *bt_lru;
  ERL_NIF_TERM lru_term;

  /* first argument: maximum number of entries kept before eviction */
  if (enif_get_ulong(env, argv[0], &max_size) < 1){
    return enif_make_tuple2(env, atom_error, atom_max_size);
  }

  /* BUG FIX: plain `new` throws std::bad_alloc instead of returning NULL,
   * so the error branch below was dead code and OOM would terminate the
   * VM.  The non-throwing form makes the error tuple reachable.  (The
   * "alloction" spelling is preserved — callers may match on that atom.) */
  if (!(bt_lru = new (std::nothrow) LRUBtree<ErlTerm,ErlTerm>(max_size, node_free, node_kickout))) {
    return enif_make_tuple2(env, atom_error, enif_make_atom(env, "alloction"));
  }

  lru = (object_resource *) enif_alloc_resource(lruResource, sizeof(object_resource));
  lru->object = bt_lru;
  lru->allocated = true;

  lru_term = enif_make_resource(env, lru);
  enif_release_resource(lru); /* ownership passes to the garbage-collected term */

  return enif_make_tuple2(env, atom_ok, lru_term);
}
/* seek(Lru, Key) -> {{Key, Value}, Iterator} | {error, invalid}
 * Positions a heap-allocated btree iterator at the first entry whose key
 * is >= Key (lower_bound) and returns it as a resource usable with
 * iterate_next/2. */
ERL_NIF_TERM seek(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  object_resource *lru;
  object_resource *it;
  LRUBtree<ErlTerm,ErlTerm> *bt_lru;
  LRUBtree<ErlTerm,ErlTerm>::iterator *bt_it_;
  LRUBtree<ErlTerm,ErlTerm>::iterator bt_it;
  ErlTerm key;
  ERL_NIF_TERM it_term;
  ERL_NIF_TERM kv;

  /* consistency fix: every other fixed-arity NIF in this file validates
   * argc — seek/2 was the lone exception */
  if (argc != 2) {
    return enif_make_badarg(env);
  }
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
    return enif_make_badarg(env);
  }

  key.t = argv[1];
  bt_lru = (LRUBtree<ErlTerm,ErlTerm> *)lru->object;

  bt_it = bt_lru->bmap.lower_bound(key);
  if ( bt_it == bt_lru->bmap.end() ) {
    return enif_make_tuple2(env, atom_error, atom_invalid);
  }

  /* copy the iterator to the heap; it is deleted by iterator_dtor or by
   * iterate_next/2 once it reaches end() */
  bt_it_ = new LRUBtree<ErlTerm,ErlTerm>::iterator;
  *bt_it_ = bt_it;

  it = (object_resource *) enif_alloc_resource(iteratorResource, sizeof(object_resource));
  it->object = bt_it_;
  it->allocated = true;
  it_term = enif_make_resource(env, it);
  enif_release_resource(it);

  /* NOTE(review): key/value are returned without enif_make_copy here,
   * unlike read/2 — confirm this cross-env term use is intended. */
  kv = enif_make_tuple2(env, bt_it->second->key.t, bt_it->second->data.t);
  return enif_make_tuple2(env, kv, it_term);
}
/* iterate_next(Lru, Iterator) -> {ok, {Key, Value}} | {error, invalid}
 * Advances an iterator created by seek/2 and returns the entry it lands
 * on.  When the iterator steps off the end it is deleted immediately and
 * the resource is invalidated (object = NULL, allocated = false), so any
 * later call returns {error, invalid} instead of touching freed memory. */
ERL_NIF_TERM iterate_next(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
object_resource *it;
LRUBtree<ErlTerm,ErlTerm>::iterator *bt_it_;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
ERL_NIF_TERM kv;

if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[1], iteratorResource, (void **) &it)) {
return enif_make_badarg(env);
}

bt_lru = (LRUBtree<ErlTerm,ErlTerm> *)lru->object;
bt_it_ = (LRUBtree<ErlTerm,ErlTerm>::iterator *) it->object;

/* iterator was already exhausted and freed by a previous call */
if (bt_it_ == NULL)
return enif_make_tuple2(env, atom_error, atom_invalid);

(*bt_it_)++;
if ( *bt_it_ == bt_lru->bmap.end() ) {
it->allocated = false;
delete bt_it_;
it->object = NULL;
return enif_make_tuple2(env, atom_error, atom_invalid);
}

/* NOTE(review): terms returned without enif_make_copy, straight from the
 * node's private env — read/2 copies; confirm this is intended. */
kv = enif_make_tuple2(env, (*bt_it_)->second->key.t, (*bt_it_)->second->data.t);

return enif_make_tuple2(env, atom_ok, kv);
}
/* close(Lru) -> ok | {error, invalid}
 * Explicitly destroys the table (lru_dtor then skips it).  Flagged as a
 * dirty scheduler job in nif_funcs because deleting a large tree blocks. */
ERL_NIF_TERM close(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  object_resource *lru;
  LRUBtree<ErlTerm,ErlTerm> *bt_lru;

  if (argc != 1) {
    return enif_make_badarg(env);
  }
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
    return enif_make_badarg(env);
  }

  /* BUG FIX: a second close/1 on the same handle previously deleted the
   * tree again (double free).  Report {error, invalid} instead. */
  if (!lru->allocated || lru->object == NULL) {
    return enif_make_tuple2(env, atom_error, atom_invalid);
  }

  bt_lru = (LRUBtree<ErlTerm,ErlTerm> *)lru->object;
  lru->allocated = false;
  /* clear the pointer so later API calls on a closed handle fail on NULL
   * deterministically rather than dereferencing freed memory */
  lru->object = NULL;
  delete bt_lru;

  return atom_ok;
}
// read(Lru, Key) -> {ok, {Key, Value}} | {error, invalid}
// Copies the stored key/value out of the node's private env into the
// calling process's env before returning them.
ERL_NIF_TERM read(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 2)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  LRUBtree<ErlTerm,ErlTerm> *tree = (LRUBtree<ErlTerm,ErlTerm> *) res->object;

  ErlTerm lookup;
  lookup.t = argv[1];

  LRUNode<ErlTerm,ErlTerm> *hit = tree->get(lookup);
  if (hit == NULL)
    return enif_make_tuple2(env, atom_error, atom_invalid);

  ERL_NIF_TERM kv = enif_make_tuple2(env,
                                     enif_make_copy(env, hit->key.t),
                                     enif_make_copy(env, hit->data.t));
  return enif_make_tuple2(env, atom_ok, kv);
}
// remove(Lru, Key) -> ok
// Erases Key from the table; a miss is silently ignored (still ok).
ERL_NIF_TERM remove(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 2)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  ErlTerm victim;
  victim.t = argv[1];

  ((LRUBtree<ErlTerm,ErlTerm> *) res->object)->erase(victim);
  return atom_ok;
}
// oldest(Lru) -> {Key, Value} | {error, invalid}
// Returns the entry at the eviction end of the recency list.
ERL_NIF_TERM oldest(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 1)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  LRUBtree<ErlTerm,ErlTerm> *tree = (LRUBtree<ErlTerm,ErlTerm> *) res->object;
  LRUNode<ErlTerm,ErlTerm> *node = tree->getOldest();
  if (node == NULL)
    return enif_make_tuple2(env, atom_error, atom_invalid);

  return enif_make_tuple2(env,
                          enif_make_copy(env, node->key.t),
                          enif_make_copy(env, node->data.t));
}
// latest(Lru) -> {Key, Value} | {error, invalid}
// Returns the most recently written entry ("last in" on the recency list).
ERL_NIF_TERM latest(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 1)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  LRUBtree<ErlTerm,ErlTerm> *tree = (LRUBtree<ErlTerm,ErlTerm> *) res->object;
  LRUNode<ErlTerm,ErlTerm> *node = tree->getLatest();
  if (node == NULL)
    return enif_make_tuple2(env, atom_error, atom_invalid);

  return enif_make_tuple2(env,
                          enif_make_copy(env, node->key.t),
                          enif_make_copy(env, node->data.t));
}
/* last(Lru) -> {Key, Value} | {error, invalid}
 * Returns the entry with the greatest key in btree order. */
ERL_NIF_TERM last(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  object_resource *lru;
  LRUBtree<ErlTerm,ErlTerm> *bt_lru;
  LRUNode<ErlTerm,ErlTerm> *node;
  ERL_NIF_TERM key;
  ERL_NIF_TERM value;

  if (argc != 1) {
    return enif_make_badarg(env);
  }
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
    return enif_make_badarg(env);
  }

  bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;

  /* BUG FIX: on an empty map rbegin() == rend(), so dereferencing it was
   * undefined behavior.  Report {error, invalid} like the other accessors. */
  if (bt_lru->bmap.empty())
    return enif_make_tuple2(env, atom_error, atom_invalid);

  node = bt_lru->bmap.rbegin()->second;
  if (!node)
    return enif_make_tuple2(env, atom_error, atom_invalid);

  key = enif_make_copy(env, node->key.t);
  value = enif_make_copy(env, node->data.t);

  return enif_make_tuple2(env, key, value);
}
/* first(Lru) -> {Key, Value} | {error, invalid}
 * Returns the entry with the smallest key in btree order. */
ERL_NIF_TERM first(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  object_resource *lru;
  LRUBtree<ErlTerm,ErlTerm> *bt_lru;
  LRUNode<ErlTerm,ErlTerm> *node;
  ERL_NIF_TERM key;
  ERL_NIF_TERM value;

  if (argc != 1) {
    return enif_make_badarg(env);
  }
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
    return enif_make_badarg(env);
  }

  bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;

  /* BUG FIX: begin() on an empty map equals end(); dereferencing it was
   * undefined behavior.  Report {error, invalid} like the other accessors. */
  if (bt_lru->bmap.empty())
    return enif_make_tuple2(env, atom_error, atom_invalid);

  node = bt_lru->bmap.begin()->second;
  if (!node)
    return enif_make_tuple2(env, atom_error, atom_invalid);

  key = enif_make_copy(env, node->key.t);
  value = enif_make_copy(env, node->data.t);

  return enif_make_tuple2(env, key, value);
}
/* write(Lru, Key, Value) -> ok
 * Copies Key and Value into a freshly allocated per-entry environment
 * (kv_env) so their lifetime is tied to the node; node_free releases that
 * env when the entry is overwritten, erased, evicted or the table is
 * destroyed.  Inserting may evict the oldest entry (LRUBtree::check_size),
 * which can send a notification to the registered pid. */
ERL_NIF_TERM write(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
ErlTerm key;
ErlTerm value;
ErlNifEnv *kv_env;
size_t size;

if (argc != 3) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}

bt_lru = (LRUBtree<ErlTerm, ErlTerm> *) lru->object;

kv_env = enif_alloc_env();
key.t = enif_make_copy(kv_env, argv[1]);
value.t = enif_make_copy(kv_env, argv[2]);

/* do not use the size of term
size = enif_size_term(key.t);
size += enif_size_term(value.t);
*/
/* capacity accounting is per entry, not per byte */
size = 1;

bt_lru->put(key, value, kv_env, env, size);

return atom_ok;
}
// register_pid(Lru, Pid) -> ok
// Registers the process that receives {lru_old, K, V} eviction messages.
ERL_NIF_TERM register_pid(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 2)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  LRUBtree<ErlTerm,ErlTerm> *tree = (LRUBtree<ErlTerm,ErlTerm> *) res->object;
  if (!enif_get_local_pid(env, argv[1], &(tree->pid)))
    return enif_make_badarg(env);

  tree->pid_set = true;
  return atom_ok;
}
// unregister_pid(Lru) -> ok
// Stops eviction notifications; the stored pid is simply marked unset.
ERL_NIF_TERM unregister_pid(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 1)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  ((LRUBtree<ErlTerm,ErlTerm> *) res->object)->pid_set = false;
  return atom_ok;
}
// get_registered_pid(Lru) -> pid() | {error, invalid}
ERL_NIF_TERM get_registered_pid(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 1)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  LRUBtree<ErlTerm,ErlTerm> *tree = (LRUBtree<ErlTerm,ErlTerm> *) res->object;
  if (!tree->pid_set)
    return enif_make_tuple2(env, atom_error, atom_invalid);

  return enif_make_pid(env, &(tree->pid));
}
// get_size(Lru) -> non_neg_integer()
// Current accounted size (entry count, given write/3 uses size = 1).
ERL_NIF_TERM get_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 1)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  return enif_make_ulong(env, ((LRUBtree<ErlTerm,ErlTerm> *) res->object)->getSize());
}
// get_max_size(Lru) -> non_neg_integer()
ERL_NIF_TERM get_max_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 1)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  return enif_make_ulong(env, ((LRUBtree<ErlTerm,ErlTerm> *) res->object)->getMaxSize());
}
// set_max_size(Lru, MaxSize) -> ok | {error, max_size}
// Adjusts the eviction threshold; takes effect on the next write.
ERL_NIF_TERM set_max_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
  if (argc != 2)
    return enif_make_badarg(env);

  object_resource *res;
  if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
    return enif_make_badarg(env);

  unsigned long limit;
  if (enif_get_ulong(env, argv[1], &limit) < 1)
    return enif_make_tuple2(env, atom_error, atom_max_size);

  ((LRUBtree<ErlTerm,ErlTerm> *) res->object)->setMaxSize(limit);
  return atom_ok;
}
/* NIF dispatch table: {erlang_name, arity, c_function[, flags]}.
 * close/1 runs on a dirty IO scheduler because deleting a large tree
 * can block for a long time. */
ErlNifFunc nif_funcs[] = {
{"create", 1, create},
{"close", 1, close, ERL_NIF_DIRTY_JOB_IO_BOUND},
{"register_pid", 2, register_pid},
{"unregister_pid", 1, unregister_pid},
{"get_registered_pid", 1, get_registered_pid},
{"get_size", 1, get_size},
{"get_max_size", 1, get_max_size},
{"set_max_size", 2, set_max_size},
{"oldest", 1, oldest},
{"latest", 1, latest},
{"last", 1, last},
{"first", 1, first},
{"read", 2, read},
{"next", 2, next},
{"prev", 2, prev},
{"seek", 2, seek},
{"iterate_next", 2, iterate_next},
{"remove", 2, remove},
{"write", 3, write}
};

} /* anonymous namespace ends */

/* Binds the dispatch table above to the btree_lru Erlang module. */
ERL_NIF_INIT(btree_lru, nif_funcs, load, reload, upgrade, NULL)
@ -1,71 +0,0 @@ | |||
#include "erl_nif.h" | |||
class ErlTerm { | |||
public: | |||
ERL_NIF_TERM t; | |||
static void *operator new(size_t size) { | |||
return enif_alloc(size); | |||
} | |||
static void operator delete(void *block) { | |||
enif_free(block); | |||
} | |||
bool operator< (const ErlTerm &term) { | |||
if (enif_compare(t, term.t) < 0) | |||
return true; | |||
return false; | |||
} | |||
bool operator< (ErlTerm &term) { | |||
if (enif_compare(t, term.t) < 0) | |||
return true; | |||
return false; | |||
} | |||
bool operator> (const ErlTerm &term) { | |||
if (enif_compare(t, term.t) > 0) | |||
return true; | |||
return false; | |||
} | |||
bool operator> (ErlTerm &term) { | |||
if (enif_compare(t, term.t) > 0) | |||
return true; | |||
return false; | |||
} | |||
bool operator== (const ErlTerm &term) { | |||
if (enif_compare(t, term.t) == 0) | |||
return true; | |||
return false; | |||
} | |||
bool operator== (ErlTerm &term) { | |||
if (enif_compare(t, term.t) == 0) | |||
return true; | |||
return false; | |||
} | |||
}; | |||
// Non-member ordering used by std::less<ErlTerm> (e.g. as btree_map's
// default comparator); delegates to the VM's total term order.
inline bool operator < (const ErlTerm &a, const ErlTerm &b) {
  return enif_compare(a.t, b.t) < 0;
}
#if 0
// Disabled: std::hash specialization for ErlTerm.  Only needed when the
// storage backend is a hash map; the btree backend used here relies on
// operator< instead.
namespace std {
template <>
struct hash<ErlTerm>
{
size_t operator()(const ErlTerm& term) const
{
return (size_t) enif_hash_term(term.t);
}
};
}
#endif
@ -1,266 +0,0 @@ | |||
#include "btree_map.h" | |||
#include <algorithm> | |||
#include <iostream> | |||
#include "murmurhash2.h" | |||
#include "binary.h" | |||
#include "erl_nif.h" | |||
// extend std::hash to understand the Binary type so it can key unordered
// containers; hashes the raw bytes with MurmurHash2 under a fixed seed.
namespace std {
template <>
struct hash<Binary>
{
size_t operator()(const Binary& b) const
{
return MurmurHash2(b.bin, b.size, 4242);
}
};
}
/* One LRU entry: key/value pair plus intrusive links for the recency list
 * maintained by LRUBtree (prev points toward `latest`, next toward
 * `oldest`).  kvenv is an opaque per-entry allocation (a NIF environment
 * in this project) released by the owner's node_free callback; size is
 * this entry's contribution to the table's accounted size. */
template <typename K, typename V>
struct LRUNode
{
K key;
V data;
void *kvenv;
LRUNode<K,V> *prev;
LRUNode<K,V> *next;
size_t size;

LRUNode(void *kvenv = NULL, size_t size=0) : kvenv(kvenv), prev(NULL), next(NULL), size(size) { }

/*
static void *LRUNode<ErlTerm,ErlTerm>::operator new(size_t size) {
return enif_alloc(size);
}

static void operator delete(void *block) {
enif_free(block);
}
*/

/* Debug helper: prints up to 10 keys along the next chain and flags a
 * suspected cycle if the walk did not terminate within the budget. */
void printChain() {
LRUNode<K,V>* node;
int i=11;

std::cout << "(";
for(node = this; node && i; node = node->next, i--) {
std::cout << node->key << " -> ";
}
if (node) {
std::cout << " loop detection end ";
} else {
std::cout << " end ";
}
std::cout << ")" << std::endl;
}

/* Debug helpers: print the neighbouring keys when present. */
void printNextPrevKey() {
std::cout << "(";
printNextKey();
printPrevKey();
std::cout << ")";
}

void printNextKey() {
if (next) {
std::cout << "next key " << next->key << " ";
}
}

void printPrevKey() {
if (prev) {
std::cout << "prev key " << prev->key << " ";
}
}
};
template <class K,class V> | |||
class LRUBtree { | |||
private: | |||
LRUNode<K,V> *oldest; | |||
LRUNode<K,V> *latest; | |||
unsigned long size; | |||
unsigned long max_size; | |||
void (*node_free)(LRUBtree<K,V> *lru, LRUNode<K,V> *node); | |||
void (*node_kickout)(LRUBtree<K,V> *lru, LRUNode<K,V> *node, void *call_env); | |||
typedef btree::btree_map<K, LRUNode<K,V>*> LRUBtree_map; | |||
public: | |||
LRUBtree_map bmap; | |||
bool pid_set = false; | |||
ErlNifPid pid; | |||
typedef typename LRUBtree_map::iterator iterator; | |||
typedef typename LRUBtree_map::reverse_iterator reverse_iterator; | |||
void printLatest() { | |||
if (latest) { | |||
std::cout << " latest " << latest->key; | |||
} else { | |||
std::cout << " no data in lru "; | |||
} | |||
} | |||
private: | |||
LRUNode<K,V>* erase(LRUNode<K,V> *node) { | |||
if (node->next) { | |||
node->next->prev = node->prev; | |||
} | |||
if (node->prev) { | |||
node->prev->next = node->next; | |||
} | |||
if (node == oldest) { | |||
oldest = node->prev; | |||
} | |||
if (node == latest) { | |||
latest = node->next; | |||
} | |||
if (node_free) { | |||
node_free(this, node); | |||
} | |||
node->next = NULL; | |||
node->prev = NULL; | |||
return node; | |||
} | |||
void printOldest() { | |||
if(oldest) { | |||
std::cout << " oldest " << oldest->key; | |||
} else { | |||
std::cout << " no data in lru "; | |||
} | |||
} | |||
void check_size(void *call_env) { | |||
if (size > max_size) { | |||
if (oldest) { // remove check if oldest exist and rely on max_size always being positive | |||
if (node_kickout) | |||
node_kickout(this, oldest, call_env); | |||
erase(oldest->key); | |||
} | |||
} | |||
} | |||
#define SIZE_100MB 100*1024*1024 | |||
public: | |||
LRUBtree(unsigned long max_size = SIZE_100MB, | |||
void (*node_free)(LRUBtree<K,V> *lru, LRUNode<K,V> *node) = NULL, | |||
void (*node_kickout)(LRUBtree<K,V> *lru, LRUNode<K,V> *node, void *call_env) = NULL) | |||
: oldest(NULL), latest(NULL), size(0), max_size(max_size), node_free(node_free), | |||
node_kickout(node_kickout) { } | |||
~LRUBtree() { | |||
LRUNode<K,V> *node; | |||
LRUNode<K,V> *next; | |||
node = latest; | |||
while(node) { | |||
if (node_free) { | |||
node_free(this, node); | |||
} | |||
next = node->next; | |||
delete node; | |||
node = next; | |||
} | |||
} | |||
void printSize() { | |||
std::cout << "size " << size << std::endl; | |||
} | |||
unsigned long getSize() { | |||
return size; | |||
} | |||
unsigned long getMaxSize() { | |||
return max_size; | |||
} | |||
void setMaxSize(unsigned long max_size) { | |||
this->max_size = max_size; | |||
} | |||
void erase(K key) { | |||
LRUNode<K,V> *node; | |||
if ((node = bmap[key])) { | |||
erase(node); | |||
bmap.erase(key); | |||
size -= node->size; | |||
delete node; | |||
} | |||
} | |||
inline void put(K key, V data, | |||
void *kvenv = NULL, void *call_env = NULL, | |||
size_t size = 1) { | |||
LRUNode<K,V> *node; | |||
this->size += size; | |||
check_size(call_env); | |||
// overwrite already existing key | |||
if ((node = bmap[key])) { | |||
this->size -= node->size; | |||
erase(node); | |||
node->kvenv = kvenv; | |||
node->next = latest; | |||
node->size = size; | |||
if (node->next) { | |||
node->next->prev = node; | |||
} | |||
if (!oldest) { | |||
oldest = node; | |||
} | |||
latest = node; | |||
node->key = key; | |||
node->data = data; | |||
} | |||
else if (!oldest) { | |||
node = new LRUNode<K,V>; | |||
node->key = key; | |||
node->data = data; | |||
node->kvenv = kvenv; | |||
node->size = size; | |||
oldest = node; | |||
latest = node; | |||
bmap[node->key] = node; | |||
} | |||
else { | |||
node = new LRUNode<K,V>; | |||
node->key = key; | |||
node->data = data; | |||
node->kvenv = kvenv; | |||
node->size = size; | |||
latest->prev = node; | |||
node->next = latest; | |||
latest = node; | |||
bmap[node->key] = node; | |||
} | |||
} | |||
LRUNode<K,V>* get(K key) { | |||
return bmap[key]; | |||
} | |||
LRUNode<K,V>* getOldest() { | |||
return oldest; | |||
} | |||
LRUNode<K,V>* getLatest() { | |||
return latest; | |||
} | |||
LRUNode<K,V>* getNext(LRUNode<K,V> *node) { | |||
return node->next; | |||
} | |||
LRUNode<K,V>* getPrev(LRUNode<K,V> *node) { | |||
return node->prev; | |||
} | |||
}; | |||
@ -1,73 +0,0 @@ | |||
//-----------------------------------------------------------------------------
// MurmurHash2, by Austin Appleby

// Note - This code makes a few assumptions about how your machine behaves -

// 1. We can read a 4-byte value from any address without crashing
// 2. sizeof(int) == 4

// And it has a few limitations -

// 1. It will not work incrementally.
// 2. It will not produce the same results on little-endian and big-endian
//    machines.
/* MurmurHash2 over `len` bytes of `key`, mixed with `seed`.
 * Fixed versus the original: the 4-byte loads are assembled byte-by-byte
 * instead of via *(unsigned int *)data — that cast was undefined behavior
 * (strict aliasing, and misaligned reads on strict architectures).  The
 * byte assembly is explicitly little-endian, which matches the original's
 * output on little-endian hosts. */
unsigned int MurmurHash2 ( const void * key, int len, unsigned int seed )
{
  // 'm' and 'r' are mixing constants generated offline.
  // They're not really 'magic', they just happen to work well.
  const unsigned int m = 0x5bd1e995;
  const int r = 24;

  // Initialize the hash to a 'random' value
  unsigned int h = seed ^ len;

  // Mix 4 bytes at a time into the hash
  const unsigned char * data = (const unsigned char *)key;

  while(len >= 4)
  {
    /* little-endian load without aliasing/alignment UB */
    unsigned int k = (unsigned int)data[0]
                   | ((unsigned int)data[1] << 8)
                   | ((unsigned int)data[2] << 16)
                   | ((unsigned int)data[3] << 24);

    k *= m;
    k ^= k >> r;
    k *= m;

    h *= m;
    h ^= k;

    data += 4;
    len -= 4;
  }

  // Handle the last few bytes of the input array
  switch(len)
  {
  case 3: h ^= data[2] << 16; /* fallthrough */
  case 2: h ^= data[1] << 8;  /* fallthrough */
  case 1: h ^= data[0];
          h *= m;
  };

  // Do a few final mixes of the hash to ensure the last few
  // bytes are well-incorporated.
  h ^= h >> 13;
  h *= m;
  h ^= h >> 15;

  return h;
}
@ -1,7 +0,0 @@ | |||
{port_specs, [ | |||
{"../../priv/btreelru_nif.so", ["btreelru_nif.cpp"]} | |||
]}. | |||
@ -1,90 +0,0 @@ | |||
#include "erl_nif.h" | |||
#define A_OK(env) enif_make_atom(env, "ok") | |||
#define assert_badarg(S, Env) if (! S) { return enif_make_badarg(env); } | |||
static ErlNifResourceType* array_handle = NULL; | |||
static void array_handle_cleanup(ErlNifEnv* env, void* arg) {} | |||
/* Number of array slots kept in priv_data. */
#define MAX_ARRAYS 1000

/* Module load hook: registers the resource type and allocates the pointer
 * table that maps a caller-chosen slot index to a heap array.
 * Returns nonzero on failure so the emulator refuses to load. */
static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info)
{
    ErlNifResourceFlags flags = ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER;
    array_handle = enif_open_resource_type(env, "native_array_nif", "array_handle",
                                           &array_handle_cleanup, flags, 0);

    /* pointer table for up to MAX_ARRAYS arrays, indexed by the caller */
    *priv = enif_alloc(MAX_ARRAYS * sizeof(void*));
    if (*priv == NULL)
        return -1; /* refuse to load on OOM instead of crashing later */

    /* zero every slot so get/delete on a never-allocated index sees NULL
     * instead of an uninitialized (garbage) pointer */
    for (int i = 0; i < MAX_ARRAYS; i++)
        ((void**)*priv)[i] = NULL;

    return 0;
}
/* Module unload hook: releases the pointer table allocated in load().
 * NOTE(review): arrays still registered in the table are not freed here —
 * callers are expected to delete/1 them first; confirm intent. */
static void unload(ErlNifEnv* env, void* priv)
{
enif_free(priv);
}
/* new(RefIndex, Length) -> ok
 * Allocates a byte array of Length and stores its pointer in slot
 * RefIndex of the priv table. */
static ERL_NIF_TERM new_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* slot index argument */
    int refindex;
    assert_badarg(enif_get_int(env, argv[0], &refindex), env);

    /* BUG FIX: the priv table has exactly 1000 slots; an unchecked index
     * was an out-of-bounds write into arbitrary memory. */
    if (refindex < 0 || refindex >= 1000)
        return enif_make_badarg(env);

    /* length argument */
    unsigned long length;
    assert_badarg(enif_get_ulong(env, argv[1], &length), env);

    /* allocate the array; badarg on OOM rather than storing NULL */
    unsigned char* ref = enif_alloc(length);
    assert_badarg(ref, env);

    /* record the pointer in the slot */
    *((unsigned char**)enif_priv_data(env) + refindex) = ref;

    return A_OK(env);
}
/* get(RefIndex, Offset) -> integer()
 * Reads one byte at 1-based Offset from the array in slot RefIndex.
 * NOTE(review): the array length is not stored, so Offset itself cannot
 * be bounds-checked here — the caller must stay within the allocation. */
static ERL_NIF_TERM get_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* slot index argument */
    int refindex;
    assert_badarg(enif_get_int(env, argv[0], &refindex), env);

    /* BUG FIX: bound the slot index to the 1000-entry priv table to stop
     * out-of-bounds reads of the pointer array. */
    if (refindex < 0 || refindex >= 1000)
        return enif_make_badarg(env);

    unsigned char* ref = *((unsigned char**)enif_priv_data(env) + refindex);
    assert_badarg(ref, env);

    /* 1-based offset argument */
    unsigned long offset;
    assert_badarg(enif_get_ulong(env, argv[1], &offset), env);

    return enif_make_int(env, (int)(*(ref + offset - 1)));
}
/* put(RefIndex, Offset, NewVal) -> ok
 * Writes one byte at 1-based Offset in the array stored in slot RefIndex. */
static ERL_NIF_TERM put_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* slot index argument */
    int refindex;
    assert_badarg(enif_get_int(env, argv[0], &refindex), env);

    /* BUG FIX: bound the slot index (see new_nif) */
    if (refindex < 0 || refindex >= 1000)
        return enif_make_badarg(env);

    unsigned char* ref = *((unsigned char**)enif_priv_data(env) + refindex);
    /* consistency fix: get_nif rejects an empty slot, put did not —
     * writing through a NULL/stale slot was a crash or wild write */
    assert_badarg(ref, env);

    /* 1-based offset argument */
    unsigned long offset;
    assert_badarg(enif_get_ulong(env, argv[1], &offset), env);

    /* new byte value (truncated to unsigned char, as before) */
    unsigned int newval;
    assert_badarg(enif_get_uint(env, argv[2], &newval), env);

    *(ref + offset - 1) = (unsigned char)newval;

    return A_OK(env);
}
/* delete(RefIndex) -> ok
 * Frees the array in slot RefIndex and clears the slot. */
static ERL_NIF_TERM delete_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* slot index argument */
    int refindex;
    assert_badarg(enif_get_int(env, argv[0], &refindex), env);

    /* BUG FIX: bound the slot index (see new_nif) */
    if (refindex < 0 || refindex >= 1000)
        return enif_make_badarg(env);

    unsigned char** slot = (unsigned char**)enif_priv_data(env) + refindex;
    unsigned char* ref = *slot;
    /* deleting an empty slot is badarg instead of freeing NULL/garbage */
    assert_badarg(ref, env);

    enif_free(ref);
    /* BUG FIX: clear the slot so a second delete/1 (double free) or a
     * later get/put (use-after-free) fails with badarg instead */
    *slot = NULL;

    return A_OK(env);
}
/* NIF dispatch table: {erlang_name, arity, c_function}. */
static ErlNifFunc nif_funcs[] = {
{"new", 2, new_nif},
{"get", 2, get_nif},
{"put", 3, put_nif},
{"delete", 1, delete_nif},
};

/* Binds the table to the native_array Erlang module; load/unload manage
 * the shared pointer table kept in priv_data. */
ERL_NIF_INIT(native_array, nif_funcs, &load, NULL, NULL, &unload)
@ -1,7 +0,0 @@ | |||
{port_specs, [ | |||
{"../../priv/native_array_nif.so", ["*.c"]} | |||
]}. | |||
@ -1,905 +0,0 @@ | |||
#include "NeuralTable.h" | |||
/* !!!! A NOTE ON KEYS !!!! | |||
* Keys should be integer values passed from the erlang emulator, | |||
* and should be generated by a hashing function. There is no easy | |||
* way to hash an erlang term from a NIF, but ERTS is more than | |||
* capable of doing so. | |||
* | |||
* Additionally, this workaround means that traditional collision | |||
* handling mechanisms for hash tables will not work without | |||
* special consideration. For instance, to compare keys as you | |||
* would by storing linked lists, you must retrieve the stored | |||
* tuple and call enif_compare or enif_is_identical on the key | |||
* elements of each tuple. | |||
*/ | |||
// Global registry of live tables, keyed by the table's atom name (as a string).
table_set NeuralTable::tables;
// Run flag observed by the GC, reclaimer and batch worker threads.
atomic<bool> NeuralTable::running(true);
// Guards mutations of `tables` (see MakeTable / Shutdown).
ErlNifMutex *NeuralTable::table_mutex;
// Construct a table keyed on tuple position `kp`.
// Each of the BUCKET_COUNT buckets gets its own process-independent
// environment (owns the stored terms), rwlock, garbage estimate and
// reclaim list; the GC/reclaimer and batch worker threads are started last.
NeuralTable::NeuralTable(unsigned int kp) {
    for (int i = 0; i < BUCKET_COUNT; ++i) {
        ErlNifEnv *env = enif_alloc_env();
        env_buckets[i] = env;
        locks[i] = enif_rwlock_create("neural_table");
        garbage_cans[i] = 0;
        reclaimable[i] = enif_make_list(env, 0);
    }
    start_gc();
    start_batch();
    key_pos = kp;
}
// Tear down in reverse order of construction: join the worker threads
// first (they touch the buckets), then destroy per-bucket locks and
// free the per-bucket environments (releases all stored terms).
NeuralTable::~NeuralTable() {
    stop_batch();
    stop_gc();
    for (int i = 0; i < BUCKET_COUNT; ++i) {
        enif_rwlock_destroy(locks[i]);
        enif_free_env(env_buckets[i]);
    }
}
/* ================================================================ | |||
* MakeTable | |||
* Allocates a new table, assuming a unique atom identifier. This | |||
* table is stored in a static container. All interactions with | |||
* the table must be performed through the static class API. | |||
*/ | |||
/* ================================================================
 * MakeTable
 * Allocates a new table under a unique atom identifier and stores it
 * in the static registry. Returns 'ok', or badarg if the name is not
 * an atom, key_pos is not an unsigned integer, or the table exists.
 */
ERL_NIF_TERM NeuralTable::MakeTable(ErlNifEnv *env, ERL_NIF_TERM name, ERL_NIF_TERM key_pos) {
    char *atom;
    string key;
    unsigned int len = 0,
                 pos = 0;
    ERL_NIF_TERM ret;
    // FIX: validate arguments. The original ignored these return values,
    // so a non-atom name read an uninitialized buffer and a non-integer
    // key_pos silently created a table keyed on position 0.
    if (!enif_get_atom_length(env, name, &len, ERL_NIF_LATIN1) ||
        !enif_get_uint(env, key_pos, &pos)) {
        return enif_make_badarg(env);
    }
    // Copy the atom text into a string registry key.
    atom = (char*)enif_alloc(len + 1);
    if (atom == NULL) {
        return enif_make_badarg(env);
    }
    if (enif_get_atom(env, name, atom, len + 1, ERL_NIF_LATIN1) <= 0) {
        enif_free(atom);
        return enif_make_badarg(env);
    }
    key = atom;
    enif_free(atom);
    enif_mutex_lock(table_mutex);
    if (NeuralTable::tables.find(key) != NeuralTable::tables.end()) {
        // Table already exists: refuse to clobber it.
        ret = enif_make_badarg(env);
    } else {
        // All good. Make the table.
        NeuralTable::tables[key] = new NeuralTable(pos);
        ret = enif_make_atom(env, "ok");
    }
    enif_mutex_unlock(table_mutex);
    return ret;
}
/* ================================================================ | |||
* GetTable | |||
* Retrieves a handle to the table referenced by name, assuming | |||
* such a table exists. If not, throw badarg. | |||
*/ | |||
/* ================================================================
 * GetTable
 * Resolves a table handle by its atom name; returns NULL if the name
 * is not an atom or no such table exists (callers map NULL to badarg).
 */
NeuralTable* NeuralTable::GetTable(ErlNifEnv *env, ERL_NIF_TERM name) {
    char *atom = NULL;
    string key;
    unsigned len = 0;
    NeuralTable *ret = NULL;
    table_set::const_iterator it;
    // FIX: bail out on a non-atom name instead of reading an
    // uninitialized buffer (the original ignored these return values).
    if (!enif_get_atom_length(env, name, &len, ERL_NIF_LATIN1)) {
        return NULL;
    }
    atom = (char*)enif_alloc(len + 1);
    if (atom == NULL) {
        return NULL;
    }
    if (enif_get_atom(env, name, atom, len + 1, ERL_NIF_LATIN1) <= 0) {
        enif_free(atom);
        return NULL;
    }
    key = atom;
    enif_free(atom);
    // Look for the table and return its pointer if found.
    // NOTE(review): this lookup does not take table_mutex; it races with
    // MakeTable/Shutdown — confirm tables are only created/destroyed at
    // quiescent points.
    it = NeuralTable::tables.find(key);
    if (it != NeuralTable::tables.end()) {
        ret = it->second;
    }
    return ret;
}
/* ================================================================ | |||
* Insert | |||
* Inserts a tuple into the table with key. | |||
*/ | |||
/* ================================================================
 * Insert
 * Stores `object` under `key`. Returns {ok, Old} when a previous
 * value existed (Old is queued for reclamation), otherwise 'ok'.
 */
ERL_NIF_TERM NeuralTable::Insert(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object) {
    NeuralTable *tb;
    ERL_NIF_TERM ret, old;
    unsigned long int entry_key = 0;
    // Grab table or bail.
    tb = GetTable(env, table);
    if (tb == NULL) {
        return enif_make_badarg(env);
    }
    // FIX: reject non-integer keys. The original ignored the return
    // value, so a bad key silently operated on entry 0.
    if (!enif_get_ulong(env, key, &entry_key)) {
        return enif_make_badarg(env);
    }
    // Lock the key's bucket for writing.
    tb->rwlock(entry_key);
    // If a value already exists, queue it for reclamation and return a
    // copy of the old value to the caller.
    if (tb->find(entry_key, old)) {
        tb->reclaim(entry_key, old);
        ret = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_copy(env, old));
    } else {
        ret = enif_make_atom(env, "ok");
    }
    // Store the new value, then release the bucket.
    tb->put(entry_key, object);
    tb->rwunlock(entry_key);
    return ret;
}
/* ================================================================ | |||
* InsertNew | |||
* Inserts a tuple into the table with key, assuming there is not | |||
* a value with key already. Returns true if there was no value | |||
* for key, or false if there was. | |||
*/ | |||
/* ================================================================
 * InsertNew
 * Stores `object` under `key` only if no value exists. Returns 'true'
 * when inserted, 'false' when a value was already present.
 */
ERL_NIF_TERM NeuralTable::InsertNew(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object) {
    NeuralTable *tb;
    ERL_NIF_TERM ret, old;
    unsigned long int entry_key = 0;
    // Get the table or bail.
    tb = GetTable(env, table);
    if (tb == NULL) {
        return enif_make_badarg(env);
    }
    // FIX: reject non-integer keys instead of silently using key 0
    // (the original ignored this return value).
    if (!enif_get_ulong(env, key, &entry_key)) {
        return enif_make_badarg(env);
    }
    // Get write lock for the key.
    tb->rwlock(entry_key);
    if (tb->find(entry_key, old)) {
        // Key was found. Return false and do not insert.
        ret = enif_make_atom(env, "false");
    } else {
        // Key was not found. Insert and return true.
        tb->put(entry_key, object);
        ret = enif_make_atom(env, "true");
    }
    // Release write lock for the key.
    tb->rwunlock(entry_key);
    return ret;
}
/* ================================================================ | |||
* Increment | |||
* Processes a list of update operations. Each operation specifies | |||
* a position in the stored tuple to update and an integer to add | |||
* to it. | |||
*/ | |||
// Applies a list of {Position, Increment} ops to the stored tuple under
// `key`, returning the list of updated values (in reverse op order).
// The whole update is atomic per key: it runs under the bucket's write
// lock and aborts via `bailout` without committing on any invalid op.
ERL_NIF_TERM NeuralTable::Increment(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) {
    NeuralTable *tb;
    ERL_NIF_TERM ret, old;
    ERL_NIF_TERM it;
    unsigned long int entry_key = 0;
    // Get table handle or bail
    tb = GetTable(env, table);
    if (tb == NULL) {
        return enif_make_badarg(env);
    }
    // Get key value
    // NOTE(review): return value unchecked — a non-integer key operates
    // on entry 0; confirm keys are always integers (see file header note).
    enif_get_ulong(env, key, &entry_key);
    // Acquire read/write lock for key
    tb->rwlock(entry_key);
    // Try to read the value as it is
    if (tb->find(entry_key, old)) {
        // Value exists
        ERL_NIF_TERM op_cell;
        const ERL_NIF_TERM *tb_tpl;
        const ERL_NIF_TERM *op_tpl;
        ERL_NIF_TERM *new_tpl;
        ErlNifEnv *bucket_env = tb->get_env(entry_key);
        unsigned long int pos = 0;
        long int incr = 0;
        unsigned int ops_length = 0;   // NOTE(review): never written/used
        int op_arity = 0,
            tb_arity = 0;
        // Expand tuple to work on elements
        enif_get_tuple(bucket_env, old, &tb_arity, &tb_tpl);
        // Allocate space for a copy the contents of the table
        // tuple and copy it in. All changes are to be made to
        // the copy of the tuple.
        new_tpl = (ERL_NIF_TERM*)enif_alloc(sizeof(ERL_NIF_TERM) * tb_arity);
        memcpy(new_tpl, tb_tpl, sizeof(ERL_NIF_TERM) * tb_arity);
        // Create empty list cell for return value.
        ret = enif_make_list(env, 0);
        // Set iterator to first cell of ops
        it = ops;
        while(!enif_is_empty_list(env, it)) {
            long int value = 0;
            enif_get_list_cell(env, it, &op_cell, &it);       // op_cell = hd(it), it = tl(it)
            enif_get_tuple(env, op_cell, &op_arity, &op_tpl); // op_arity = tuple_size(op_cell)
            enif_get_ulong(env, op_tpl[0], &pos);             // pos  = position to update (1-based)
            enif_get_long(env, op_tpl[1], &incr);             // incr = amount to add
            // Is the operation trying to modify a nonexistant
            // position?
            if (pos <= 0 || pos > tb_arity) {
                ret = enif_make_badarg(env);
                goto bailout;
            }
            // Is the operation trying to add to a value that's
            // not a number?
            if (!enif_is_number(bucket_env, new_tpl[pos - 1])) {
                ret = enif_make_badarg(env);
                goto bailout;
            }
            // Update the value stored in the tuple.
            // NOTE(review): new_tpl[pos-1] is a bucket_env term but is read
            // with `env` here — confirm enif_get_long tolerates that.
            enif_get_long(env, new_tpl[pos - 1], &value);
            // Old element becomes garbage; queue it for the reclaimer.
            tb->reclaim(entry_key, new_tpl[pos - 1]);
            new_tpl[pos - 1] = enif_make_long(bucket_env, value + incr);
            // Copy the new value to the head of the return list
            ret = enif_make_list_cell(env, enif_make_copy(env, new_tpl[pos - 1]), ret);
        }
        // Commit the rebuilt tuple into the bucket env.
        tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity));
        // Bailout allows cancelling the update opertion
        // in case something goes wrong. It must always
        // come after tb->put and before enif_free and
        // rwunlock
        bailout:
        enif_free(new_tpl);
    } else {
        ret = enif_make_badarg(env);
    }
    // Release the rwlock for entry_key
    tb->rwunlock(entry_key);
    return ret;
}
/* ================================================================ | |||
* Unshift | |||
* Processes a list of update operations. Each update operation is | |||
* a tuple specifying the position of a list in the stored value to | |||
* update and a list of values to append. Elements are shifted from | |||
* the input list to the stored list, so: | |||
* | |||
* unshift([a,b,c,d]) results in [d,c,b,a] | |||
*/ | |||
/* ================================================================
 * Unshift
 * Applies a list of {Position, Values} ops: each Values list is shifted
 * element-by-element onto the head of the list stored at Position, so
 * unshift([a,b,c,d]) yields [d,c,b,a| Stored]. Returns the new lengths
 * (in reverse op order). Atomic per key; aborts via `bailout` on any
 * invalid op without committing.
 */
ERL_NIF_TERM NeuralTable::Unshift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) {
    NeuralTable *tb;
    ERL_NIF_TERM ret, old, it;
    unsigned long int entry_key;
    ErlNifEnv *bucket_env;
    tb = GetTable(env, table);
    if (tb == NULL) {
        return enif_make_badarg(env);
    }
    enif_get_ulong(env, key, &entry_key);
    tb->rwlock(entry_key);
    bucket_env = tb->get_env(entry_key);
    if (tb->find(entry_key, old)) {
        const ERL_NIF_TERM *old_tpl,
                           *op_tpl;
        ERL_NIF_TERM *new_tpl;
        int tb_arity = 0,
            op_arity = 0;
        unsigned long pos = 0;
        unsigned int new_length = 0;
        ERL_NIF_TERM op, unshift, copy_it, copy_val;
        enif_get_tuple(bucket_env, old, &tb_arity, &old_tpl);
        // Work on a scratch copy of the stored tuple; only committed on success.
        new_tpl = (ERL_NIF_TERM*)enif_alloc(sizeof(ERL_NIF_TERM) * tb_arity);
        memcpy(new_tpl, old_tpl, sizeof(ERL_NIF_TERM) * tb_arity);
        it = ops;
        ret = enif_make_list(env, 0);
        while (!enif_is_empty_list(env, it)) {
            // Examine the operation.
            enif_get_list_cell(env, it, &op, &it);       // op = hd(it), it = tl(it)
            enif_get_tuple(env, op, &op_arity, &op_tpl); // {Position, Values}
            enif_get_ulong(env, op_tpl[0], &pos);        // tuple position to modify
            unshift = op_tpl[1];                         // values to unshift
            // Position must be within the stored tuple's bounds.
            if (pos <= 0 || pos > (unsigned long)tb_arity) {
                ret = enif_make_badarg(env);
                goto bailout;
            }
            // Make sure we were passed a list of things to push onto the
            // pos-th element of the entry.
            if (!enif_is_list(env, unshift)) {
                ret = enif_make_badarg(env);
                // BUG FIX: the original set badarg but fell through and
                // then iterated the non-list term below, which never
                // reaches an empty list — abort the whole operation.
                goto bailout;
            }
            // Move the values one by one to the head of new_tpl[pos - 1].
            copy_it = unshift;
            while (!enif_is_empty_list(env, copy_it)) {
                enif_get_list_cell(env, copy_it, &copy_val, &copy_it);
                new_tpl[pos - 1] = enif_make_list_cell(bucket_env, enif_make_copy(bucket_env, copy_val), new_tpl[pos - 1]);
            }
            enif_get_list_length(bucket_env, new_tpl[pos - 1], &new_length);
            ret = enif_make_list_cell(env, enif_make_uint(env, new_length), ret);
        }
        tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity));
        bailout:
        enif_free(new_tpl);
    } else {
        ret = enif_make_badarg(env);
    }
    tb->rwunlock(entry_key);
    return ret;
}
/* ================================================================
 * Shift
 * Applies a list of {Position, Count} ops: removes up to Count leading
 * elements from the list stored at Position and returns them (reversed);
 * a negative Count drains the whole list. Removed terms are queued for
 * reclamation. Atomic per key; aborts via `bailout` on any invalid op.
 */
ERL_NIF_TERM NeuralTable::Shift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) {
    NeuralTable *tb;
    ERL_NIF_TERM ret, old, it;
    unsigned long int entry_key;
    ErlNifEnv *bucket_env;
    tb = GetTable(env, table);
    if (tb == NULL) {
        return enif_make_badarg(env);
    }
    enif_get_ulong(env, key, &entry_key);
    tb->rwlock(entry_key);
    bucket_env = tb->get_env(entry_key);
    if (tb->find(entry_key, old)) {
        const ERL_NIF_TERM *old_tpl;
        const ERL_NIF_TERM *op_tpl;
        ERL_NIF_TERM *new_tpl;
        int tb_arity = 0,
            op_arity = 0;
        unsigned long pos = 0;
        // BUG FIX: count was `unsigned long` read via enif_get_ulong, so
        // the `count < 0` ("drain everything") branch below was dead code
        // and a negative Count from Erlang failed to parse. Use a signed
        // long read with enif_get_long.
        long count = 0;
        ERL_NIF_TERM op, shifted, reclaim;
        enif_get_tuple(bucket_env, old, &tb_arity, &old_tpl);
        // Work on a scratch copy; only committed on success.
        new_tpl = (ERL_NIF_TERM*)enif_alloc(tb_arity * sizeof(ERL_NIF_TERM));
        memcpy(new_tpl, old_tpl, sizeof(ERL_NIF_TERM) * tb_arity);
        it = ops;
        ret = enif_make_list(env, 0);
        reclaim = enif_make_list(bucket_env, 0);
        while(!enif_is_empty_list(env, it)) {
            enif_get_list_cell(env, it, &op, &it);
            enif_get_tuple(env, op, &op_arity, &op_tpl);
            enif_get_ulong(env, op_tpl[0], &pos);
            enif_get_long(env, op_tpl[1], &count);
            if (pos <= 0 || pos > (unsigned long)tb_arity) {
                ret = enif_make_badarg(env);
                goto bailout;
            }
            if (!enif_is_list(env, new_tpl[pos -1])) {
                ret = enif_make_badarg(env);
                goto bailout;
            }
            shifted = enif_make_list(env, 0);
            if (count > 0) {
                // Shift up to `count` leading elements.
                ERL_NIF_TERM copy_it = new_tpl[pos - 1],
                             val;
                long i = 0;
                while (i < count && !enif_is_empty_list(bucket_env, copy_it)) {
                    enif_get_list_cell(bucket_env, copy_it, &val, &copy_it);
                    ++i;
                    shifted = enif_make_list_cell(env, enif_make_copy(env, val), shifted);
                    // BUG FIX: the reclaim list outlives this call (it is
                    // stored via tb->reclaim), so its cells must be built
                    // in bucket_env, not the process-bound `env`.
                    reclaim = enif_make_list_cell(bucket_env, val, reclaim);
                }
                new_tpl[pos - 1] = copy_it;
            } else if (count < 0) {
                // Negative count: drain the whole stored list.
                ERL_NIF_TERM copy_it = new_tpl[pos - 1],
                             val;
                while (!enif_is_empty_list(bucket_env, copy_it)) {
                    enif_get_list_cell(bucket_env, copy_it, &val, &copy_it);
                    shifted = enif_make_list_cell(env, enif_make_copy(env, val), shifted);
                    reclaim = enif_make_list_cell(bucket_env, val, reclaim);
                }
                new_tpl[pos - 1] = copy_it;
            }
            ret = enif_make_list_cell(env, shifted, ret);
        }
        tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity));
        tb->reclaim(entry_key, reclaim);
        bailout:
        enif_free(new_tpl);
    } else {
        ret = enif_make_badarg(env);
    }
    tb->rwunlock(entry_key);
    return ret;
}
/* ================================================================
 * Swap
 * Applies a list of {Position, NewValue} ops: replaces each tuple
 * element and returns the old values (in reverse op order). Replaced
 * terms are queued for reclamation. Atomic per key; aborts via
 * `bailout` on any invalid position without committing.
 */
ERL_NIF_TERM NeuralTable::Swap(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) {
    NeuralTable *tb;
    ERL_NIF_TERM ret, old, it;
    unsigned long int entry_key;
    ErlNifEnv *bucket_env;
    tb = GetTable(env, table);
    if (tb == NULL) {
        return enif_make_badarg(env);
    }
    enif_get_ulong(env, key, &entry_key);
    tb->rwlock(entry_key);
    bucket_env = tb->get_env(entry_key);
    if (tb->find(entry_key, old)) {
        const ERL_NIF_TERM *old_tpl;
        const ERL_NIF_TERM *op_tpl;
        ERL_NIF_TERM *new_tpl;
        int tb_arity = 0,
            op_arity = 0;
        unsigned long pos = 0;
        // Removed unused locals `list` and `shifted` from the original.
        ERL_NIF_TERM op, reclaim;
        enif_get_tuple(bucket_env, old, &tb_arity, &old_tpl);
        // Work on a scratch copy; only committed on success.
        new_tpl = (ERL_NIF_TERM*)enif_alloc(tb_arity * sizeof(ERL_NIF_TERM));
        memcpy(new_tpl, old_tpl, sizeof(ERL_NIF_TERM) * tb_arity);
        it = ops;
        ret = enif_make_list(env, 0);
        reclaim = enif_make_list(bucket_env, 0);
        while (!enif_is_empty_list(env, it)) {
            enif_get_list_cell(env, it, &op, &it);
            enif_get_tuple(env, op, &op_arity, &op_tpl);
            enif_get_ulong(env, op_tpl[0], &pos);
            // Cast avoids the signed/unsigned comparison of the original.
            if (pos <= 0 || pos > (unsigned long)tb_arity) {
                ret = enif_make_badarg(env);
                goto bailout;
            }
            // Old element becomes garbage; hand a copy of it back.
            reclaim = enif_make_list_cell(bucket_env, new_tpl[pos - 1], reclaim);
            ret = enif_make_list_cell(env, enif_make_copy(env, new_tpl[pos -1]), ret);
            new_tpl[pos - 1] = enif_make_copy(bucket_env, op_tpl[1]);
        }
        tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity));
        tb->reclaim(entry_key, reclaim);
        bailout:
        enif_free(new_tpl);
    } else {
        ret = enif_make_badarg(env);
    }
    tb->rwunlock(entry_key);
    return ret;
}
/* ================================================================
 * Delete
 * Removes the value stored under `key`, queues it for reclamation, and
 * returns a copy of it; returns 'undefined' if the key was absent.
 */
ERL_NIF_TERM NeuralTable::Delete(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key) {
    NeuralTable *tb;
    ERL_NIF_TERM val, ret;
    unsigned long int entry_key = 0;
    tb = GetTable(env, table);
    if (tb == NULL) { return enif_make_badarg(env); }
    // FIX: reject non-integer keys instead of silently deleting entry 0
    // (the original ignored this return value).
    if (!enif_get_ulong(env, key, &entry_key)) { return enif_make_badarg(env); }
    tb->rwlock(entry_key);
    if (tb->erase(entry_key, val)) {
        tb->reclaim(entry_key, val);
        ret = enif_make_copy(env, val);
    } else {
        ret = enif_make_atom(env, "undefined");
    }
    tb->rwunlock(entry_key);
    return ret;
}
/* ================================================================
 * Empty
 * Wipes the whole table atomically: every bucket's write lock is
 * acquired before anything is cleared, so no reader can observe a
 * partially emptied table.
 */
ERL_NIF_TERM NeuralTable::Empty(ErlNifEnv *env, ERL_NIF_TERM table) {
    NeuralTable *handle = GetTable(env, table);
    if (handle == NULL) { return enif_make_badarg(env); }
    // Phase 1: lock everything for isolation.
    for (int b = 0; b < BUCKET_COUNT; ++b) {
        enif_rwlock_rwlock(handle->locks[b]);
    }
    // Phase 2: clear indexes, drop all bucket-owned terms, reset
    // garbage bookkeeping (fresh reclaim list in the cleared env).
    for (int b = 0; b < BUCKET_COUNT; ++b) {
        handle->hash_buckets[b].clear();
        enif_clear_env(handle->env_buckets[b]);
        handle->garbage_cans[b] = 0;
        handle->reclaimable[b] = enif_make_list(handle->env_buckets[b], 0);
    }
    // Phase 3: release every bucket.
    for (int b = 0; b < BUCKET_COUNT; ++b) {
        enif_rwlock_rwunlock(handle->locks[b]);
    }
    return enif_make_atom(env, "ok");
}
/* ================================================================
 * Get
 * Returns a copy of the value stored under `key`, or 'undefined'.
 * Takes only the bucket's read lock.
 */
ERL_NIF_TERM NeuralTable::Get(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key) {
    NeuralTable *tb;
    ERL_NIF_TERM ret, val;
    unsigned long int entry_key = 0;
    // Acquire table handle, or quit if the table doesn't exist.
    tb = GetTable(env, table);
    if (tb == NULL) { return enif_make_badarg(env); }
    // FIX: reject non-integer keys instead of silently reading entry 0
    // (the original ignored this return value).
    if (!enif_get_ulong(env, key, &entry_key)) { return enif_make_badarg(env); }
    tb->rlock(entry_key);
    if (!tb->find(entry_key, val)) {
        ret = enif_make_atom(env, "undefined");
    } else {
        ret = enif_make_copy(env, val);
    }
    tb->runlock(entry_key);
    return ret;
}
/* ================================================================
 * Dump
 * Queues an asynchronous read-only dump; the batch thread later sends
 * {'$neural_batch_response', Values} to the calling process.
 * (Removed the unused local `ret` from the original.)
 */
ERL_NIF_TERM NeuralTable::Dump(ErlNifEnv *env, ERL_NIF_TERM table) {
    NeuralTable *tb = GetTable(env, table);
    ErlNifPid self;
    if (tb == NULL) { return enif_make_badarg(env); }
    enif_self(env, &self);
    tb->add_batch_job(self, &NeuralTable::batch_dump);
    return enif_make_atom(env, "$neural_batch_wait");
}
/* ================================================================
 * Drain
 * Queues an asynchronous drain (dump + clear); the batch thread later
 * sends {'$neural_batch_response', Values} to the calling process.
 * (Removed the unused local `ret` from the original.)
 */
ERL_NIF_TERM NeuralTable::Drain(ErlNifEnv *env, ERL_NIF_TERM table) {
    NeuralTable *tb = GetTable(env, table);
    ErlNifPid self;
    if (tb == NULL) { return enif_make_badarg(env); }
    enif_self(env, &self);
    tb->add_batch_job(self, &NeuralTable::batch_drain);
    return enif_make_atom(env, "$neural_batch_wait");
}
// Report the tuple position this table is keyed on.
ERL_NIF_TERM NeuralTable::GetKeyPosition(ErlNifEnv *env, ERL_NIF_TERM table) {
    NeuralTable *handle = GetTable(env, table);
    return (handle == NULL) ? enif_make_badarg(env)
                            : enif_make_uint(env, handle->key_pos);
}
// Manually nudge this table's GC thread.
// NOTE(review): gc_cond is signaled without holding gc_mutex; a wakeup
// can be missed if the GC thread is between its predicate check and
// enif_cond_wait — confirm acceptable (the reclaimer re-signals
// periodically when garbage accumulates).
ERL_NIF_TERM NeuralTable::GarbageCollect(ErlNifEnv *env, ERL_NIF_TERM table) {
    NeuralTable *tb = GetTable(env, table);
    if (tb == NULL) { return enif_make_badarg(env); }
    enif_cond_signal(tb->gc_cond);
    return enif_make_atom(env, "ok");
}
// Report the table's current estimated garbage, in bytes.
ERL_NIF_TERM NeuralTable::GarbageSize(ErlNifEnv *env, ERL_NIF_TERM table) {
    NeuralTable *handle = GetTable(env, table);
    if (handle == NULL) { return enif_make_badarg(env); }
    unsigned long int total = handle->garbage_size();
    return enif_make_ulong(env, total);
}
// GC thread main loop. Sleeps on gc_cond (under gc_mutex) until the
// estimated garbage reaches RECLAIM_THRESHOLD or shutdown is signaled,
// then compacts every bucket via gc(). Note that after `running` flips
// to false the loop body still runs one final gc() pass before exiting.
void* NeuralTable::DoGarbageCollection(void *table) {
    NeuralTable *tb = (NeuralTable*)table;
    enif_mutex_lock(tb->gc_mutex);
    while (running.load(memory_order_acquire)) {
        while (running.load(memory_order_acquire) && tb->garbage_size() < RECLAIM_THRESHOLD) {
            enif_cond_wait(tb->gc_cond, tb->gc_mutex);
        }
        tb->gc();
    }
    enif_mutex_unlock(tb->gc_mutex);
    return NULL;
}
// Reclaimer thread: every ~50ms, walks up to `max_eat` cells of each
// bucket's reclaimable list, adds their size estimates to the bucket's
// garbage counter, and signals the GC thread when the total looks big.
// NOTE(review): tb->reclaimable[i] is never advanced here, so the same
// cells appear to be re-estimated on every pass and garbage_cans[i]
// grows until gc() resets it — confirm this over-estimation is intended.
// NOTE(review): `t += tb->garbage_cans[i]` adds the whole running
// counter once per consumed cell, further overstating t — verify.
void* NeuralTable::DoReclamation(void *table) {
    const int max_eat = 5;
    NeuralTable *tb = (NeuralTable*)table;
    int i = 0, c = 0, t = 0;;
    ERL_NIF_TERM tl, hd;
    ErlNifEnv *env;
    while (running.load(memory_order_acquire)) {
        for (i = 0; i < BUCKET_COUNT; ++i) {
            c = 0;
            t = 0;
            tb->rwlock(i);
            env = tb->get_env(i);
            tl = tb->reclaimable[i];
            while (c++ < max_eat && !enif_is_empty_list(env, tl)) {
                enif_get_list_cell(env, tl, &hd, &tl);
                tb->garbage_cans[i] += estimate_size(env, hd);
                t += tb->garbage_cans[i];
            }
            tb->rwunlock(i);
            if (t >= RECLAIM_THRESHOLD) {
                enif_cond_signal(tb->gc_cond);
            }
        }
        // Throttle the scan so this thread stays cheap.
#ifdef _WIN32
        Sleep(50);
#else
        usleep(50000);
#endif
    }
    return NULL;
}
void* NeuralTable::DoBatchOperations(void *table) { | |||
NeuralTable *tb = (NeuralTable*)table; | |||
enif_mutex_lock(tb->batch_mutex); | |||
while (running.load(memory_order_acquire)) { | |||
while (running.load(memory_order_acquire) && tb->batch_jobs.empty()) { | |||
enif_cond_wait(tb->batch_cond, tb->batch_mutex); | |||
} | |||
BatchJob job = tb->batch_jobs.front(); | |||
(tb->*job.fun)(job.pid); | |||
tb->batch_jobs.pop(); | |||
} | |||
enif_mutex_unlock(tb->batch_mutex); | |||
return NULL; | |||
} | |||
// Create the GC mutex/cond and spawn the collector and reclaimer
// threads. Thread-creation failures are only logged; the table then
// runs without garbage collection.
void NeuralTable::start_gc() {
    int ret;
    gc_mutex = enif_mutex_create("neural_table_gc");
    gc_cond = enif_cond_create("neural_table_gc");
    ret = enif_thread_create("neural_garbage_collector", &gc_tid, NeuralTable::DoGarbageCollection, (void*)this, NULL);
    if (ret != 0) {
        printf("[neural_gc] Can't create GC thread. Error Code: %d\r\n", ret);
    }
    // Start the reclaimer after the garbage collector.
    ret = enif_thread_create("neural_reclaimer", &rc_tid, NeuralTable::DoReclamation, (void*)this, NULL);
    if (ret != 0) {
        printf("[neural_gc] Can't create reclamation thread. Error Code: %d\r\n", ret);
    }
}
// Wake the GC thread so it observes running == false, then join both
// background threads. (The reclaimer polls `running` with a sleep, so
// it needs no signal.)
// NOTE(review): gc_mutex/gc_cond are never destroyed — confirm the
// leak on table deletion is intentional.
void NeuralTable::stop_gc() {
    enif_cond_signal(gc_cond);
    // Join the reclaimer before the garbage collector.
    enif_thread_join(rc_tid, NULL);
    enif_thread_join(gc_tid, NULL);
}
// Create the batch mutex/cond and spawn the batch worker thread that
// services asynchronous dump/drain jobs. Failure is only logged.
void NeuralTable::start_batch() {
    int ret;
    batch_mutex = enif_mutex_create("neural_table_batch");
    batch_cond = enif_cond_create("neural_table_batch");
    ret = enif_thread_create("neural_batcher", &batch_tid, NeuralTable::DoBatchOperations, (void*)this, NULL);
    if (ret != 0) {
        printf("[neural_batch] Can't create batch thread. Error Code: %d\r\n", ret);
    }
}
// Wake the batch thread so it observes running == false, then join it.
// NOTE(review): batch_mutex/batch_cond are never destroyed — confirm
// the leak on table deletion is intentional.
void NeuralTable::stop_batch() {
    enif_cond_signal(batch_cond);
    enif_thread_join(batch_tid, NULL);
}
void NeuralTable::put(unsigned long int key, ERL_NIF_TERM tuple) { | |||
ErlNifEnv *env = get_env(key); | |||
hash_buckets[GET_BUCKET(key)][key] = enif_make_copy(env, tuple); | |||
} | |||
// Map a key to the process-independent env that owns its bucket's terms.
ErlNifEnv* NeuralTable::get_env(unsigned long int key) {
    return env_buckets[GET_BUCKET(key)];
}
bool NeuralTable::find(unsigned long int key, ERL_NIF_TERM &ret) { | |||
hash_table *bucket = &hash_buckets[GET_BUCKET(key)]; | |||
hash_table::iterator it = bucket->find(key); | |||
if (bucket->end() == it) { | |||
return false; | |||
} else { | |||
ret = it->second; | |||
return true; | |||
} | |||
} | |||
bool NeuralTable::erase(unsigned long int key, ERL_NIF_TERM &val) { | |||
hash_table *bucket = &hash_buckets[GET_BUCKET(key)]; | |||
hash_table::iterator it = bucket->find(key); | |||
bool ret = false; | |||
if (it != bucket->end()) { | |||
ret = true; | |||
val = it->second; | |||
bucket->erase(it); | |||
} | |||
return ret; | |||
} | |||
// Enqueue an asynchronous dump/drain job for `pid` and wake the batch
// worker. Signaling after unlock is safe here: the worker re-checks the
// queue under batch_mutex before sleeping again.
void NeuralTable::add_batch_job(ErlNifPid pid, BatchFunction fun) {
    BatchJob job;
    job.pid = pid;
    job.fun = fun;
    enif_mutex_lock(batch_mutex);
    batch_jobs.push(job);
    enif_mutex_unlock(batch_mutex);
    enif_cond_signal(batch_cond);
}
// Drain every bucket: copy all stored tuples out, then reset the bucket
// (clear its env, index, garbage counter and reclaim list), and finally
// send {'$neural_batch_response', Values} to `pid`. Buckets are locked
// one at a time, so the drain is not a single atomic snapshot.
void NeuralTable::batch_drain(ErlNifPid pid) {
    ErlNifEnv *env = enif_alloc_env();
    ERL_NIF_TERM msg, value;
    value = enif_make_list(env, 0);
    for (int i = 0; i < BUCKET_COUNT; ++i) {
        enif_rwlock_rwlock(locks[i]);
        for (hash_table::iterator it = hash_buckets[i].begin(); it != hash_buckets[i].end(); ++it) {
            value = enif_make_list_cell(env, enif_make_copy(env, it->second), value);
        }
        // Reset: clearing the bucket env frees every stored term at once;
        // the fresh reclaim list is rebuilt in the now-empty env.
        enif_clear_env(env_buckets[i]);
        hash_buckets[i].clear();
        garbage_cans[i] = 0;
        reclaimable[i] = enif_make_list(env_buckets[i], 0);
        enif_rwlock_rwunlock(locks[i]);
    }
    msg = enif_make_tuple2(env, enif_make_atom(env, "$neural_batch_response"), value);
    enif_send(NULL, &pid, env, msg);
    enif_free_env(env);
}
// Snapshot every bucket under its read lock and send the accumulated
// list of tuples to `pid` as {'$neural_batch_response', Values}.
void NeuralTable::batch_dump(ErlNifPid pid) {
    ErlNifEnv *msg_env = enif_alloc_env();
    ERL_NIF_TERM acc = enif_make_list(msg_env, 0);
    for (int b = 0; b < BUCKET_COUNT; ++b) {
        enif_rwlock_rlock(locks[b]);
        hash_table::iterator cur = hash_buckets[b].begin();
        hash_table::iterator fin = hash_buckets[b].end();
        for (; cur != fin; ++cur) {
            acc = enif_make_list_cell(msg_env, enif_make_copy(msg_env, cur->second), acc);
        }
        enif_rwlock_runlock(locks[b]);
    }
    ERL_NIF_TERM msg = enif_make_tuple2(msg_env, enif_make_atom(msg_env, "$neural_batch_response"), acc);
    enif_send(NULL, &pid, msg_env, msg);
    enif_free_env(msg_env);
}
void NeuralTable::reclaim(unsigned long int key, ERL_NIF_TERM term) { | |||
int bucket = GET_BUCKET(key); | |||
ErlNifEnv *env = get_env(key); | |||
reclaimable[bucket] = enif_make_list_cell(env, term, reclaimable[bucket]); | |||
} | |||
// Compact each bucket in turn: copy every live term into a fresh env,
// publish the fresh env, then free the old env — releasing all
// reclaimed garbage terms in one shot. Each bucket is rewritten under
// its write lock, so readers never see a half-migrated bucket.
void NeuralTable::gc() {
    ErlNifEnv *fresh = NULL,
              *old = NULL;
    hash_table *bucket = NULL;
    hash_table::iterator it;
    unsigned int gc_curr = 0;
    for (; gc_curr < BUCKET_COUNT; ++gc_curr) {
        bucket = &hash_buckets[gc_curr];
        old = env_buckets[gc_curr];
        // Allocate the replacement env before taking the lock to keep
        // the critical section short.
        fresh = enif_alloc_env();
        enif_rwlock_rwlock(locks[gc_curr]);
        for (it = bucket->begin(); it != bucket->end(); ++it) {
            it->second = enif_make_copy(fresh, it->second);
        }
        garbage_cans[gc_curr] = 0;
        env_buckets[gc_curr] = fresh;
        reclaimable[gc_curr] = enif_make_list(fresh, 0);
        enif_free_env(old);
        enif_rwlock_rwunlock(locks[gc_curr]);
    }
}
unsigned long int NeuralTable::garbage_size() { | |||
unsigned long int size = 0; | |||
for (int i = 0; i < BUCKET_COUNT; ++i) { | |||
enif_rwlock_rlock(locks[i]); | |||
size += garbage_cans[i]; | |||
enif_rwlock_runlock(locks[i]); | |||
} | |||
return size; | |||
} |
@ -1,121 +0,0 @@ | |||
#ifndef NEURALTABLE_H | |||
#define NEURALTABLE_H | |||
#include "erl_nif.h" | |||
#include "neural_utils.h" | |||
#include <string> | |||
#include <stdio.h> | |||
#include <string.h> | |||
#include <unordered_map> | |||
#include <queue> | |||
#include <atomic> | |||
#ifdef _WIN32 | |||
#include <windows.h> | |||
#include <io.h> | |||
#include <process.h> | |||
#else | |||
#include <unistd.h> | |||
#endif | |||
#define BUCKET_COUNT 64
#define BUCKET_MASK (BUCKET_COUNT - 1)
/* FIX: parenthesize the argument and the whole expansion. The old form
 * `key & BUCKET_MASK` broke under operator precedence: e.g.
 * GET_BUCKET(k) == 6 expanded to k & (63 == 6), and GET_BUCKET(a + b)
 * was fine only by luck of `+` binding tighter than `&`. */
#define GET_BUCKET(key) ((key) & BUCKET_MASK)
#define GET_LOCK(key) ((key) & BUCKET_MASK)
/* Estimated bytes of discarded terms per table that triggers a GC pass. */
#define RECLAIM_THRESHOLD 1048576
using namespace std; | |||
class NeuralTable; | |||
typedef unordered_map<string, NeuralTable*> table_set; | |||
typedef unordered_map<unsigned long int, ERL_NIF_TERM> hash_table; | |||
typedef void (NeuralTable::*BatchFunction)(ErlNifPid pid); | |||
/* In-memory term table backing the neural NIF module.
 * Storage is split into BUCKET_COUNT buckets; each bucket has its own
 * rwlock and its own process-independent ErlNifEnv that owns the stored
 * terms. Discarded terms are queued per bucket (`reclaimable`) and
 * released wholesale by a background GC thread that rewrites bucket
 * envs (see gc()/DoGarbageCollection/DoReclamation). Dump/drain run on
 * a dedicated batch thread and reply to the caller by message. */
class NeuralTable {
    public:
    // Static API called from the NIF stubs; `table` is the table's atom name.
    static ERL_NIF_TERM MakeTable(ErlNifEnv *env, ERL_NIF_TERM name, ERL_NIF_TERM keypos);
    static ERL_NIF_TERM Insert(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object);
    static ERL_NIF_TERM InsertNew(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object);
    static ERL_NIF_TERM Delete(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key);
    static ERL_NIF_TERM Empty(ErlNifEnv *env, ERL_NIF_TERM table);
    static ERL_NIF_TERM Get(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key);
    static ERL_NIF_TERM Increment(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops);
    static ERL_NIF_TERM Shift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops);
    static ERL_NIF_TERM Unshift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops);
    static ERL_NIF_TERM Swap(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops);
    static ERL_NIF_TERM Dump(ErlNifEnv *env, ERL_NIF_TERM table);
    static ERL_NIF_TERM Drain(ErlNifEnv *env, ERL_NIF_TERM table);
    static ERL_NIF_TERM GetKeyPosition(ErlNifEnv *env, ERL_NIF_TERM table);
    static ERL_NIF_TERM GarbageCollect(ErlNifEnv *env, ERL_NIF_TERM table);
    static ERL_NIF_TERM GarbageSize(ErlNifEnv *env, ERL_NIF_TERM table);
    static NeuralTable* GetTable(ErlNifEnv *env, ERL_NIF_TERM name);
    // Thread entry points (arg is the owning NeuralTable*).
    static void* DoGarbageCollection(void *table);
    static void* DoBatchOperations(void *table);
    static void* DoReclamation(void *table);
    // Called once at NIF load time.
    static void Initialize() {
        table_mutex = enif_mutex_create("neural_table_maker");
    }
    // Called once at NIF unload: stops all worker threads (via each
    // table's destructor) and tears down the registry.
    static void Shutdown() {
        running = false;
        table_set::iterator it(tables.begin());
        while (it != tables.end()) {
            delete it->second;
            tables.erase(it);
            it = tables.begin();
        }
        enif_mutex_destroy(table_mutex);
    }
    // Per-key lock helpers (key maps to a bucket lock via GET_LOCK).
    void rlock(unsigned long int key) { enif_rwlock_rlock(locks[GET_LOCK(key)]); }
    void runlock(unsigned long int key) { enif_rwlock_runlock(locks[GET_LOCK(key)]); }
    void rwlock(unsigned long int key) { enif_rwlock_rwlock(locks[GET_LOCK(key)]); }
    void rwunlock(unsigned long int key) { enif_rwlock_rwunlock(locks[GET_LOCK(key)]); }
    ErlNifEnv *get_env(unsigned long int key);
    bool erase(unsigned long int key, ERL_NIF_TERM &ret);
    bool find(unsigned long int key, ERL_NIF_TERM &ret);
    void put(unsigned long int key, ERL_NIF_TERM tuple);
    // Batch-thread job bodies.
    void batch_dump(ErlNifPid pid);
    void batch_drain(ErlNifPid pid);
    void start_gc();
    void stop_gc();
    void start_batch();
    void stop_batch();
    void gc();
    void reclaim(unsigned long int key, ERL_NIF_TERM reclaim);
    unsigned long int garbage_size();
    void add_batch_job(ErlNifPid pid, BatchFunction fun);
    protected:
    static table_set tables;          // name -> table registry
    static atomic<bool> running;      // global shutdown flag for worker threads
    static ErlNifMutex *table_mutex;  // guards `tables`
    struct BatchJob {
        ErlNifPid pid;                // requester to message when done
        BatchFunction fun;            // batch_dump or batch_drain
    };
    NeuralTable(unsigned int kp);
    ~NeuralTable();
    unsigned int garbage_cans[BUCKET_COUNT];   // per-bucket garbage byte estimates
    hash_table hash_buckets[BUCKET_COUNT];     // per-bucket key -> term index
    ErlNifEnv *env_buckets[BUCKET_COUNT];      // per-bucket term storage envs
    ERL_NIF_TERM reclaimable[BUCKET_COUNT];    // per-bucket list of discarded terms
    ErlNifRWLock *locks[BUCKET_COUNT];         // per-bucket rwlocks
    ErlNifCond *gc_cond;
    ErlNifMutex *gc_mutex;
    ErlNifTid gc_tid;                          // GC thread
    ErlNifTid rc_tid;                          // reclaimer thread
    ErlNifCond *batch_cond;
    ErlNifMutex *batch_mutex;
    queue<BatchJob> batch_jobs;
    ErlNifTid batch_tid;                       // batch worker thread
    unsigned int key_pos;                      // tuple position used as key
};
#endif |
@ -1,134 +0,0 @@ | |||
#include "erl_nif.h" | |||
#include "NeuralTable.h" | |||
#include <stdio.h> | |||
// Prototypes for the NIF entry points implemented below. All share the
// standard NIF signature (env, argc, argv) and return an Erlang term.
static ERL_NIF_TERM neural_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_put(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_put_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_increment(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_unshift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_shift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_swap(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_get(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_delete(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_garbage(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_garbage_size(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_empty(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_drain(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_dump(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_key_pos(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
// Dispatch table registered with the VM: {erlang_name, arity, c_function}.
// Note the Erlang-facing names differ from the C function names
// (e.g. make_table -> neural_new, do_fetch -> neural_get).
static ErlNifFunc nif_funcs[] =
{
{"make_table", 2, neural_new},
{"do_fetch", 2, neural_get},
{"do_delete", 2, neural_delete},
{"do_dump", 1, neural_dump},
{"do_drain", 1, neural_drain},
{"empty", 1, neural_empty},
{"insert", 3, neural_put},
{"insert_new", 3, neural_put_new},
{"do_increment", 3, neural_increment},
{"do_unshift", 3, neural_unshift},
{"do_shift", 3, neural_shift},
{"do_swap", 3, neural_swap},
{"garbage", 1, neural_garbage},
{"garbage_size", 1, neural_garbage_size},
{"key_pos", 1, neural_key_pos}
};
// key_pos/1: reports the key position of a table. Directly exposed to
// Erlang without wrapper guards, so validate arity and argument type here.
static ERL_NIF_TERM neural_key_pos(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (argc != 1) {
return enif_make_badarg(env);
}
if (!enif_is_atom(env, argv[0])) {
return enif_make_badarg(env);
}
return NeuralTable::GetKeyPosition(env, argv[0]);
}
// Thin pass-throughs to NeuralTable. No guards here — argument validation
// is presumably handled by NeuralTable itself and/or the Erlang wrappers;
// TODO(review): confirm against the neural.erl module.
static ERL_NIF_TERM neural_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::MakeTable(env, argv[0], argv[1]);
}
static ERL_NIF_TERM neural_put(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::Insert(env, argv[0], argv[1], argv[2]);
}
static ERL_NIF_TERM neural_put_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::InsertNew(env, argv[0], argv[1], argv[2]);
}
// do_increment/3: validates (atom table, numeric second arg, list third
// arg) before dispatching to the table implementation.
static ERL_NIF_TERM neural_increment(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
bool args_ok = enif_is_atom(env, argv[0]) &&
enif_is_number(env, argv[1]) &&
enif_is_list(env, argv[2]);
if (!args_ok) {
return enif_make_badarg(env);
}
return NeuralTable::Increment(env, argv[0], argv[1], argv[2]);
}
// More thin pass-throughs; validation is delegated to NeuralTable.
// TODO(review): unlike neural_increment these perform no type checks —
// confirm the Erlang wrappers guard them.
static ERL_NIF_TERM neural_shift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::Shift(env, argv[0], argv[1], argv[2]);
}
static ERL_NIF_TERM neural_unshift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::Unshift(env, argv[0], argv[1], argv[2]);
}
static ERL_NIF_TERM neural_swap(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]){
return NeuralTable::Swap(env, argv[0], argv[1], argv[2]);
}
static ERL_NIF_TERM neural_get(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::Get(env, argv[0], argv[1]);
}
static ERL_NIF_TERM neural_delete(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::Delete(env, argv[0], argv[1]);
}
// Single-argument table operations: each checks that the table name is an
// atom before dispatching, returning badarg otherwise.
static ERL_NIF_TERM neural_empty(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::Empty(env, argv[0]);
}
static ERL_NIF_TERM neural_dump(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::Dump(env, argv[0]);
}
static ERL_NIF_TERM neural_drain(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::Drain(env, argv[0]);
}
static ERL_NIF_TERM neural_garbage(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::GarbageCollect(env, argv[0]);
}
static ERL_NIF_TERM neural_garbage_size(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::GarbageSize(env, argv[0]);
}
// Resource destructor stub — intentionally empty; nothing is allocated in
// the (unused) resource handle.
static void neural_resource_cleanup(ErlNifEnv* env, void* arg)
{
/* Delete any dynamically allocated memory stored in neural_handle */
/* neural_handle* handle = (neural_handle*)arg; */
}
// NIF load hook: performs global NeuralTable setup. Returning 0 tells the
// VM the library loaded successfully.
static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
(void) env;
(void) priv_data;
(void) load_info;
NeuralTable::Initialize();
return 0;
}
// NIF unload hook: tears down all tables and global state.
static void on_unload(ErlNifEnv *env, void *priv_data) {
NeuralTable::Shutdown();
}
// Registers this library as the implementation of the `neural` module.
ERL_NIF_INIT(neural, nif_funcs, &on_load, NULL, NULL, &on_unload);
@ -1,46 +0,0 @@ | |||
#include "neural_utils.h" | |||
// Rough estimate, in bytes, of the heap footprint of an Erlang term.
// Atoms cost one word, numbers are all costed like longs, binaries cost
// their payload plus header words, lists/tuples recurse over elements.
// Unrecognized types cost one word.
unsigned long int estimate_size(ErlNifEnv *env, ERL_NIF_TERM term) {
    if (enif_is_atom(env, term)) {
        return WORD_SIZE;
    }
    // Treating all numbers like longs.
    if (enif_is_number(env, term)) {
        return 2 * WORD_SIZE;
    }
    if (enif_is_binary(env, term)) {
        ErlNifBinary bin;
        // Guarded by enif_is_binary, so this should not fail; fall back to
        // a single word rather than reading an uninitialized struct.
        if (!enif_inspect_binary(env, term, &bin)) {
            return WORD_SIZE;
        }
        return bin.size + (6 * WORD_SIZE);
    }
    if (enif_is_list(env, term)) {
        unsigned long int size = WORD_SIZE;
        ERL_NIF_TERM it = term;
        ERL_NIF_TERM curr;
        while (!enif_is_empty_list(env, it)) {
            // BUG FIX: for an improper list ([1 | 2]) the tail is not a cons
            // cell, so enif_get_list_cell fails without advancing `it`; the
            // old code ignored the failure and looped forever. Count the
            // non-list tail and stop.
            if (!enif_get_list_cell(env, it, &curr, &it)) {
                size += estimate_size(env, it) + WORD_SIZE;
                break;
            }
            size += estimate_size(env, curr) + WORD_SIZE;
        }
        return size;
    }
    if (enif_is_tuple(env, term)) {
        unsigned long int size = 0;
        const ERL_NIF_TERM *tpl;
        int arity;
        // Guarded by enif_is_tuple; check anyway so a failure cannot leave
        // `arity` uninitialized (previously undefined behavior).
        if (enif_get_tuple(env, term, &arity, &tpl)) {
            for (int i = 0; i < arity; ++i) {
                size += estimate_size(env, tpl[i]);
            }
        }
        return size;
    }
    // Return 1 word by default
    return WORD_SIZE;
}
@ -1,9 +0,0 @@ | |||
#ifndef NEURAL_UTILS_H
#define NEURAL_UTILS_H
#include "erl_nif.h"
// One machine "word" used for size estimates.
// NOTE(review): sizeof(int) is 4 on LP64 platforms where the Erlang term
// word is 8 bytes — this likely understates estimates; confirm whether
// sizeof(void *) was intended.
#define WORD_SIZE sizeof(int)
unsigned long int estimate_size(ErlNifEnv *env, ERL_NIF_TERM term);
#endif
@ -1,14 +0,0 @@ | |||
%% rebar port-compiler config: build every .cpp in this directory into
%% priv/neural.so.
{port_specs, [
{"../../priv/neural.so", ["*.cpp"]}
]}.
%% C++11 is required (atomic<bool>, etc.); link as a shared library.
{port_env, [
{".*", "CXXFLAGS", "$CXXFLAGS -std=c++11 -O3"},
{".*", "LDFLAGS", "$LDFLAGS -lstdc++ -shared"}
]}.
@ -1,111 +0,0 @@ | |||
#include "erl_nif.h" | |||
#include <stdio.h> | |||
#include <string.h> | |||
// Resource type registered in on_load; currently unused by the NIFs below.
static ErlNifResourceType* test_RESOURCE = NULL;
// Prototypes
static ERL_NIF_TERM get_bin_address(ErlNifEnv* env, int argc,
const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM new_array(ErlNifEnv* env, int argc,
const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM size_array(ErlNifEnv* env, int argc,
const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM put_array(ErlNifEnv* env, int argc,
const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM get_array(ErlNifEnv* env, int argc,
const ERL_NIF_TERM argv[]);
// Dispatch table: {erlang_name, arity, c_function}.
static ErlNifFunc nif_funcs[] =
{
{"get_bin_address", 1, get_bin_address},
{"new_array", 1, new_array},
{"size_array", 1, size_array},
{"put_array", 3, put_array},
{"get_array", 2, get_array}
};
// get_bin_address/1: debugging helper returning "bin: size=..., ptr=..."
// as a latin1 string for a binary term.
static ERL_NIF_TERM get_bin_address(ErlNifEnv* env, int argc,
                                    const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    // BUG FIX: the inspect result was ignored; a non-binary argument left
    // `bin` uninitialized and the sprintf below read garbage.
    if (!enif_inspect_binary(env, argv[0], &bin)) {
        return enif_make_badarg(env);
    }
    char buf[256];
    // snprintf bounds the write; %p requires a void * argument.
    snprintf(buf, sizeof buf, "bin: size=%zu, ptr=%p",
             bin.size, (void *) bin.data);
    return enif_make_string(env, buf, ERL_NIF_LATIN1);
}
// new_array/1: allocates a mutable binary sized to hold `size` longs and
// returns it as an Erlang binary term.
static ERL_NIF_TERM new_array(ErlNifEnv* env, int argc,
                              const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    unsigned long size;
    // BUG FIX: conversion and allocation results were ignored; a bad
    // argument or failed allocation previously returned a garbage binary.
    if (!enif_get_ulong(env, argv[0], &size)) {
        return enif_make_badarg(env);
    }
    // Guard size * sizeof(long) against overflow before allocating.
    if (size > ((size_t) -1) / sizeof(long)) {
        return enif_make_badarg(env);
    }
    if (!enif_alloc_binary(size * sizeof(long), &bin)) {
        return enif_make_badarg(env);
    }
    return enif_make_binary(env, &bin);
}
// size_array/1: returns the byte size of a binary term.
static ERL_NIF_TERM size_array(ErlNifEnv* env, int argc,
                               const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    // BUG FIX: the inspect result was ignored; a non-binary argument read
    // an uninitialized struct.
    if (!enif_inspect_binary(env, argv[0], &bin)) {
        return enif_make_badarg(env);
    }
    return enif_make_int64(env, bin.size);
}
// put_array/3: stores `value` at element index `pos` of the binary,
// treating its payload as an array of unsigned long. Returns 'ok'.
static ERL_NIF_TERM put_array(ErlNifEnv* env, int argc,
                              const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    unsigned long* array;
    unsigned long pos, value;
    // BUG FIX: conversion/inspect results were ignored; failures left the
    // locals uninitialized and the write below was undefined behavior.
    if (!enif_get_ulong(env, argv[0], &pos) ||
        !enif_get_ulong(env, argv[1], &value) ||
        !enif_inspect_binary(env, argv[2], &bin)) {
        return enif_make_badarg(env);
    }
    // BUG FIX: bounds check — the old code wrote out of bounds for any
    // pos >= element count.
    if (pos >= bin.size / sizeof(unsigned long)) {
        return enif_make_badarg(env);
    }
    // NOTE(review): mutating a term binary in place violates Erlang's
    // immutability guarantees; callers must ensure the binary is unshared.
    array = (unsigned long*)bin.data;
    array[pos] = value;
    return enif_make_atom(env, "ok");
}
// get_array/2: reads element index `pos` of the binary, treating its
// payload as an array of unsigned long.
static ERL_NIF_TERM get_array(ErlNifEnv* env, int argc,
                              const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    unsigned long* array;
    unsigned long pos;
    // BUG FIX: conversion/inspect results were ignored; failures made the
    // read below undefined behavior.
    if (!enif_get_ulong(env, argv[0], &pos) ||
        !enif_inspect_binary(env, argv[1], &bin)) {
        return enif_make_badarg(env);
    }
    // BUG FIX: bounds check — the old code read out of bounds for any
    // pos >= element count.
    if (pos >= bin.size / sizeof(unsigned long)) {
        return enif_make_badarg(env);
    }
    array = (unsigned long*)bin.data;
    return enif_make_int64(env, array[pos]);
}
// Resource destructor stub — intentionally empty; the resource type is
// registered but no handle memory is allocated.
static void test_resource_cleanup(ErlNifEnv* env, void* arg)
{
/* Delete any dynamically allocated memory stored in test_handle */
/* test_handle* handle = (test_handle*)arg; */
}
// Load hook: registers the "test_resource" resource type. Returning
// non-zero aborts the library load.
static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
ErlNifResourceFlags flags = ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER;
ErlNifResourceType* rt = enif_open_resource_type(env, NULL,
"test_resource",
&test_resource_cleanup,
flags, NULL);
if (rt == NULL)
return -1;
test_RESOURCE = rt;
return 0;
}
// Registers this library as the implementation of the `binary_tools`
// Erlang module.
ERL_NIF_INIT(binary_tools, nif_funcs, &on_load, NULL, NULL, NULL);
@ -1,13 +0,0 @@ | |||
%% rebar port-compiler config: build every .c in this directory into
%% priv/binary_tools.so.
{port_specs, [
{"../../priv/binary_tools.so", ["*.c"]}
]}.
{port_env, [
{"linux", "CFLAGS", "$CFLAGS -Wall -O2 -Wpointer-sign"}
]}.
@ -1,20 +0,0 @@ | |||
%% bitmap_filter: Erlang front-end that loads the bitmap_filter NIF
%% library when the module is loaded (-on_load).
-module(bitmap_filter).
-export([init/0, filter/1]).
-on_load(init/0).
%% Locates the application's priv directory (falling back to ../priv
%% relative to the ebin directory when priv_dir is unavailable, e.g. in a
%% source checkout) and loads the bitmap_filter shared library from it.
init() ->
PrivDir = case code:priv_dir(?MODULE) of
{error, _} ->
EbinDir = filename:dirname(code:which(?MODULE)),
AppPath = filename:dirname(EbinDir),
filename:join(AppPath, "priv"),
Path ->
Path
end,
erlang:load_nif(filename:join(PrivDir, "bitmap_filter"), 0).
% Hack - overridden by init/0, which is called via -on_load; the NIF
% replaces this stub. I couldn't find another way that the compiler or
% code loader didn't complain about.
filter(DefaultArgs) ->
DefaultArgs.