瀏覽代碼

some nif sample

master
SisMaker 5 年之前
父節點
當前提交
706eaa1289
共有 78 個檔案被更改,包括 15255 行新增、1940 行刪除
  1. +4
    -1
      .gitignore
  2. +442
    -0
      c_src/.enq/enq_nif.c
  3. +71
    -0
      c_src/.enq/fifo.h
  4. +63
    -0
      c_src/.enq/lifo.h
  5. +12
    -0
      c_src/.enq/rebar.config
  6. +80
    -0
      c_src/bitmap_filter/bitmap_filter.c
  7. +3
    -3
      c_src/bitmap_filter/rebar.config
  8. +448
    -0
      c_src/bsn/bsn_ext.c
  9. +331
    -0
      c_src/bsn/bsn_int.c
  10. +448
    -0
      c_src/bsn/c_src/bsn_ext.c
  11. +331
    -0
      c_src/bsn/c_src/bsn_int.c
  12. +7
    -4
      c_src/bsn/rebar.config
  13. +318
    -0
      c_src/couchdb_hqueue/c_src/hqueue.c
  14. +5
    -0
      c_src/couchdb_hqueue/c_src/hqueue.d
  15. +60
    -0
      c_src/couchdb_hqueue/c_src/hqueue.h
  16. +601
    -0
      c_src/couchdb_hqueue/c_src/hqueue_nif.c
  17. +5
    -0
      c_src/couchdb_hqueue/c_src/hqueue_nif.d
  18. +72
    -0
      c_src/couchdb_hqueue/c_src/valgrind_sample.c
  19. +318
    -0
      c_src/couchdb_hqueue/hqueue.c
  20. +60
    -0
      c_src/couchdb_hqueue/hqueue.h
  21. +601
    -0
      c_src/couchdb_hqueue/hqueue_nif.c
  22. +13
    -0
      c_src/couchdb_hqueue/rebar.config
  23. +72
    -0
      c_src/couchdb_hqueue/valgrind_sample.c
  24. +0
    -564
      c_src/cq/cq_nif.c
  25. +0
    -71
      c_src/cq/cq_nif.h
  26. +0
    -564
      c_src/cq1/cq_nif.c
  27. +0
    -71
      c_src/cq1/cq_nif.h
  28. +0
    -26
      c_src/cq1/rebar.config
  29. +0
    -564
      c_src/cq2/cq_nif.c
  30. +0
    -71
      c_src/cq2/cq_nif.h
  31. +80
    -0
      c_src/enlfq/Makefile
  32. +3637
    -0
      c_src/enlfq/concurrentqueue.h
  33. +84
    -0
      c_src/enlfq/enlfq.cc
  34. +10
    -0
      c_src/enlfq/enlfq.h
  35. +57
    -0
      c_src/enlfq/enlfq_nif.cc
  36. +19
    -0
      c_src/enlfq/enlfq_nif.h
  37. +27
    -0
      c_src/enlfq/nif_utils.cc
  38. +6
    -0
      c_src/enlfq/nif_utils.h
  39. +7
    -0
      c_src/enlfq/rebar.config
  40. +172
    -0
      c_src/etsq/etsq.cpp
  41. +130
    -0
      c_src/etsq/etsq.h
  42. +7
    -0
      c_src/etsq/rebar.config
  43. +103
    -0
      c_src/gb_lru/binary.h
  44. +2394
    -0
      c_src/gb_lru/btree.h
  45. +349
    -0
      c_src/gb_lru/btree_container.h
  46. +130
    -0
      c_src/gb_lru/btree_map.h
  47. +619
    -0
      c_src/gb_lru/btreelru_nif.cpp
  48. +71
    -0
      c_src/gb_lru/erlterm.h
  49. +266
    -0
      c_src/gb_lru/lru.h
  50. +73
    -0
      c_src/gb_lru/murmurhash2.h
  51. +7
    -0
      c_src/gb_lru/rebar.config
  52. +90
    -0
      c_src/native_array/native_array_nif.c
  53. +7
    -0
      c_src/native_array/rebar.config
  54. +905
    -0
      c_src/neural/NeuralTable.cpp
  55. +121
    -0
      c_src/neural/NeuralTable.h
  56. +134
    -0
      c_src/neural/neural.cpp
  57. +46
    -0
      c_src/neural/neural_utils.cpp
  58. +9
    -0
      c_src/neural/neural_utils.h
  59. +14
    -0
      c_src/neural/rebar.config
  60. +1
    -1
      src/dataType/utTermSize.erl
  61. +20
    -0
      src/nifSrc/bitmap_filter/bitmap_filter.erl
  62. +77
    -0
      src/nifSrc/bsn/bsn.erl
  63. +56
    -0
      src/nifSrc/bsn/bsn_ext.erl
  64. +45
    -0
      src/nifSrc/bsn/bsn_int.erl
  65. +236
    -0
      src/nifSrc/bsn/bsn_measure.erl
  66. +160
    -0
      src/nifSrc/couchdb_hqeue/hqueue.erl
  67. +0
    -0
      src/nifSrc/cq/cq.erl
  68. +51
    -0
      src/nifSrc/enlfq/enlfq.erl
  69. +71
    -0
      src/nifSrc/enlfq/testing/benchmark.erl
  70. +23
    -0
      src/nifSrc/enlfq/testing/multi_spawn.erl
  71. +159
    -0
      src/nifSrc/enq/enq.erl
  72. +63
    -0
      src/nifSrc/enq/enq_nif.erl
  73. +103
    -0
      src/nifSrc/etsq/etsq.erl
  74. +65
    -0
      src/nifSrc/etsq/etsq_tests.erl
  75. +102
    -0
      src/nifSrc/gb_lru/btree_lru.erl
  76. +59
    -0
      src/nifSrc/gb_lru/btree_lru_test.erl
  77. +6
    -0
      src/nifSrc/gb_lru/gb_lru.app.src
  78. +19
    -0
      src/nifSrc/native_array/native_array.erl

+ 4
- 1
.gitignore 查看文件

@ -22,4 +22,7 @@ priv
.idea
*.iml
cmake-build*
CMakeLists.txt
CMakeLists.txt
*.pdb
compile_commands.json

+ 442
- 0
c_src/.enq/enq_nif.c 查看文件

@ -0,0 +1,442 @@
#define _GNU_SOURCE
#include "erl_nif.h"
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <time.h>
/* Fix: fifo.h was commented out, but fifo_t / fifo_handle_t and the
 * __fifo_* macros are used throughout this file (item_t, queue_t, the
 * FIFO callbacks below), so the file cannot compile without it. */
#include "fifo.h"
#include "lifo.h"

/* Atom cache, interned once at load time. */
typedef struct {
    ERL_NIF_TERM ok;
    ERL_NIF_TERM error;
    ERL_NIF_TERM fifo;
    ERL_NIF_TERM lifo;
    ERL_NIF_TERM ttl;
    ERL_NIF_TERM max_size;
} atoms_t;

/* Per-VM private state: the queue resource type plus the atom cache. */
typedef struct {
    ErlNifResourceType *queue;
    atoms_t atoms;
} priv_t;

/* One queued element: intrusive link (FIFO or LIFO, depending on the
 * owning queue), the payload binary, and the enqueue timestamp (only
 * filled in when the queue has a ttl). */
typedef struct {
    union {
        fifo_handle_t fifo;
        lifo_handle_t lifo;
    } handle;
    ErlNifBinary data;
    struct timespec added;
} item_t;

typedef enum {
    QTYPE_FIFO = 0,
    QTYPE_LIFO
} queue_type_t;

/* Queue resource: storage plus a vtable of type-specific operations so
 * the NIF entry points need not branch on the queue discipline. */
typedef struct queue {
    union {
        fifo_t fifo;
        lifo_t lifo;
    } queue;
    uint64_t ttl;       /* item lifetime in ms; 0 = never expires */
    uint64_t max_size;  /* capacity limit; 0 = unbounded */
    void     (*push)    (struct queue *inst, item_t *item);
    item_t*  (*pop)     (struct queue *inst);
    void     (*free)    (struct queue *inst);
    uint64_t (*size)    (struct queue *inst);
    void     (*cleanup) (struct queue *inst);
} queue_t;

// returns tuple {error, atom()}
static inline ERL_NIF_TERM
make_error(ErlNifEnv* env, const char *error) {
    priv_t *priv = (priv_t *) enif_priv_data(env);
    return enif_make_tuple2(env, priv->atoms.error, enif_make_atom(env, error));
}

// returns time diff in milliseconds
static inline int64_t
tdiff(struct timespec *t2, struct timespec *t1) {
    return (t2->tv_sec * 1000 + t2->tv_nsec / 1000000UL) -
           (t1->tv_sec * 1000 + t1->tv_nsec / 1000000UL);
}

/* Reads the monotonic clock; CLOCK_MONOTONIC_RAW is Linux-specific.
 * NOTE(review): in NDEBUG builds the assert disappears and rc is unused. */
static inline void
gettime(struct timespec *tp) {
    int rc = clock_gettime(CLOCK_MONOTONIC_RAW, tp);
    assert(rc == 0);
}
/******************************************************************************/
/* FIFO callbacks */
/******************************************************************************/
/******************************************************************************/
/* FIFO callbacks */
/******************************************************************************/

/* Drops expired items from the head. Because insertion order equals age
 * order in a FIFO, scanning can stop at the first fresh item. */
static void
cleanup_fifo(queue_t *inst) {
    struct timespec now;
    gettime(&now);
    for (;;) {
        item_t *item = NULL;
        __fifo_peak(&inst->queue.fifo, item, handle.fifo);
        if (item == NULL)
            return;
        /* NOTE(review): diff (int64_t) is compared against ttl (uint64_t);
         * benign while diff is non-negative (monotonic clock) — confirm. */
        int64_t diff = tdiff(&now, &item->added);
        if (diff < inst->ttl) {
            return;
        } else {
            __fifo_pop(&inst->queue.fifo, item, handle.fifo);
            enif_release_binary(&item->data);
            enif_free(item);
        }
    }
}

/* Appends an item at the tail. */
static void
push_fifo(queue_t *inst, item_t *item) {
    __fifo_push(&inst->queue.fifo, item, handle.fifo);
}

/* Pops from the head. With a ttl set, expired items are released on the
 * way until a fresh item (or the end of the queue) is reached.
 * Returns NULL when the queue is empty. */
static item_t *
pop_fifo(queue_t *inst) {
    item_t *item = NULL;
    if (inst->ttl > 0) {
        struct timespec now;
        gettime(&now);
        for (;;) {
            __fifo_pop(&inst->queue.fifo, item, handle.fifo);
            if (item == NULL)
                return NULL;
            int64_t diff = tdiff(&now, &item->added);
            if (diff < inst->ttl) {
                return item;
            } else {
                enif_release_binary(&item->data);
                enif_free(item);
            }
        }
    } else {
        __fifo_pop(&inst->queue.fifo, item, handle.fifo);
    }
    return item;
}

/* Releases every remaining item; used by the resource destructor. */
static void
free_fifo(queue_t *inst) {
    item_t *item;
    for(;;) {
        __fifo_pop(&inst->queue.fifo, item, handle.fifo);
        if (item == NULL)
            return;
        enif_release_binary(&item->data);
        enif_free(item);
    }
}

/* Number of items currently queued. */
static uint64_t
size_fifo(queue_t *inst) {
    return fifo_length(&inst->queue.fifo);
}
/******************************************************************************/
/* LIFO callbacks */
/******************************************************************************/
/******************************************************************************/
/* LIFO callbacks */
/******************************************************************************/

/* Drops expired items. In a LIFO the *tail* holds the oldest item, so the
 * scan walks backwards from the tail via the prev links and stops at the
 * first fresh item. */
static void
cleanup_lifo(queue_t *inst) {
    struct timespec now;
    gettime(&now);
    for(;;) {
        item_t *item = inst->queue.lifo.tail;
        if (item == NULL)
            return;
        int64_t diff = tdiff(&now, &item->added);
        if (diff < inst->ttl) {
            return;
        } else {
            /* unlink the tail node by hand (no macro for tail removal) */
            item_t *prev = item->handle.lifo.prev;
            if (prev != NULL)
                prev->handle.lifo.next = NULL;
            inst->queue.lifo.tail = prev;
            /* NOTE(review): when prev == NULL the head is left pointing at
             * the freed node — confirm head is reset elsewhere or fix. */
            enif_release_binary(&item->data);
            enif_free(item);
        }
    }
}

/* Pushes an item at the head (stack discipline). */
static void
push_lifo(queue_t *inst, item_t *item) {
    __lifo_push(&inst->queue.lifo, item, handle.lifo);
}

/* Pops the most recently pushed item; expired items are purged first when
 * a ttl is configured. Returns NULL when empty. */
static item_t *
pop_lifo(queue_t *inst) {
    item_t *item = NULL;
    if (inst->ttl > 0)
        cleanup_lifo(inst);
    __lifo_pop(&inst->queue.lifo, item, handle.lifo);
    return item;
}

/* Releases every remaining item; used by the resource destructor. */
static void
free_lifo(queue_t *inst) {
    item_t *item;
    for(;;) {
        __lifo_pop(&inst->queue.lifo, item, handle.lifo);
        if (item == NULL)
            return;
        enif_release_binary(&item->data);
        enif_free(item);
    }
}

/* Number of items currently stacked. */
static uint64_t
size_lifo(queue_t *inst) {
    return lifo_length(&inst->queue.lifo);
}
/******************************************************************************
** NIFs
*******************************************************************************/
/* enq:new/1 — creates a queue resource.
 * argv[0] is a proplist: fifo | lifo | {ttl, Ms} | {max_size, N}.
 * Returns {ok, Ref}, badarg on malformed options, or {error, Reason}. */
static ERL_NIF_TERM
new_queue(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    if (!enif_is_list(env, argv[0]))
        return enif_make_badarg(env);
    priv_t *priv = (priv_t *) enif_priv_data(env);
    queue_type_t qtype = QTYPE_FIFO;   /* default discipline */
    unsigned long ttl = 0;             /* 0 = items never expire */
    unsigned long max_size = 0;        /* 0 = unbounded */
    ERL_NIF_TERM settings_list = argv[0];
    ERL_NIF_TERM head;
    // parses proplist [fifo, lifo, {ttl, non_neg_integer()}, {max_size, non_neg_integer()}]
    while(enif_get_list_cell(env, settings_list, &head, &settings_list))
    {
        const ERL_NIF_TERM *items;
        int arity;
        if (enif_is_atom(env, head)) {
            if (enif_is_identical(head, priv->atoms.fifo)) {
                qtype = QTYPE_FIFO;
            } else if (enif_is_identical(head, priv->atoms.lifo)) {
                qtype = QTYPE_LIFO;
            } else {
                return enif_make_badarg(env);
            }
        } else if (enif_get_tuple(env, head, &arity, &items) && arity == 2) {
            if (enif_is_identical(items[0], priv->atoms.ttl)) {
                if (!enif_get_ulong(env, items[1], &ttl)) {
                    return enif_make_badarg(env);
                }
            } else if (enif_is_identical(items[0], priv->atoms.max_size)) {
                if (!enif_get_ulong(env, items[1], &max_size)) {
                    return enif_make_badarg(env);
                }
            } else {
                return enif_make_badarg(env);
            }
        } else {
            return enif_make_badarg(env);
        }
    }
    queue_t *inst = (queue_t *) enif_alloc_resource(priv->queue, sizeof(*inst));
    if (inst == NULL)
        return make_error(env, "enif_alloc_resource");
    inst->ttl = ttl;
    inst->max_size = max_size;
    /* Wire the per-discipline vtable so NIF entry points stay generic. */
    switch (qtype) {
    case QTYPE_FIFO:
        fifo_init(&inst->queue.fifo);
        inst->push = &push_fifo;
        inst->pop = &pop_fifo;
        inst->free = &free_fifo;
        inst->size = &size_fifo;
        inst->cleanup = &cleanup_fifo;
        break;
    case QTYPE_LIFO:
        lifo_init(&inst->queue.lifo);
        inst->push = &push_lifo;
        inst->pop = &pop_lifo;
        inst->free = &free_lifo;
        inst->size = &size_lifo;
        inst->cleanup = &cleanup_lifo;
        break;
    }
    ERL_NIF_TERM result = enif_make_resource(env, inst);
    /* The term now holds the only reference; the GC will run the dtor. */
    enif_release_resource(inst);
    return enif_make_tuple2(env, priv->atoms.ok, result);
}
/* enq:push/2 — copies the binary argv[1] into a fresh item and enqueues
 * it on the queue resource argv[0]. Expired items are purged first when a
 * ttl is set. Returns ok, {error, max_size} when full, or badarg.
 * NOTE(review): no mutex guards the queue — assumes callers serialize
 * access to a given queue; confirm against the Erlang wrapper. */
static ERL_NIF_TERM
push_item(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    priv_t *priv = (priv_t *) enif_priv_data(env);
    queue_t *inst;
    if (!enif_get_resource(env, argv[0], priv->queue, (void**) &inst))
        return enif_make_badarg(env);
    // todo: check an owner of the queue
    ErlNifBinary bin;
    if (!enif_inspect_binary(env, argv[1], &bin))
        return enif_make_badarg(env);
    if (inst->ttl > 0) {
        inst->cleanup(inst);
    }
    if (inst->max_size > 0 && inst->size(inst) >= inst->max_size) {
        return enif_make_tuple2(env, priv->atoms.error, priv->atoms.max_size);
    }
    item_t *item = (item_t *) enif_alloc(sizeof(*item));
    if (item == NULL)
        return make_error(env, "enif_alloc");
    /* Own a copy of the payload; the inspected binary belongs to the VM. */
    if (!enif_alloc_binary(bin.size, &item->data)) {
        enif_free(item);
        return make_error(env, "enif_alloc_binary");
    }
    memcpy(item->data.data, bin.data, bin.size);
    if (inst->ttl > 0) {
        gettime(&item->added);  /* item->added stays uninitialized when ttl == 0 (never read) */
    }
    inst->push(inst, item);
    return priv->atoms.ok;
}

/* enq:pop/1 — dequeues one item. Returns [] when empty, [Binary]
 * otherwise (a list so the empty case is distinguishable). */
static ERL_NIF_TERM
pop_item(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    priv_t *priv = (priv_t *) enif_priv_data(env);
    queue_t *inst;
    item_t *item;
    if (!enif_get_resource(env, argv[0], priv->queue, (void**) &inst))
        return enif_make_badarg(env);
    // todo: check an owner of the queue
    item = inst->pop(inst);
    if (item == NULL)
        return enif_make_list(env, 0);
    /* enif_make_binary takes ownership of item->data; only the node is freed. */
    ERL_NIF_TERM result = enif_make_binary(env, &item->data);
    enif_free(item);
    return enif_make_list1(env, result);
}

/* enq:size/1 — current number of queued items. */
static ERL_NIF_TERM
queue_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    priv_t *priv = (priv_t *) enif_priv_data(env);
    queue_t *inst;
    if (!enif_get_resource(env, argv[0], priv->queue, (void**) &inst))
        return enif_make_badarg(env);
    return enif_make_uint64(env, inst->size(inst));
}
/******************************************************************************
** NIF initialization
*******************************************************************************/
/* Resource destructor: drains and frees every queued item via the
 * queue's discipline-specific free callback. */
static void
enq_queue_free(ErlNifEnv* env, void* obj) {
    queue_t *inst = obj;
    inst->free(inst);
}

/* Allocates and fills the per-VM private state (resource type + atom
 * cache). Returns NULL on any failure so loading can be aborted. */
static priv_t *
make_priv(ErlNifEnv *env) {
    priv_t *priv = enif_alloc(sizeof(*priv));
    if (priv == NULL)
        return NULL;
    ErlNifResourceFlags flags = ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER;
    priv->queue = enif_open_resource_type(env, NULL, "enq_queue", enq_queue_free, flags, NULL);
    if (priv->queue == NULL) {
        /* Fix: previously unchecked — a NULL resource type would crash
         * every later enif_alloc_resource/enif_get_resource call. */
        enif_free(priv);
        return NULL;
    }
    priv->atoms.ok = enif_make_atom(env, "ok");
    priv->atoms.error = enif_make_atom(env, "error");
    priv->atoms.fifo = enif_make_atom(env, "fifo");
    priv->atoms.lifo = enif_make_atom(env, "lifo");
    priv->atoms.ttl = enif_make_atom(env, "ttl");
    priv->atoms.max_size = enif_make_atom(env, "max_size");
    return priv;
}

static int
enq_nif_load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info) {
    *priv_data = make_priv(env);
    /* Fix: was `return 0` unconditionally, which reported success even
     * when allocation failed and left NULL private data behind. */
    return (*priv_data == NULL) ? -1 : 0;
}

static int
enq_nif_upgrade(ErlNifEnv *env, void **priv_data, void **old_priv_data, ERL_NIF_TERM load_info) {
    *priv_data = make_priv(env);
    return (*priv_data == NULL) ? -1 : 0;
}
/* Dispatch table for the enq_nif Erlang module: new/1, push/2, pop/1, size/1. */
static ErlNifFunc enq_nif_funcs[] = {
    {"new", 1, new_queue},
    {"push", 2, push_item},
    {"pop", 1, pop_item},
    {"size", 1, queue_size},
};

ERL_NIF_INIT(enq_nif, enq_nif_funcs, enq_nif_load, NULL, enq_nif_upgrade, NULL)

+ 71
- 0
c_src/.enq/fifo.h 查看文件

@ -0,0 +1,71 @@
#ifndef _FIFO_H
#define _FIFO_H

/* Intrusive singly linked FIFO. Elements embed a fifo_handle_t and the
 * macros receive the name of that handle member, so one element type can
 * participate in differently named queues. Not thread-safe. */

/* Main FIFO structure. Allocate memory for it yourself. */
typedef struct fifo_t {
    void *head;
    void *tail;
    unsigned long long count;
} fifo_t;

/* Per-element link; embed one in every element you intend to push. */
typedef struct fifo_handle_t {
    void *next;
} fifo_handle_t;

/* Initializes fifo structure. */
#define fifo_init(fifo) \
do { \
    fifo_t *__q = fifo; \
    __q->head = NULL; \
    __q->tail = NULL; \
    __q->count = 0; \
} while (0)

/* Internal: append element p, linking through handle member h. */
#define __fifo_push(fifo, p, h) \
do { \
    fifo_t *__q = fifo; \
    __typeof__ (p) e = p; \
    e->h.next = NULL; \
    if (__q->tail == NULL) { \
        __q->head = e; \
    } else { \
        __typeof__ (e) t = __q->tail; \
        t->h.next = e; \
    } \
    __q->tail = e; \
    __q->count++; \
} while (0)

/* Puts an element to the queue. */
#define fifo_push(fifo, p) __fifo_push (fifo, p, fifo_handle)

/* Internal: unlink the head into p; p becomes NULL when the queue is empty. */
#define __fifo_pop(fifo, p, h) \
do { \
    fifo_t *__q = fifo; \
    p = __q->head; \
    if (p != NULL) { \
        __q->count--; \
        __q->head = p->h.next; \
        if (__q->tail == p) \
            __q->tail = NULL; \
    } \
} while (0)

/* Pops the first element out of the queue. */
#define fifo_pop(fifo, p) __fifo_pop (fifo, p, fifo_handle)

/* Internal: read the head without unlinking ("peak" is a historical
 * misspelling of "peek", kept for compatibility with callers). */
#define __fifo_peak(fifo, p, h) \
do { \
    p = (fifo)->head; \
} while (0)

/* Returns the first element of the queue without removing. */
#define fifo_peak(fifo, p) __fifo_peak (fifo, p, fifo_handle)

/* Returns the length of the queue. */
#define fifo_length(fifo) ((fifo)->count)

/* Returns true if the queue is empty. */
#define fifo_empty(fifo) ((fifo)->count == 0)

#endif /* _FIFO_H */

+ 63
- 0
c_src/.enq/lifo.h 查看文件

@ -0,0 +1,63 @@
#ifndef _LIFO_H
#define _LIFO_H

/* Intrusive doubly linked LIFO (stack). Push and pop both act on the
 * head; the prev links and tail pointer exist so callers can also walk
 * or trim from the oldest end. Not thread-safe. */

typedef struct lifo_t {
    void *head;   /* newest element */
    void *tail;   /* oldest element */
    unsigned long long count;
} lifo_t;

/* Per-element link; embed one in every element you intend to push. */
typedef struct lifo_handle_t {
    void *next;
    void *prev;
} lifo_handle_t;

/* Resets a caller-allocated lifo_t to the empty state. */
#define lifo_init(lifo) \
do { \
    lifo_t *__q = lifo; \
    __q->head = NULL; \
    __q->tail = NULL; \
    __q->count = 0; \
} while (0)

/* Internal: push element p at the head, linking through handle member h. */
#define __lifo_push(lifo, p, h) \
do { \
    lifo_t *__q = lifo; \
    __typeof__ (p) e = p; \
    e->h.next = __q->head; \
    e->h.prev = NULL; \
    if (__q->head == NULL) { \
        __q->tail = e; \
    } else { \
        __typeof__ (e) t = __q->head; \
        t->h.prev = e; \
    } \
    __q->head = e; \
    __q->count++; \
} while (0)

#define lifo_push(lifo, p) __lifo_push (lifo, p, lifo_handle)

/* Internal: unlink the head into p; p becomes NULL when the stack is empty. */
#define __lifo_pop(lifo, p, h) \
do { \
    lifo_t *__q = lifo; \
    p = __q->head; \
    if (p != NULL) { \
        __q->count--; \
        __q->head = p->h.next; \
        if (__q->head != NULL) { \
            __typeof__ (p) t = __q->head; \
            t->h.prev = NULL; \
        } else { \
            __q->tail = NULL; \
        } \
    } \
} while (0)

#define lifo_pop(lifo, p) __lifo_pop (lifo, p, lifo_handle)

/* Number of stacked elements. */
#define lifo_length(lifo) ((lifo)->count)

/* True when the stack holds no elements. */
#define lifo_empty(lifo) ((lifo)->count == 0)

#endif /* _LIFO_H */

+ 12
- 0
c_src/.enq/rebar.config 查看文件

@ -0,0 +1,12 @@
%% rebar port-compiler spec: build every C file in this directory into
%% priv/enq_nif.so (path relative to c_src/.enq/).
{port_specs, [
    {"../../priv/enq_nif.so", ["*.c"]}
]}.
%% Kept for reference: flags for an explicit GNU99 / librt build.
% {port_env, [
%     {"LDFLAGS", "$ERL_LDFLAGS -lrt"},
%     {"CFLAGS", "$CFLAGS --std=gnu99 -Wall -O3"}
% ]}.

+ 80
- 0
c_src/bitmap_filter/bitmap_filter.c 查看文件

@ -0,0 +1,80 @@
#include <erl_nif.h>

/*
This function expects a list of list of tuples of type {int, _}.
It filters the tuples, using the first int field as a key,
and removing duplicating keys with precedence given to the order
in which they were seen (first given precedence).
*/
static ERL_NIF_TERM
bitmap_filter(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Seen-key bitmap; keys must be < 3000 (checked below). Lives on the
     * stack, so it is reset on every call. */
    size_t seen_forklift_id[3000] = { 0 };
    if(argc != 1)
    {
        return enif_make_badarg(env);
    }
    if(!enif_is_list(env, argv[0]))
    {
        return enif_make_badarg(env);
    }
    ERL_NIF_TERM ret = enif_make_list(env, 0);
    ERL_NIF_TERM outer_list = argv[0];
    ERL_NIF_TERM inner_list;
    ERL_NIF_TERM inner_head;
    const ERL_NIF_TERM* tuple_elems;
    int num_elems;
    unsigned int key;
    while(enif_get_list_cell(env, outer_list, &inner_list, &outer_list))
    {
        if(!enif_is_list(env, inner_list))
        {
            return enif_make_badarg(env);
        }
        while(enif_get_list_cell(env, inner_list, &inner_head, &inner_list))
        {
            if(!enif_get_tuple(env, inner_head, &num_elems, &tuple_elems))
            {
                return enif_make_badarg(env);
            }
            if(num_elems != 2)
            {
                return enif_make_badarg(env);
            }
            if(!enif_get_uint(env, tuple_elems[0], &key))
            {
                return enif_make_badarg(env);
            }
            if(key >= 3000)
            {
                return enif_make_badarg(env);
            }
            if(!seen_forklift_id[key])
            {
                seen_forklift_id[key] = 1;
                /* Prepends, so the returned list is in reverse traversal
                 * order. NOTE(review): presumably callers don't rely on
                 * order — confirm before changing. */
                ret = enif_make_list_cell(env, inner_head, ret);
            }
        }
    }
    return ret;
}

/* bitmap_filter:filter/1; trailing 0 = regular (non-dirty) scheduler flags. */
static ErlNifFunc nif_funcs[] =
{
    {"filter", 1, bitmap_filter, 0}
};

ERL_NIF_INIT(bitmap_filter, nif_funcs, NULL, NULL, NULL, NULL)

c_src/cq/rebar.config → c_src/bitmap_filter/rebar.config 查看文件

@ -1,9 +1,9 @@
{port_specs, [
{"../../priv/cq.so", [
"*.c",
"*.cc"
{"../../priv/bitmap_filter.so", [
"*.c"
]}
]}.
%{port_specs, [{"../../priv/granderl.so", []}]}.
%% {port_env, [
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)",

+ 448
- 0
c_src/bsn/bsn_ext.c 查看文件

@ -0,0 +1,448 @@
#include "erl_nif.h"

/* Binary-set NIF (external chaining variant): a fixed-size slot array of
 * singly linked chains, keyed by a byte-sum hash of the binary. */

ErlNifResourceType* bsn_type;
ERL_NIF_TERM ATOM_TRUE, ATOM_FALSE;

/*
typedef struct {
    unsigned size;
    unsigned char* data;
} ErlNifBinary;
*/

/* Chain node: owned copy of the binary plus the next link. */
struct bsn_elem_struct {
    ErlNifBinary bin;
    struct bsn_elem_struct* next;
};
typedef struct bsn_elem_struct bsn_elem;

typedef bsn_elem* bsn_list;

/* The set resource. All mutation happens under `mutex'. */
typedef struct {
    unsigned int count; /* count of elements */
    unsigned int max;   /* count of slots */
    ErlNifMutex *mutex;
    bsn_list* list;
} bsn_res;

inline static ERL_NIF_TERM bool_to_term(int value) {
    return value ? ATOM_TRUE : ATOM_FALSE;
}

/* Calculate the sum of chars. */
unsigned int
private_hash(const ErlNifBinary* b, unsigned int max)
{
    unsigned char* ptr;
    unsigned int i, sum = 0;
    ptr = b->data;
    i = b->size;
    for (; i; i--, ptr++)
        sum += *ptr;
    return sum % max;
}

/* Frees one node: the owned binary, then the node itself. */
inline void
private_clear_elem(bsn_elem* el)
{
    enif_release_binary(&(el->bin));
    enif_free(el);
}

/* Frees an entire chain starting at ptr. */
inline void
private_chain_clear_all(bsn_elem* ptr)
{
    bsn_elem* next;
    while (ptr != NULL) {
        next = ptr->next;
        private_clear_elem(ptr);
        ptr = next;
    }
}

/* Byte-wise equality of two binaries (1 = equal, 0 = different). */
inline int
private_compare(ErlNifBinary* b1, ErlNifBinary* b2)
{
    unsigned char* p1;
    unsigned char* p2;
    unsigned len;
    if (b1->size != b2->size)
        return 0;
    p1 = b1->data;
    p2 = b2->data;
    len = b1->size;
    while (len) {
        if ((*p1) != (*p2))
            return 0;
        len--; p1++; p2++;
    }
    return 1;
}
/* Skip existing elements. If the element bin is not found, return last element.
 * If el.bin == bin, return el.
 * num_ptr accumulates the probe count; it is negated when `bin' is found,
 * so callers use the sign as a found/not-found flag. */
bsn_elem*
private_chain_shift(bsn_elem* ptr, ErlNifBinary* bin, int* num_ptr)
{
    (*num_ptr)++;
    if ((ptr) == NULL)
        return ptr;
    while (1) {
        if (private_compare(&(ptr->bin), bin)) {
            /* found an equal binary. Invert num */
            (*num_ptr) *= -1;
            return ptr;
        }
        if ((ptr->next) == NULL)
            return ptr;
        ptr = ptr->next;
        (*num_ptr)++;
    }
}

/* Append the element `el' to the chain `chain'.
 * If an equal binary is already chained (num ends up negative), `el' is
 * freed instead of appended. */
void
private_chain_append(bsn_elem** chain, bsn_elem* el, int* num_ptr)
{
    bsn_elem* last;
    if ((*chain) == NULL) {
        /* The new element is last */
        *chain = el;
    } else {
        last = private_chain_shift(*chain, &(el->bin), num_ptr);
        if ((*num_ptr) < 0) {
            /* Element was already added. */
            private_clear_elem(el);
        } else {
            last->next = el;
        }
    }
}
/* Finds the node whose binary equals `bin' in the chain rooted at *ptr,
 * unlinks it and returns it (caller releases it). Returns NULL when the
 * binary is not chained. num_ptr counts probes and is negated on a hit,
 * matching private_chain_shift's convention.
 *
 * Fix: the previous version unlinked through a stale `prev' slot — for a
 * match after the first node it executed `*prev = (*ptr)->next', where
 * `prev' still addressed the link pointing at the PREVIOUS node. That
 * rewrote an earlier link, detaching (and leaking) the intermediate
 * nodes. Walking with a pointer-to-link means *ptr is always exactly the
 * link that must be rewritten: `*ptr = el->next'. */
bsn_elem*
private_chain_shift_clear(bsn_elem** ptr, ErlNifBinary* bin, int* num_ptr)
{
    bsn_elem* el;
    while ((*ptr) != NULL) {
        if (private_compare(&((*ptr)->bin), bin)) {
            /* found an equal binary. Unlink it and invert num. */
            (*num_ptr) *= -1;
            el = *ptr;
            *ptr = el->next;
            return el;
        }
        ptr = (bsn_elem**) &((*ptr)->next);
        (*num_ptr)++;
    }
    return NULL;
}
/* bsn_ext:new/1 — allocates a set with `max' (> 0) slots.
 * NOTE(review): the enif_alloc_resource result is not NULL-checked. */
static ERL_NIF_TERM
bsn_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    unsigned int max;
    bsn_list* ptr;
    bsn_res* r;
    if (!(enif_get_uint(env, argv[0], &max) && (max>0)))
        return enif_make_badarg(env);
    ptr = enif_alloc(sizeof(bsn_list) * max);
    if (ptr == NULL)
        return enif_make_badarg(env);
    r = (bsn_res*) enif_alloc_resource(bsn_type, sizeof(bsn_res));
    r->mutex = enif_mutex_create("Mutex for the BSN writer");
    r->count = 0;
    r->max = max;
    r->list = ptr;
    for (; max; max--, ptr++)
        *ptr = NULL;   /* every slot starts as an empty chain */
    return enif_make_resource(env, r);
}

/* bsn_ext:add/2 — inserts a copy of the binary; returns the probe count,
 * negated when the binary was already present. */
static ERL_NIF_TERM
bsn_add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    bsn_elem* elem_ptr;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    /* NOTE(review): enif_realloc_binary on an inspected (VM-owned) binary
     * looks like an attempt to take a private copy — confirm this is a
     * supported use; enif_alloc_binary + memcpy is the documented route. */
    enif_realloc_binary(&bin, bin.size);
    pos = private_hash(&bin, r->max);
    elem_ptr = enif_alloc(sizeof(bsn_elem));
    if (elem_ptr == NULL)
        return enif_make_badarg(env);
    elem_ptr->next = NULL;
    elem_ptr->bin = bin;
    enif_mutex_lock(r->mutex);
    private_chain_append(&(r->list[pos]), elem_ptr, &num);
    if (num >= 0)
        (r->count)++;
    enif_mutex_unlock(r->mutex);
    /* Already added */
    if (num < 0)
        enif_release_binary(&(bin));
    return enif_make_int(env, num);
}

/* bsn_ext:in/2 — membership probe; result sign tells hit (negative)
 * or miss (positive probe count). */
static ERL_NIF_TERM
bsn_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    private_chain_shift(r->list[pos], &bin, &num);
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}

/* bsn_ext:clear/2 — removes the binary if present; returns the signed
 * probe count (negative when something was removed). */
static ERL_NIF_TERM
bsn_clear(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    bsn_elem* elem_ptr;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    elem_ptr = private_chain_shift_clear(&(r->list[pos]), &bin, &num);
    if (elem_ptr != NULL) {
        private_clear_elem(elem_ptr);
        (r->count)--;
    }
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}
/* Prepends a copy of every binary in chain `e' onto list term `tail'. */
static ERL_NIF_TERM
bsn_all_chain(ErlNifEnv* env, bsn_elem* e, ERL_NIF_TERM tail)
{
    ERL_NIF_TERM head;
    ErlNifBinary bin;
    while (e != NULL) {
        bin = e->bin;
        /* NOTE(review): realloc-as-copy again — see bsn_add. */
        enif_realloc_binary(&bin, bin.size);
        head = enif_make_binary(env, &bin);
        tail = enif_make_list_cell(env, head, tail);
        e = e->next;
    }
    return tail;
}

/* bsn_ext:chains/1 — list of per-slot lists (exposes the distribution). */
static ERL_NIF_TERM
bsn_chains(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max;
    bsn_list* ptr;
    ERL_NIF_TERM tail, head;
    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);
    tail = enif_make_list(env, 0);
    ptr = r->list;
    enif_mutex_lock(r->mutex);
    max = r->max;
    while (max) {
        head = enif_make_list(env, 0);
        head = bsn_all_chain(env, *ptr, head);
        tail = enif_make_list_cell(env, head, tail);
        ptr++;
        max--;
    }
    enif_mutex_unlock(r->mutex);
    return tail;
}

/* bsn_ext:all/1 — flat list of every stored binary. */
static ERL_NIF_TERM
bsn_all(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max;
    bsn_list* ptr;
    ERL_NIF_TERM list;
    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);
    list = enif_make_list(env, 0);
    ptr = r->list;
    enif_mutex_lock(r->mutex);
    max = r->max;
    while (max) {
        list = bsn_all_chain(env, *ptr, list);
        ptr++;
        max--;
    }
    enif_mutex_unlock(r->mutex);
    return list;
}

/* bsn_ext:count/1 — number of stored binaries (read without the mutex). */
static ERL_NIF_TERM
bsn_count(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);
    return enif_make_int(env, r->count);
}

/* bsn_ext:hash/2 — exposes the internal hash for testing. */
static ERL_NIF_TERM
bsn_hash(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    unsigned int max;
    if (!(enif_inspect_binary(env, argv[0], &bin)
        && enif_get_uint(env, argv[1], &max) && (max>0)))
        return enif_make_badarg(env);
    return enif_make_uint(env,
        private_hash(&bin, max));
}

/* bsn_ext:compare/2 — exposes binary equality for testing. */
static ERL_NIF_TERM
bsn_compare(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary b1, b2;
    if (!(enif_inspect_binary(env, argv[0], &b1)
        && enif_inspect_binary(env, argv[1], &b2)))
        return enif_make_badarg(env);
    return bool_to_term(private_compare(&b1, &b2));
}
/* Releases every chain in the slot array (nodes and their binaries). */
void private_clear_all(bsn_res* r)
{
    unsigned int slot;

    for (slot = 0; slot < r->max; slot++)
        private_chain_clear_all(r->list[slot]);
}
/* Resource destructor: free all chains, the mutex, and the slot array. */
void
bsn_type_dtor(ErlNifEnv* env, void* obj)
{
    bsn_res* r = (bsn_res*) obj;
    private_clear_all(r);
    enif_mutex_destroy(r->mutex);
    enif_free(r->list);
}

/* load callback: intern atoms and open the resource type.
 * Non-zero return aborts loading. */
int
on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    ATOM_TRUE = enif_make_atom(env, "true");
    ATOM_FALSE = enif_make_atom(env, "false");
    ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE |
        ERL_NIF_RT_TAKEOVER);
    bsn_type = enif_open_resource_type(env, NULL, "bsn_type",
        bsn_type_dtor, flags, NULL);
    if (bsn_type == NULL) return 1;
    return 0;
}

int
on_upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
    return 0;
}

static ErlNifFunc nif_functions[] = {
    {"new", 1, bsn_new},
    {"add", 2, bsn_add},
    {"all", 1, bsn_all},
    {"chains", 1, bsn_chains},
    {"in", 2, bsn_search},
    {"clear", 2, bsn_clear},
    {"count", 1, bsn_count},
    {"hash", 2, bsn_hash},
    {"compare", 2, bsn_compare},
};

/* on_load doubles as the (old-style) reload callback here. */
ERL_NIF_INIT(bsn_ext, nif_functions, &on_load, &on_load, &on_upgrade, NULL);

+ 331
- 0
c_src/bsn/bsn_int.c 查看文件

@ -0,0 +1,331 @@
#include "erl_nif.h"

/* Binary-set NIF (open-addressing variant): one flat array of slots with
 * linear or quadratic probing. A slot whose `hash' equals `max' is free
 * (max itself can never be a hash value since hashes are taken mod max). */

ErlNifResourceType* bsn_type;
ERL_NIF_TERM ATOM_TRUE, ATOM_FALSE, ATOM_NO_MORE;

struct bsn_elem_struct {
    ErlNifBinary bin;
    unsigned int hash;   /* == max sentinel when the slot is empty */
};
typedef struct bsn_elem_struct bsn_elem;

typedef struct {
    unsigned int count; /* count of elements */
    unsigned int max;   /* count of slots */
    ErlNifMutex *mutex;
    bsn_elem* list;
    /* probing strategy, selected in bsn_new.
     * NOTE(review): declared with void* but assigned functions taking
     * bsn_res* — an incompatible-pointer assignment; confirm intended. */
    unsigned int (*next_pos)
        (void*, unsigned int, unsigned int);
} bsn_res;

inline static ERL_NIF_TERM bool_to_term(int value) {
    return value ? ATOM_TRUE : ATOM_FALSE;
}

/* Linear probing: hash + step (mod max). */
unsigned int next_pos_linear(bsn_res* r, unsigned int hash, unsigned int step) {
    return (hash + step) % (r->max);
}

/* Quadratic probing: hash + step^2 (mod max). */
unsigned int next_pos_quadric(bsn_res* r, unsigned int hash, unsigned int step) {
    return (hash + (step*step)) % (r->max);
}

/* Calculate the sum of chars. */
unsigned int
private_hash(const ErlNifBinary* b, unsigned int max)
{
    unsigned char* ptr;
    unsigned int i, sum = 0;
    ptr = b->data;
    i = b->size;
    for (; i; i--, ptr++)
        sum += *ptr;
    return sum % max;
}

/* Byte-wise equality of two binaries (1 = equal, 0 = different). */
inline int
private_compare(ErlNifBinary* b1, ErlNifBinary* b2)
{
    unsigned char* p1;
    unsigned char* p2;
    unsigned len;
    if (b1->size != b2->size)
        return 0;
    p1 = b1->data;
    p2 = b2->data;
    len = b1->size;
    while (len) {
        if ((*p1) != (*p2))
            return 0;
        len--; p1++; p2++;
    }
    return 1;
}

/* bsn_int:new/1 — allocates a set; a positive argument selects linear
 * probing, a negative one quadratic (|max| slots either way). */
static ERL_NIF_TERM
bsn_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    int max; /* This value will be set by a client:
        if (max<0) -> use quadric algorithm */
    bsn_elem* ptr;
    bsn_res* r;
    if (!enif_get_int(env, argv[0], &max) || (max == 0))
        return enif_make_badarg(env);
    r = (bsn_res*) enif_alloc_resource(bsn_type, sizeof(bsn_res));
    r->mutex = enif_mutex_create("Mutex for the BSN writer");
    r->count = 0;
    /* Select an algorithm */
    if (max>0) {
        r->next_pos = &next_pos_linear;
    } else if (max<0) {
        r->next_pos = &next_pos_quadric;
        max *= -1;
    }
    /* Now max is cells' count in the array. */
    r->max = (unsigned int) max;
    ptr = enif_alloc(sizeof(bsn_elem) * max);
    if (ptr == NULL)
        return enif_make_badarg(env);
    /* NOTE(review): on this failure path the resource r (and its mutex)
     * is leaked until GC runs the dtor on an half-initialized resource. */
    r->list = ptr;
    for (; max; max--, ptr++)
        ptr->hash = r->max;   /* mark every slot empty via the sentinel */
    return enif_make_resource(env, r);
}
/* bsn_int:add/2 — probes for a free or matching slot.
 * Returns the probe count, negated when the binary was already present,
 * or the atom no_more when the table is full.
 * NOTE(review): `num < max' compares int with unsigned int throughout. */
static ERL_NIF_TERM
bsn_add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos, hash, max;
    int num = 0;
    bsn_elem* elem_ptr;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    /* NOTE(review): enif_realloc_binary on an inspected binary — see the
     * same pattern in bsn_ext.c; confirm it is a supported copy idiom. */
    enif_realloc_binary(&bin, bin.size);
    hash = pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    max = r->max;
    while (num < max) {
        elem_ptr = &(r->list[pos]);
        /* Found free space */
        if (elem_ptr->hash == max) {
            elem_ptr->bin = bin;
            elem_ptr->hash = hash;
            break;
        }
        /* Found elem */
        if ((elem_ptr->hash == hash)
            && private_compare(&bin, &(elem_ptr->bin))) {
            num *= -1;
            break;
        }
        pos = (r->next_pos)(r, hash, num);
        num++;
    }
    if ((num >= 0) && (num < max))
        (r->count)++;
    enif_mutex_unlock(r->mutex);
    /* Error: already added or owerflow */
    if (!((num >= 0) && (num < max)))
        enif_release_binary(&bin);
    if (num >= max)
        return ATOM_NO_MORE;
    return enif_make_int(env, num);
}

/* bsn_int:in/2 — membership probe; negative return = found, positive =
 * probe count until a free slot (miss). */
static ERL_NIF_TERM
bsn_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos, max, hash;
    int num = 1;
    bsn_elem* elem_ptr;
    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);
    hash = pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    max = r->max;
    while (num < max) {
        elem_ptr = &(r->list[pos]);
        /* Found free space */
        if (elem_ptr->hash == max) {
            break;
        }
        /* Found elem */
        if ((elem_ptr->hash == hash)
            && private_compare(&bin, &(elem_ptr->bin))) {
            num *= -1;
            break;
        }
        pos = (r->next_pos)(r, hash, num);
        num++;
    }
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}
/* bsn_int:clear/2 — removal is not supported in the open-addressing
 * variant (it would break probe chains); always badarg. */
static ERL_NIF_TERM
bsn_clear(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_badarg(env);
}

/* bsn_int:all/1 — list of every occupied slot's binary. */
static ERL_NIF_TERM
bsn_all(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max, pos = 0;
    ERL_NIF_TERM head, tail;
    ErlNifBinary bin;
    bsn_elem* elem_ptr;
    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);
    tail = enif_make_list(env, 0);
    enif_mutex_lock(r->mutex);
    max = r->max;
    elem_ptr = r->list;
    /* do/while is safe: bsn_new guarantees max >= 1 */
    do {
        if (elem_ptr->hash != max) {   /* slot occupied */
            bin = elem_ptr->bin;
            enif_realloc_binary(&bin, bin.size);
            head = enif_make_binary(env, &bin);
            tail = enif_make_list_cell(env, head, tail);
        }
        elem_ptr++;
        pos++;
    } while (pos < max);
    enif_mutex_unlock(r->mutex);
    return tail;
}

/* bsn_int:count/1 — number of stored binaries (read without the mutex). */
static ERL_NIF_TERM
bsn_count(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);
    return enif_make_int(env, r->count);
}

/* Releases the binary held by every occupied slot. */
void private_clear_all(bsn_res* r)
{
    unsigned int max, num;
    bsn_elem* ptr;
    num = max = r->max;
    ptr = r->list;
    while (num) {
        if (ptr->hash != max) {
            enif_release_binary(&(ptr->bin));
        }
        ptr++;
        num--;
    }
}
/* Resource destructor: release slot binaries, mutex, and the array. */
void
bsn_type_dtor(ErlNifEnv* env, void* obj)
{
    bsn_res* r = (bsn_res*) obj;
    private_clear_all(r);
    enif_mutex_destroy(r->mutex);
    enif_free(r->list);
}

/* load callback: intern atoms and open the resource type.
 * Non-zero return aborts loading. */
int
on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    ATOM_TRUE = enif_make_atom(env, "true");
    ATOM_FALSE = enif_make_atom(env, "false");
    ATOM_NO_MORE = enif_make_atom(env, "no_more");
    ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE |
        ERL_NIF_RT_TAKEOVER);
    bsn_type = enif_open_resource_type(env, NULL, "bsn_type",
        bsn_type_dtor, flags, NULL);
    if (bsn_type == NULL) return 1;
    return 0;
}

int
on_upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
    return 0;
}

static ErlNifFunc nif_functions[] = {
    {"new", 1, bsn_new},
    {"add", 2, bsn_add},
    {"all", 1, bsn_all},
    {"in", 2, bsn_search},
    {"clear", 2, bsn_clear},
    {"count", 1, bsn_count},
};

/* on_load doubles as the (old-style) reload callback here. */
ERL_NIF_INIT(bsn_int, nif_functions, &on_load, &on_load, &on_upgrade, NULL);

+ 448
- 0
c_src/bsn/c_src/bsn_ext.c 查看文件

@ -0,0 +1,448 @@
#include "erl_nif.h"

/* bsn_ext: binary set implemented with separate chaining — one linked
 * chain of nodes per hash bucket. */

ErlNifResourceType* bsn_type;
ERL_NIF_TERM ATOM_TRUE, ATOM_FALSE;

/*
typedef struct {
    unsigned size;
    unsigned char* data;
} ErlNifBinary;
*/

/* Chain node: an owned binary plus the next node in the bucket. */
struct bsn_elem_struct {
    ErlNifBinary bin;
    struct bsn_elem_struct* next;
};
typedef struct bsn_elem_struct bsn_elem;

/* A bucket is the head pointer of its chain (NULL = empty bucket). */
typedef bsn_elem* bsn_list;

typedef struct {
    unsigned int count; /* count of elements */
    unsigned int max;   /* count of slots */
    ErlNifMutex *mutex; /* serializes every table mutation */
    bsn_list* list;     /* array of `max` chain heads */
} bsn_res;

/* Map a C truth value onto the Erlang 'true'/'false' atoms. */
inline static ERL_NIF_TERM bool_to_term(int value) {
    return value ? ATOM_TRUE : ATOM_FALSE;
}
/* Hash a binary by summing its bytes, reduced modulo `max`.
 * Cheap but collision-prone; adequate for small bucket counts. */
unsigned int
private_hash(const ErlNifBinary* b, unsigned int max)
{
    unsigned int sum = 0;
    unsigned int k;

    for (k = 0; k < b->size; k++)
        sum += b->data[k];
    return sum % max;
}
/* Release one chain node: drop the binary's refcount, free the node. */
inline void
private_clear_elem(bsn_elem* el)
{
    enif_release_binary(&(el->bin));
    enif_free(el);
}
/* Release every node of a bucket chain (binary refs plus node memory). */
inline void
private_chain_clear_all(bsn_elem* ptr)
{
    bsn_elem* following;

    for (; ptr != NULL; ptr = following) {
        following = ptr->next;
        private_clear_elem(ptr);
    }
}
/* Byte-wise equality of two binaries: 1 if identical, else 0. */
inline int
private_compare(ErlNifBinary* b1, ErlNifBinary* b2)
{
    unsigned i;

    if (b1->size != b2->size)
        return 0;
    for (i = 0; i < b1->size; i++) {
        if (b1->data[i] != b2->data[i])
            return 0;
    }
    return 1;
}
/* Skip existing elements. If the element bin is not found, return last element.
 * If el.bin == bin, return el. */
/* Walks the chain counting visited nodes in *num_ptr.  On a hit the
 * count is negated and the matching node returned; on a miss the LAST
 * node is returned (NULL for an empty chain) so the caller can append
 * after it. */
bsn_elem*
private_chain_shift(bsn_elem* ptr, ErlNifBinary* bin, int* num_ptr)
{
    (*num_ptr)++;
    if ((ptr) == NULL)
        return ptr;
    while (1) {
        if (private_compare(&(ptr->bin), bin)) {
            /* found an equal binary. Invert num */
            (*num_ptr) *= -1;
            return ptr;
        }
        if ((ptr->next) == NULL)
            return ptr;
        ptr = ptr->next;
        (*num_ptr)++;
    }
}
/* Append the element `el' to the chain `chain' */
/* On a duplicate (signalled by *num_ptr < 0 after the shift) `el` is
 * destroyed HERE via private_clear_elem — the caller must not release
 * el->bin again. */
void
private_chain_append(bsn_elem** chain, bsn_elem* el, int* num_ptr)
{
    bsn_elem* last;

    if ((*chain) == NULL) {
        /* The new element is last */
        *chain = el;
    } else {
        last = private_chain_shift(*chain, &(el->bin), num_ptr);
        if ((*num_ptr) < 0) {
            /* Element was already added. */
            private_clear_elem(el);
        } else {
            last->next = el;
        }
    }
}
/* Search bucket chain `*ptr` for a binary equal to `bin`.
 * On a hit: negate *num_ptr, unlink the node and return it (the caller
 * releases it).  On a miss: bump *num_ptr per visited node, return NULL.
 *
 * Fix: the old version tracked a `prev` link slot and, for a non-head
 * hit, rewired `*prev` (which points at the PREDECESSOR node) to the
 * match's successor — dropping the predecessor from the chain and
 * leaking it.  Unlinking through the current link slot `*ptr` is
 * correct for head and interior nodes alike. */
bsn_elem*
private_chain_shift_clear(bsn_elem** ptr, ErlNifBinary* bin, int* num_ptr)
{
    bsn_elem* el;

    while ((*ptr) != NULL) {
        if (private_compare(&((*ptr)->bin), bin)) {
            /* found an equal binary. Unlink it and invert num. */
            (*num_ptr) *= -1;
            el = *ptr;
            (*ptr) = el->next;
            return el;
        }
        ptr = (bsn_elem**) &((*ptr)->next);
        (*num_ptr)++;
    }
    return NULL;
}
/* NIF new/1: create a chained hash set with `max` (> 0) buckets.
 * Returns the resource term, or badarg on bad input / alloc failure. */
static ERL_NIF_TERM
bsn_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    unsigned int max;
    bsn_list* ptr;
    bsn_res* r;
    ERL_NIF_TERM res;

    if (!(enif_get_uint(env, argv[0], &max) && (max>0)))
        return enif_make_badarg(env);

    ptr = enif_alloc(sizeof(bsn_list) * max);
    if (ptr == NULL)
        return enif_make_badarg(env);

    r = (bsn_res*) enif_alloc_resource(bsn_type, sizeof(bsn_res));
    if (r == NULL) {
        /* Fix: don't leak the bucket array when the resource alloc fails. */
        enif_free(ptr);
        return enif_make_badarg(env);
    }
    r->mutex = enif_mutex_create("Mutex for the BSN writer");
    r->count = 0;
    r->max = max;
    r->list = ptr;
    for (; max; max--, ptr++)
        *ptr = NULL;   /* every bucket starts empty */

    res = enif_make_resource(env, r);
    /* Fix: drop our reference so the GC can run bsn_type_dtor once the
     * term becomes unreachable; without this the resource never dies. */
    enif_release_resource(r);
    return res;
}
/* NIF add/2: insert binary argv[1] into the set.
 * Returns the probe count (>= 0) on insert, or a negative count when
 * the binary was already present.
 * NOTE(review): enif_realloc_binary on a binary obtained from
 * enif_inspect_binary is a legacy idiom kept as-is — confirm against
 * current erl_nif ownership rules. */
static ERL_NIF_TERM
bsn_add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    bsn_elem* elem_ptr;

    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);

    enif_realloc_binary(&bin, bin.size);
    pos = private_hash(&bin, r->max);

    elem_ptr = enif_alloc(sizeof(bsn_elem));
    if (elem_ptr == NULL)
        return enif_make_badarg(env);
    elem_ptr->next = NULL;
    elem_ptr->bin = bin;

    enif_mutex_lock(r->mutex);
    private_chain_append(&(r->list[pos]), elem_ptr, &num);
    if (num >= 0)
        (r->count)++;
    enif_mutex_unlock(r->mutex);

    /* Fix: when the element already existed (num < 0),
     * private_chain_append has already destroyed the node and released
     * the binary via private_clear_elem; releasing `bin` here again was
     * a double release of the same refcount. */
    return enif_make_int(env, num);
}
/* NIF in/2: membership probe.  Returns the probe count, negated when
 * the binary was found; positive when absent. */
static ERL_NIF_TERM
bsn_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;

    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);

    pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    /* Only the probe count in `num` is used; the returned node is not. */
    private_chain_shift(r->list[pos], &bin, &num);
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}
/* NIF clear/2: remove binary argv[1] from the set.
 * Returns the probe count: negative when an element was removed,
 * positive when nothing matched. */
static ERL_NIF_TERM
bsn_clear(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos;
    int num = 0;
    bsn_elem* elem_ptr;

    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);

    pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    elem_ptr = private_chain_shift_clear(&(r->list[pos]), &bin, &num);
    if (elem_ptr != NULL) {
        /* Node is already unlinked; release its binary and memory. */
        private_clear_elem(elem_ptr);
        (r->count)--;
    }
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}
/* Prepend every binary of chain `e` onto list `tail`; returns the new
 * list head.  NOTE(review): realloc-then-make on a binary the chain
 * still owns mirrors the legacy copy idiom used elsewhere — verify. */
static ERL_NIF_TERM
bsn_all_chain(ErlNifEnv* env, bsn_elem* e, ERL_NIF_TERM tail)
{
    ERL_NIF_TERM head;
    ErlNifBinary bin;

    while (e != NULL) {
        bin = e->bin;
        enif_realloc_binary(&bin, bin.size);
        head = enif_make_binary(env, &bin);
        tail = enif_make_list_cell(env, head, tail);
        e = e->next;
    }
    return tail;
}
/* NIF chains/1: list of per-bucket lists, useful for inspecting the
 * hash distribution.  NOTE(review): r->list is read into `ptr` before
 * the mutex is taken; the pointer itself never changes after bsn_new,
 * so this appears benign. */
static ERL_NIF_TERM
bsn_chains(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max;
    bsn_list* ptr;
    ERL_NIF_TERM tail, head;

    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);

    tail = enif_make_list(env, 0);
    ptr = r->list;
    enif_mutex_lock(r->mutex);
    max = r->max;
    while (max) {
        head = enif_make_list(env, 0);
        head = bsn_all_chain(env, *ptr, head);
        tail = enif_make_list_cell(env, head, tail);
        ptr++;
        max--;
    }
    enif_mutex_unlock(r->mutex);
    return tail;
}
/* NIF all/1: every stored binary as one flat list (bucket order). */
static ERL_NIF_TERM
bsn_all(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int i;
    ERL_NIF_TERM acc;

    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);

    acc = enif_make_list(env, 0);
    enif_mutex_lock(r->mutex);
    for (i = 0; i < r->max; i++)
        acc = bsn_all_chain(env, r->list[i], acc);
    enif_mutex_unlock(r->mutex);
    return acc;
}
/* NIF count/1: number of stored elements (read without the mutex). */
static ERL_NIF_TERM
bsn_count(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* res;

    if (enif_get_resource(env, argv[0], bsn_type, (void**) &res))
        return enif_make_int(env, res->count);
    return enif_make_badarg(env);
}
/* NIF hash/2: expose private_hash(Bin, Max) for testing; Max must be > 0. */
static ERL_NIF_TERM
bsn_hash(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary input;
    unsigned int buckets;

    if (!enif_inspect_binary(env, argv[0], &input))
        return enif_make_badarg(env);
    if (!enif_get_uint(env, argv[1], &buckets) || buckets == 0)
        return enif_make_badarg(env);
    return enif_make_uint(env, private_hash(&input, buckets));
}
/* NIF compare/2: 'true' iff the two binaries are byte-identical. */
static ERL_NIF_TERM
bsn_compare(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary left, right;

    if (!enif_inspect_binary(env, argv[0], &left))
        return enif_make_badarg(env);
    if (!enif_inspect_binary(env, argv[1], &right))
        return enif_make_badarg(env);
    return bool_to_term(private_compare(&left, &right));
}
/* Free all bucket chains; used by the destructor (no locking needed
 * there — nothing else can reference the resource any more). */
void private_clear_all(bsn_res* r)
{
    unsigned int i;

    for (i = 0; i < r->max; i++)
        private_chain_clear_all(r->list[i]);
}
/* Resource destructor: release chain contents first, then the mutex,
 * then the bucket array itself. */
void
bsn_type_dtor(ErlNifEnv* env, void* obj)
{
    bsn_res* r = (bsn_res*) obj;

    private_clear_all(r);
    enif_mutex_destroy(r->mutex);
    enif_free(r->list);
}
/* Module load callback: intern the result atoms and open the resource
 * type.  Returns 0 on success, non-zero to abort loading. */
int
on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    ATOM_TRUE = enif_make_atom(env, "true");
    ATOM_FALSE = enif_make_atom(env, "false");

    ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE |
        ERL_NIF_RT_TAKEOVER);
    bsn_type = enif_open_resource_type(env, NULL, "bsn_type",
        bsn_type_dtor, flags, NULL);
    if (bsn_type == NULL) return 1;
    return 0;
}
/* Hot-upgrade callback: no state to migrate, report success. */
int
on_upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
    return 0;
}
/* Erlang-visible function table: {name, arity, C implementation}. */
static ErlNifFunc nif_functions[] = {
    {"new", 1, bsn_new},
    {"add", 2, bsn_add},
    {"all", 1, bsn_all},
    {"chains", 1, bsn_chains},
    {"in", 2, bsn_search},
    {"clear", 2, bsn_clear},
    {"count", 1, bsn_count},
    {"hash", 2, bsn_hash},
    {"compare", 2, bsn_compare},
};

/* NB: on_load is registered for both the load and reload slots. */
ERL_NIF_INIT(bsn_ext, nif_functions, &on_load, &on_load, &on_upgrade, NULL);

+ 331
- 0
c_src/bsn/c_src/bsn_int.c 查看文件

@ -0,0 +1,331 @@
#include "erl_nif.h"

/* bsn_int: binary set implemented with open addressing — a flat slot
 * array probed linearly or quadratically. */

ErlNifResourceType* bsn_type;
ERL_NIF_TERM ATOM_TRUE, ATOM_FALSE, ATOM_NO_MORE;

/* One slot: the owned binary and its cached hash.  A slot whose hash
 * equals the table's `max` is free (max is never a valid hash, because
 * hashes are reduced modulo max). */
struct bsn_elem_struct {
    ErlNifBinary bin;
    unsigned int hash;
};
typedef struct bsn_elem_struct bsn_elem;

typedef struct {
    unsigned int count; /* count of elements */
    unsigned int max;   /* count of slots */
    ErlNifMutex *mutex; /* serializes every table access */
    bsn_elem* list;     /* slot array of length `max` */
    /* Probing strategy: maps (table, home hash, step) to the next slot.
     * NOTE(review): assigned from functions taking bsn_res* rather than
     * void* — an incompatible-pointer-type warning under strict compilers. */
    unsigned int (*next_pos)
        (void*, unsigned int, unsigned int);
} bsn_res;

/* Map a C truth value onto the Erlang 'true'/'false' atoms. */
inline static ERL_NIF_TERM bool_to_term(int value) {
    return value ? ATOM_TRUE : ATOM_FALSE;
}
/* Linear probing: home hash + step, wrapped to the table size. */
unsigned int next_pos_linear(bsn_res* r, unsigned int hash, unsigned int step) {
    return (hash + step) % (r->max);
}
/* Quadratic probing: home hash + step^2, wrapped to the table size. */
unsigned int next_pos_quadric(bsn_res* r, unsigned int hash, unsigned int step) {
    return (hash + (step*step)) % (r->max);
}
/* Hash a binary by summing its bytes, reduced modulo `max`.
 * Cheap but collision-prone; adequate for small tables. */
unsigned int
private_hash(const ErlNifBinary* b, unsigned int max)
{
    unsigned int sum = 0;
    unsigned int k;

    for (k = 0; k < b->size; k++)
        sum += b->data[k];
    return sum % max;
}
/* Byte-wise equality of two binaries: 1 if identical, else 0. */
inline int
private_compare(ErlNifBinary* b1, ErlNifBinary* b2)
{
    unsigned i;

    if (b1->size != b2->size)
        return 0;
    for (i = 0; i < b1->size; i++) {
        if (b1->data[i] != b2->data[i])
            return 0;
    }
    return 1;
}
/* NIF new/1: create an open-addressing table with |max| slots.
 * max > 0 selects linear probing; max < 0 selects quadratic probing.
 * A slot is free while its hash field equals r->max (impossible hash). */
static ERL_NIF_TERM
bsn_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    int max; /* This value will be set by a client:
        if (max<0) -> use quadric algorithm */
    bsn_elem* ptr;
    bsn_res* r;
    ERL_NIF_TERM res;

    if (!enif_get_int(env, argv[0], &max) || (max == 0))
        return enif_make_badarg(env);

    r = (bsn_res*) enif_alloc_resource(bsn_type, sizeof(bsn_res));
    r->mutex = enif_mutex_create("Mutex for the BSN writer");
    r->count = 0;

    /* Select a probing algorithm by the sign of max. */
    if (max > 0) {
        r->next_pos = &next_pos_linear;
    } else {
        r->next_pos = &next_pos_quadric;
        max *= -1;
    }

    /* Now max is the slot count. */
    r->max = (unsigned int) max;
    ptr = enif_alloc(sizeof(bsn_elem) * max);
    if (ptr == NULL) {
        /* Fix: release the half-built resource instead of leaking it.
         * Zero the fields the destructor touches so it runs safely. */
        r->list = NULL;
        r->max = 0;
        enif_release_resource(r);
        return enif_make_badarg(env);
    }
    r->list = ptr;
    for (; max; max--, ptr++)
        ptr->hash = r->max;   /* mark every slot free */

    res = enif_make_resource(env, r);
    /* Fix: hand ownership to the GC so bsn_type_dtor can ever run;
     * without this release the resource leaks forever. */
    enif_release_resource(r);
    return res;
}
/* NIF add/2: insert binary argv[1] using open addressing.
 * Returns the probe count (>= 0) on success, a negative count when the
 * binary was already present, or 'no_more' when the table is full.
 * Fixes: index comparisons now cast max to int — previously a negative
 * `num` (duplicate found) was promoted to unsigned in `num >= max`, so
 * duplicates were misreported as 'no_more'.  A duplicate found on the
 * very first probe (num == 0) also wrongly bumped r->count and leaked
 * the binary; the explicit `stored` flag closes both holes. */
static ERL_NIF_TERM
bsn_add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos, hash, max;
    int num = 0;
    int stored = 0;
    bsn_elem* elem_ptr;

    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);

    enif_realloc_binary(&bin, bin.size);
    hash = pos = private_hash(&bin, r->max);

    enif_mutex_lock(r->mutex);
    max = r->max;
    while (num < (int) max) {
        elem_ptr = &(r->list[pos]);
        /* Found free space: claim it. */
        if (elem_ptr->hash == max) {
            elem_ptr->bin = bin;
            elem_ptr->hash = hash;
            stored = 1;
            break;
        }
        /* Found an existing equal element: report negative count. */
        if ((elem_ptr->hash == hash)
            && private_compare(&bin, &(elem_ptr->bin))) {
            num *= -1;
            break;
        }
        pos = (r->next_pos)(r, hash, num);
        num++;
    }
    if (stored)
        (r->count)++;
    enif_mutex_unlock(r->mutex);

    /* Error: already added or overflow — binary was not stored. */
    if (!stored)
        enif_release_binary(&bin);
    if (num >= (int) max)
        return ATOM_NO_MORE;
    return enif_make_int(env, num);
}
/* NIF in/2: membership probe via open addressing.
 * Returns a negative probe count when found, or a positive count when
 * the probe hit a free slot (absent) or the table was exhausted. */
static ERL_NIF_TERM
bsn_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    bsn_res* r;
    unsigned int pos, max, hash;
    int num = 1;
    bsn_elem* elem_ptr;

    if (!(enif_get_resource(env, argv[0], bsn_type, (void**) &r)
        && enif_inspect_binary(env, argv[1], &bin)))
        return enif_make_badarg(env);

    hash = pos = private_hash(&bin, r->max);
    enif_mutex_lock(r->mutex);
    max = r->max;
    /* num stays positive inside the loop, so the int/unsigned
     * comparison against max is well behaved here. */
    while (num < max) {
        elem_ptr = &(r->list[pos]);
        /* Found free space */
        if (elem_ptr->hash == max) {
            break;
        }
        /* Found elem */
        if ((elem_ptr->hash == hash)
            && private_compare(&bin, &(elem_ptr->bin))) {
            num *= -1;
            break;
        }
        pos = (r->next_pos)(r, hash, num);
        num++;
    }
    enif_mutex_unlock(r->mutex);
    return enif_make_int(env, num);
}
/* NIF clear/2: deletion is not supported by the open-addressing
 * variant (no tombstones are implemented); always raises badarg. */
static ERL_NIF_TERM
bsn_clear(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_badarg(env);
}
/* NIF all/1: collect every stored binary into a list (slot order,
 * reversed by consing).  A slot is occupied when its cached hash
 * differs from r->max (r->max serves as the "free" marker).
 * NOTE(review): enif_realloc_binary/enif_make_binary on a binary the
 * resource still owns is a legacy copy idiom — confirm it is safe with
 * current erl_nif semantics. */
static ERL_NIF_TERM
bsn_all(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* r;
    unsigned int max, pos = 0;
    ERL_NIF_TERM head, tail;
    ErlNifBinary bin;
    bsn_elem* elem_ptr;

    if (!enif_get_resource(env, argv[0], bsn_type, (void**) &r))
        return enif_make_badarg(env);

    tail = enif_make_list(env, 0);
    enif_mutex_lock(r->mutex);
    max = r->max;
    elem_ptr = r->list;
    /* max is non-zero (enforced in bsn_new), so do/while is safe. */
    do {
        if (elem_ptr->hash != max) {  /* occupied slot */
            bin = elem_ptr->bin;
            enif_realloc_binary(&bin, bin.size);
            head = enif_make_binary(env, &bin);
            tail = enif_make_list_cell(env, head, tail);
        }
        elem_ptr++;
        pos++;
    } while (pos < max);
    enif_mutex_unlock(r->mutex);
    return tail;
}
/* NIF count/1: number of stored elements (read without the mutex). */
static ERL_NIF_TERM
bsn_count(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    bsn_res* res;

    if (enif_get_resource(env, argv[0], bsn_type, (void**) &res))
        return enif_make_int(env, res->count);
    return enif_make_badarg(env);
}
/* Release every binary still held in the slot array.  Called only from
 * the resource destructor, so no locking is required.  A slot is
 * occupied when its cached hash differs from r->max. */
void private_clear_all(bsn_res* r)
{
    unsigned int max, num;
    bsn_elem* ptr;

    num = max = r->max;
    ptr = r->list;
    while (num) {
        if (ptr->hash != max) {  /* occupied slot */
            enif_release_binary(&(ptr->bin));
        }
        ptr++;
        num--;
    }
}
/* Resource destructor: release the stored binaries first, then the
 * mutex, then the slot array itself. */
void
bsn_type_dtor(ErlNifEnv* env, void* obj)
{
    bsn_res* r = (bsn_res*) obj;

    private_clear_all(r);
    enif_mutex_destroy(r->mutex);
    enif_free(r->list);
}
/* Module load callback: intern the atoms used by the NIFs and open the
 * resource type.  Returns 0 on success, non-zero to abort loading. */
int
on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    ATOM_TRUE = enif_make_atom(env, "true");
    ATOM_FALSE = enif_make_atom(env, "false");
    ATOM_NO_MORE = enif_make_atom(env, "no_more");

    ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE |
        ERL_NIF_RT_TAKEOVER);
    bsn_type = enif_open_resource_type(env, NULL, "bsn_type",
        bsn_type_dtor, flags, NULL);
    if (bsn_type == NULL) return 1;
    return 0;
}
/* Hot-upgrade callback: no state to migrate, report success. */
int
on_upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
    return 0;
}
/* Erlang-visible function table: {name, arity, C implementation}. */
static ErlNifFunc nif_functions[] = {
    {"new", 1, bsn_new},
    {"add", 2, bsn_add},
    {"all", 1, bsn_all},
    {"in", 2, bsn_search},
    {"clear", 2, bsn_clear},
    {"count", 1, bsn_count},
};

/* NB: on_load is registered for both the load and reload slots. */
ERL_NIF_INIT(bsn_int, nif_functions, &on_load, &on_load, &on_upgrade, NULL);

c_src/cq2/rebar.config → c_src/bsn/rebar.config 查看文件

@ -1,9 +1,8 @@
{port_specs, [
{"../../priv/cq2.so", [
"*.c",
"*.cc"
]}
{"../../priv/bsn_ext.so", ["bsn_ext.c"]},
{"../../priv/bsn_int.so", ["bsn_int.c"]}
]}.
%{port_specs, [{"../../priv/granderl.so", []}]}.
%% {port_env, [
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)",
@ -24,3 +23,7 @@
%%
%% {"win32", "CXXFLAGS", "$CXXFLAGS /O2 /DNDEBUG"}
%% ]}.

+ 318
- 0
c_src/couchdb_hqueue/c_src/hqueue.c 查看文件

@ -0,0 +1,318 @@
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "hqueue.h"
/* Max-priority queue backed by a binary heap.
 * heap[] uses 1-based indexing; slot 0 stays unused. */
struct hqueue
{
    int version;        /* format version (HQ_VERSION) */
    uint32_t idx;       /* last used slot index == current element count */
    uint32_t max_elems; /* hard capacity limit */
    uint32_t heap_size; /* currently allocated slots (grows on demand) */
    hqnode_t* heap; // one based index
};

/* One heap entry: a priority plus an opaque user value. */
struct hqnode
{
    double priority;
    void* value;
};
/* Swap heap slots i and j. */
static inline void
hqueue_exchange(hqueue_t* hqueue, int i, int j)
{
    hqnode_t swap = hqueue->heap[i];

    hqueue->heap[i] = hqueue->heap[j];
    hqueue->heap[j] = swap;
}
/* Max-heap ordering: is slot i's priority strictly below slot j's? */
static inline int
hqueue_less(hqueue_t* hqueue, int i, int j)
{
    double pi = hqueue->heap[i].priority;
    double pj = hqueue->heap[j].priority;

    return pi < pj;
}
/* Sift slot k up toward the root until the max-heap property holds. */
static void
hqueue_fix_up(hqueue_t* hqueue, int k)
{
    int parent;

    for (; k > 1; k = parent) {
        parent = k / 2;
        if (!hqueue_less(hqueue, parent, k))
            break;
        hqueue_exchange(hqueue, parent, k);
    }
}
/* Sift slot k down toward the leaves until the max-heap property holds:
 * pick the larger child j, stop once the parent is not less than it. */
static void
hqueue_fix_down(hqueue_t* hqueue, int k)
{
    int j;
    int n = hqueue->idx;

    while(2*k <= n) {
        j = 2*k;
        if(j < n && hqueue_less(hqueue, j, j+1)) {
            j++;  /* right child is the larger one */
        }
        if(!hqueue_less(hqueue, k, j)) {
            break;
        }
        hqueue_exchange(hqueue, k, j);
        k = j;
    }
    return;
}
/* Allocate a queue with capacity max_elems and an initial heap of
 * heap_size slots.  Both must be non-zero; heap_size is clamped to
 * max_elems.  Returns NULL on bad arguments or allocation failure. */
hqueue_t*
hqueue_new(uint32_t max_elems, uint32_t heap_size)
{
    hqueue_t* hqueue = NULL;
    size_t total_heap_size;

    if(max_elems == 0 || heap_size == 0) {
        return NULL;
    }
    if(max_elems < heap_size) {
        heap_size = max_elems;
    }

    hqueue = HQUEUE_ALLOC(sizeof(hqueue_t));
    if(hqueue == NULL) {
        return NULL;
    }
    memset(hqueue, '\0', sizeof(hqueue_t));

    hqueue->version = HQ_VERSION;
    hqueue->max_elems = max_elems;
    hqueue->heap_size = heap_size;
    hqueue->idx = 0;

    /* +1 slot: the heap is 1-based, slot 0 stays unused. */
    total_heap_size = sizeof(hqnode_t) * (hqueue->heap_size+1);
    hqueue->heap = (hqnode_t*) HQUEUE_ALLOC(total_heap_size);
    if(hqueue->heap == NULL ) {
        HQUEUE_FREE(hqueue);
        return NULL;
    }
    memset(hqueue->heap, '\0', total_heap_size);

    return hqueue;
}
/* Free the queue and its heap array.  Does NOT free the stored values;
 * use hqueue_free2 for that. */
void
hqueue_free(hqueue_t* hqueue)
{
    HQUEUE_FREE(hqueue->heap);
    HQUEUE_FREE(hqueue);
    return;
}
/* Free the queue, invoking free_node on every still-queued value.
 * Slots beyond idx must be NULL (extract/resize clear them). */
void
hqueue_free2(hqueue_t* hqueue, void (*free_node)(void* node))
{
    uint32_t i;

    for(i = 1; i < hqueue->heap_size + 1; i++) {
        if(i <= hqueue->idx) {
            free_node(hqueue->heap[i].value);
        } else {
            assert(hqueue->heap[i].value == NULL && "inactive elements must be NULL");
        }
    }
    hqueue_free(hqueue);
    return;
}
// Extraction order is undefined for entries with duplicate priorities
/* Pop the highest-priority element into *priority / *value.
 * Returns 1 on success, 0 when the queue is empty.  The vacated slot's
 * value is NULLed so free2/resize can assert on inactive slots. */
int
hqueue_extract_max(hqueue_t* hqueue, double* priority, void** value)
{
    if(hqueue->idx <= 0) {
        return 0;
    }

    /* Swap root with the last element, detach it, then restore heap. */
    hqueue_exchange(hqueue, 1, hqueue->idx);
    *priority = hqueue->heap[hqueue->idx].priority;
    *value = hqueue->heap[hqueue->idx].value;
    hqueue->heap[hqueue->idx].value = NULL;
    hqueue->idx--; // heap uses one based index, so we decrement after
    hqueue_fix_down(hqueue, 1);
    return 1;
}
/* Read slot idx (1-based) without removing it.
 * No bounds check: the caller must keep idx within [1, hqueue->idx]. */
void
hqueue_get_elem(hqueue_t* hqueue, uint32_t idx, double *priority, void** value)
{
    *priority = hqueue->heap[idx].priority;
    *value = hqueue->heap[idx].value;
    return;
}
/* Grow the heap by HQ_SCALE_FACTOR (capped at max_elems) when the next
 * insert would not fit.  Returns 1 on success or no-op, 0 on failure. */
static int
hqueue_maybe_resize(hqueue_t* hqueue)
{
    uint32_t min_resize;

    if(hqueue->idx + 1 > hqueue->heap_size) {
        /* NOTE(review): idx * HQ_SCALE_FACTOR could wrap for very large
         * idx; in practice capped by max_elems — confirm acceptable. */
        if(hqueue->idx * HQ_SCALE_FACTOR > hqueue->max_elems) {
            min_resize = hqueue->max_elems;
        } else {
            min_resize = hqueue->idx * HQ_SCALE_FACTOR;
        }
        return hqueue_resize_heap(hqueue, min_resize);
    }
    return 1;
}
/* Push value with the given priority.
 * Returns 1 on success, 0 when the queue is already at max_elems or
 * the heap could not grow. */
int
hqueue_insert(hqueue_t* hqueue, double priority, void* value)
{
    if(hqueue->idx >= hqueue->max_elems) {
        return 0;
    }
    if(!hqueue_maybe_resize(hqueue)) {
        return 0;
    }

    hqueue->idx++; // heap uses one based index, so we increment first
    hqueue->heap[hqueue->idx].priority = priority;
    hqueue->heap[hqueue->idx].value = value;
    hqueue_fix_up(hqueue, hqueue->idx);
    return 1;
}
/* Current number of queued elements. */
uint32_t
hqueue_size(hqueue_t* hqueue)
{
    return hqueue->idx;
}
/* Currently allocated heap slots (not the element count). */
uint32_t
hqueue_heap_size(hqueue_t* hqueue)
{
    return hqueue->heap_size;
}
/* Hard capacity limit of the queue. */
uint32_t
hqueue_max_elems(hqueue_t* hqueue)
{
    return hqueue->max_elems;
}
/* Multiply every active priority by `factor`.  Heap order is preserved
 * for non-negative factors, which is what the NIF layer enforces. */
void
hqueue_scale_by(hqueue_t* hqueue, double factor)
{
    uint32_t k = 1;

    while (k <= hqueue->idx && k <= hqueue->heap_size) {
        hqueue->heap[k].priority *= factor;
        k++;
    }
}
/* Reallocate the heap array to hold new_heap_size elements.
 * Returns the previous heap size on success, or 0 when the queue holds
 * more elements than the requested size or allocation fails.
 * Fix: the copy loop's else-branch (and its assert) was unreachable —
 * the loop condition already guarantees i <= hqueue->idx — so the dead
 * code is removed. */
uint32_t
hqueue_resize_heap(hqueue_t* hqueue, uint32_t new_heap_size)
{
    uint32_t old_heap_size;
    size_t total_heap_size;
    hqnode_t* tmp_heap;
    uint32_t i;

    if(hqueue->idx > new_heap_size) {
        return 0;
    }

    total_heap_size = sizeof(hqnode_t) * (new_heap_size+1);  /* 1-based */
    old_heap_size = hqueue->heap_size;

    if((tmp_heap = (hqnode_t*) HQUEUE_ALLOC(total_heap_size)) == NULL) {
        return 0;
    }
    memset(tmp_heap, '\0', total_heap_size);

    /* Move the active elements; NULL the old slots so the old heap can
     * be freed without touching live values. */
    for(i = 1; i <= hqueue->idx && i <= old_heap_size; i++) {
        tmp_heap[i] = hqueue->heap[i];
        hqueue->heap[i].value = NULL;
    }

    HQUEUE_FREE(hqueue->heap);
    hqueue->heap = tmp_heap;
    hqueue->heap_size = new_heap_size;

    return old_heap_size;
}
/* Change the capacity limit, shrinking the heap first when it exceeds
 * the new limit.  Returns the previous limit, or 0 on failure.
 * (0 is unambiguous because hqueue_new forbids max_elems == 0.) */
int
hqueue_set_max_elems(hqueue_t* hqueue, uint32_t new_max_elems)
{
    uint32_t old_max_elems;

    if(hqueue->heap_size > new_max_elems) {
        if(!hqueue_resize_heap(hqueue, new_max_elems)) {
            return 0;
        }
    }
    old_max_elems = hqueue->max_elems;
    hqueue->max_elems = new_max_elems;
    return old_max_elems;
}

+ 5
- 0
c_src/couchdb_hqueue/c_src/hqueue.d 查看文件

@ -0,0 +1,5 @@
c_src/hqueue.o: c_src/hqueue.c c_src/hqueue.h \
/usr/lib/erlang/erts-10.6.2/include/erl_nif.h \
/usr/lib/erlang/erts-10.6.2/include/erl_drv_nif.h \
/usr/lib/erlang/erts-10.6.2/include/erl_int_sizes_config.h \
/usr/lib/erlang/erts-10.6.2/include/erl_nif_api_funcs.h

+ 60
- 0
c_src/couchdb_hqueue/c_src/hqueue.h 查看文件

@ -0,0 +1,60 @@
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#pragma once

#include <stdint.h>

#define HQ_VERSION 0
#define HQ_SCALE_FACTOR 2 // heap expansion scale factor

// Override the default memory allocator to use the Erlang versions.
// This bubbles up memory usage for the NIF into Erlang stats.
#ifdef HQ_ENIF_ALLOC
#include "erl_nif.h"
#define HQUEUE_ALLOC enif_alloc
#define HQUEUE_FREE enif_free
#else
#define HQUEUE_ALLOC malloc
#define HQUEUE_FREE free
#endif

typedef struct hqnode hqnode_t;  /* opaque heap entry */
typedef struct hqueue hqueue_t;  /* opaque max-priority queue */

/* Constructors / destructors (free2 also frees values via free_node). */
hqueue_t* hqueue_new(uint32_t max_elems, uint32_t heap_size);
void hqueue_free(hqueue_t* hqueue);
void hqueue_free2(hqueue_t* hqueue, void (*free_node)(void* node));

/* Core ops: insert and pop-max return 1 on success, 0 on failure. */
int hqueue_insert(hqueue_t* hqueue, double priority, void* val);
int hqueue_extract_max(hqueue_t* hqueue, double* priority, void** value);
void hqueue_get_elem(hqueue_t* hqueue, uint32_t idx, double *priority,
    void** value);

/* Introspection. */
uint32_t hqueue_size(hqueue_t* hqueue);
uint32_t hqueue_heap_size(hqueue_t* hqueue);
uint32_t hqueue_max_elems(hqueue_t* hqueue);

/* Capacity management and bulk priority scaling. */
int hqueue_set_max_elems(hqueue_t* hqueue, uint32_t new_max_elems);
void hqueue_scale_by(hqueue_t* hqueue, double factor);
uint32_t hqueue_resize_heap(hqueue_t* hqueue, uint32_t new_heap_size);

+ 601
- 0
c_src/couchdb_hqueue/c_src/hqueue_nif.c 查看文件

@ -0,0 +1,601 @@
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include "hqueue.h"
/* Per-load NIF state: interned atoms plus the resource type. */
typedef struct
{
    ERL_NIF_TERM atom_ok;
    ERL_NIF_TERM atom_error;
    ERL_NIF_TERM atom_value;
    ERL_NIF_TERM atom_empty;
    ERL_NIF_TERM atom_full;
    ERL_NIF_TERM atom_max_elems;
    ERL_NIF_TERM atom_heap_size;
    ERL_NIF_TERM atom_too_small;
    ErlNifResourceType* res_hqueue;
} hqueue_priv;

/* Queue entry: a private env owning the copied value term. */
typedef struct
{
    ErlNifEnv* env;
    ERL_NIF_TERM value;
} hqnode_nif_t;

/* Resource wrapper: the queue plus the owning process — the queue is
 * single-owner, every NIF verifies the caller's pid. */
typedef struct
{
    int version;
    uint64_t gen;
    hqueue_t* hqueue;
    ErlNifPid p;
} hqueue_nif_t;

/* Defaults used by new/1 when options are omitted. */
static const uint32_t default_max_elems = UINT32_MAX-1;
static const uint32_t default_heap_size = 1024;
/* Return the atom `name`, reusing it when it already exists. */
static inline ERL_NIF_TERM
make_atom(ErlNifEnv* env, const char* name)
{
    ERL_NIF_TERM atom;

    return enif_make_existing_atom(env, name, &atom, ERL_NIF_LATIN1)
        ? atom
        : enif_make_atom(env, name);
}
/* Wrap a value as {ok, Value}. */
static inline ERL_NIF_TERM
make_ok(ErlNifEnv* env, hqueue_priv* priv, ERL_NIF_TERM value)
{
    return enif_make_tuple2(env, priv->atom_ok, value);
}
/* Wrap a reason as {error, Reason}. */
static inline ERL_NIF_TERM
make_error(ErlNifEnv* env, hqueue_priv* priv, ERL_NIF_TERM reason)
{
    return enif_make_tuple2(env, priv->atom_error, reason);
}
/* Is the calling process the queue's registered owner?  1 = yes. */
static inline int
check_pid(ErlNifEnv* env, hqueue_nif_t* hqueue_nif)
{
    ErlNifPid self_pid;

    enif_self(env, &self_pid);
    return enif_compare(self_pid.pid, hqueue_nif->p.pid) == 0;
}
/* Destroy a node: free its private env (and the term it owns), then
 * the node itself. */
void
hqueue_nif_node_free(hqnode_nif_t* hqnode_nif)
{
    enif_free_env(hqnode_nif->env);
    enif_free(hqnode_nif);
    return;
}
/* void*-typed adapter so hqueue_free2 can free nodes via callback. */
void
hqueue_nif_node_free_ext(void* node)
{
    hqueue_nif_node_free((hqnode_nif_t*) node);
    return;
}
/* Allocate a queue node with its own term environment.
 * Fix: the old code allocated sizeof(hqnode_nif_t*) — the size of a
 * POINTER, not the struct — and then wrote sizeof(hqnode_nif_t) bytes:
 * a heap buffer overflow on every allocation. */
hqnode_nif_t*
hqueue_nif_node_alloc()
{
    hqnode_nif_t* node = (hqnode_nif_t*) enif_alloc(sizeof(hqnode_nif_t));

    memset(node, 0, sizeof(hqnode_nif_t));
    node->env = enif_alloc_env();
    return node;
}
/* Parse an {Atom, UInt} option tuple into *p.
 * Returns 1 and writes *p only when `value` is a 2-tuple whose first
 * element equals `atom`; otherwise returns 0 and leaves *p untouched. */
static int
get_uint_param(ErlNifEnv* env, ERL_NIF_TERM value, ERL_NIF_TERM atom, uint32_t* p)
{
    const ERL_NIF_TERM* tuple;
    int arity;

    if(!enif_get_tuple(env, value, &arity, &tuple)) {
        return 0;
    }
    if(arity != 2) {
        return 0;
    }
    if(enif_compare(tuple[0], atom) != 0) {
        return 0;
    }
    if(!enif_get_uint(env, tuple[1], p)) {
        return 0;
    }
    return 1;
}
/* Allocate the resource and its queue; record the calling pid as the
 * owner.  On hqueue_new failure the resource is released (this runs
 * the destructor with a NULL queue) and NULL is returned. */
static inline hqueue_nif_t*
hqueue_nif_create_int(ErlNifEnv* env, hqueue_priv* priv, uint32_t max_elems,
    uint32_t heap_size)
{
    hqueue_nif_t* hqueue_nif = NULL;

    assert(priv != NULL && "missing private data member");

    hqueue_nif = (hqueue_nif_t*) enif_alloc_resource(
        priv->res_hqueue, sizeof(hqueue_nif_t));
    memset(hqueue_nif, 0, sizeof(hqueue_nif_t));

    hqueue_nif->version = HQ_VERSION;
    hqueue_nif->hqueue = hqueue_new(max_elems, heap_size);
    if(hqueue_nif->hqueue == NULL ) {
        enif_release_resource(hqueue_nif);
        return NULL;
    }
    enif_self(env, &(hqueue_nif->p));

    return hqueue_nif;
}
/* NIF new/1: build a queue from an option list of
 * [{max_elems, N} | {heap_size, N}].  Returns {ok, Ref} or badarg. */
static ERL_NIF_TERM
hqueue_nif_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;
    ERL_NIF_TERM ret;
    ERL_NIF_TERM opts;
    ERL_NIF_TERM value;
    uint32_t max_elems = default_max_elems;
    uint32_t heap_size = default_heap_size;

    if(argc != 1) {
        return enif_make_badarg(env);
    }

    opts = argv[0];
    if(!enif_is_list(env, opts)) {
        return enif_make_badarg(env);
    }

    /* Any list element that is not a recognised option is an error. */
    while(enif_get_list_cell(env, opts, &value, &opts)) {
        if(get_uint_param(env, value, priv->atom_max_elems, &max_elems)) {
            continue;
        } else if(get_uint_param(env, value, priv->atom_heap_size, &heap_size)) {
            continue;
        } else {
            return enif_make_badarg(env);
        }
    }

    hqueue_nif = hqueue_nif_create_int(env, priv, max_elems, heap_size);
    if(hqueue_nif == NULL) {
        return enif_make_badarg(env);
    }

    /* Hand ownership to the term; the GC now controls the resource. */
    ret = enif_make_resource(env, hqueue_nif);
    enif_release_resource(hqueue_nif);

    return make_ok(env, priv, ret);
}
/* Resource destructor: free the queue and every queued node.
 * Fix: guard against a NULL queue — hqueue_nif_create_int releases the
 * resource when hqueue_new fails, which runs this destructor while
 * hqueue is still NULL; the old code then crashed inside
 * hqueue_free2. */
static void
hqueue_nif_free(ErlNifEnv* env, void* obj)
{
    hqueue_nif_t* hqueue_nif = (hqueue_nif_t*) obj;

    if(hqueue_nif->hqueue != NULL) {
        hqueue_free2(hqueue_nif->hqueue, hqueue_nif_node_free_ext);
    }
    return;
}
/* NIF extract_max/1: pop the highest-priority entry.
 * Returns {Priority, Value} or {error, empty}; owner process only. */
static ERL_NIF_TERM
hqueue_nif_extract_max(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;
    hqnode_nif_t* hqnode_nif;
    double tmp_priority;
    ERL_NIF_TERM ret;
    ERL_NIF_TERM priority;
    ERL_NIF_TERM value;

    if(argc != 1) {
        return enif_make_badarg(env);
    }
    if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
        return enif_make_badarg(env);
    }
    if(!check_pid(env, hqueue_nif)) {
        return enif_make_badarg(env);
    }

    if (!hqueue_extract_max(hqueue_nif->hqueue, &tmp_priority, (void**) &hqnode_nif)) {
        return make_error(env, priv, priv->atom_empty);
    }

    priority = enif_make_double(env, tmp_priority);
    /* Copy the value out of the node's private env before freeing it. */
    value = enif_make_copy(env, hqnode_nif->value);
    ret = enif_make_tuple2(env, priority, value);
    hqueue_nif_node_free(hqnode_nif);

    return ret;
}
/* NIF insert/3: copy argv[2] into a fresh node env and push it with
 * the given priority (a float >= 0.0).
 * Returns ok, {error, full}, or badarg.
 * Fix: when hqueue_insert rejects the element (queue at capacity) the
 * freshly allocated node was leaked; it is now freed before the error
 * return. */
static ERL_NIF_TERM
hqueue_nif_insert(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;
    hqnode_nif_t* hqnode_nif;
    double priority;

    if(argc != 3) {
        return enif_make_badarg(env);
    }
    if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
        return enif_make_badarg(env);
    }
    if(!check_pid(env, hqueue_nif)) {
        return enif_make_badarg(env);
    }
    if(!enif_get_double(env, argv[1], &priority)) {
        return enif_make_badarg(env);
    }
    if(priority < 0.0) {
        return enif_make_badarg(env);
    }

    hqnode_nif = hqueue_nif_node_alloc();
    hqnode_nif->value = enif_make_copy(hqnode_nif->env, argv[2]);

    if (!hqueue_insert(hqueue_nif->hqueue, priority, (void*) hqnode_nif)) {
        hqueue_nif_node_free(hqnode_nif);  /* fix: don't leak on full */
        return make_error(env, priv, priv->atom_full);
    }

    return priv->atom_ok;
}
/* NIF size/1: number of queued elements (owner process only). */
static ERL_NIF_TERM
hqueue_nif_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hq;

    if(argc != 1
            || !enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hq)
            || !check_pid(env, hq)) {
        return enif_make_badarg(env);
    }
    return enif_make_uint64(env, hqueue_size(hq->hqueue));
}
/* NIF heap_size/1: allocated heap slots (owner process only). */
static ERL_NIF_TERM
hqueue_nif_heap_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hq;

    if(argc != 1
            || !enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hq)
            || !check_pid(env, hq)) {
        return enif_make_badarg(env);
    }
    return enif_make_uint64(env, hqueue_heap_size(hq->hqueue));
}
/* NIF max_elems/1: capacity limit (owner process only). */
static ERL_NIF_TERM
hqueue_nif_max_elems(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hq;

    if(argc != 1
            || !enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hq)
            || !check_pid(env, hq)) {
        return enif_make_badarg(env);
    }
    return enif_make_uint64(env, hqueue_max_elems(hq->hqueue));
}
/* NIF to_list/1: snapshot as [{Priority, Value}] in heap-array order
 * (NOT sorted by priority).  Owner process only. */
static ERL_NIF_TERM
hqueue_nif_to_list(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;
    hqueue_t* hqueue;
    hqnode_nif_t* hqnode_nif;
    double tmp_priority;
    ERL_NIF_TERM ret = enif_make_list(env, 0);
    ERL_NIF_TERM priority;
    ERL_NIF_TERM value;
    ERL_NIF_TERM tuple;
    uint32_t i;

    if(argc != 1) {
        return enif_make_badarg(env);
    }
    if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
        return enif_make_badarg(env);
    }
    if(!check_pid(env, hqueue_nif)) {
        return enif_make_badarg(env);
    }

    hqueue = hqueue_nif->hqueue;

    /* 1-based heap walk; hqueue_size is invariant during the loop. */
    for (i = 1; i <= hqueue_size(hqueue); i++) {
        hqueue_get_elem(hqueue, i, &tmp_priority, (void **) &hqnode_nif);
        priority = enif_make_double(env, tmp_priority);
        value = enif_make_copy(env, hqnode_nif->value);
        tuple = enif_make_tuple2(env, priority, value);
        ret = enif_make_list_cell(env, tuple, ret);
    }

    return ret;
}
/* NIF scale_by/2: multiply every queued priority by a float >= 0.0.
 * Returns ok; owner process only. */
static ERL_NIF_TERM
hqueue_nif_scale_by(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;
    ERL_NIF_TERM ret;
    double factor;

    if(argc != 2) {
        return enif_make_badarg(env);
    }
    if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
        return enif_make_badarg(env);
    }
    if(!check_pid(env, hqueue_nif)) {
        return enif_make_badarg(env);
    }
    if(!enif_get_double(env, argv[1], &factor)) {
        return enif_make_badarg(env);
    }
    if(factor < 0.0) {
        return enif_make_badarg(env);
    }

    hqueue_scale_by(hqueue_nif->hqueue, factor);
    ret = priv->atom_ok;

    return ret;
}
/* NIF resize_heap/2: set the allocated heap size.
 * Returns the old heap size, {error, too_small} when the queue holds
 * more elements than requested, or badarg on failure. */
static ERL_NIF_TERM
hqueue_nif_resize_heap(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;
    ERL_NIF_TERM ret;
    uint32_t new_heap_size;
    uint32_t old_heap_size;

    if(argc != 2) {
        return enif_make_badarg(env);
    }
    if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
        return enif_make_badarg(env);
    }
    if(!check_pid(env, hqueue_nif)) {
        return enif_make_badarg(env);
    }
    if(!enif_get_uint(env, argv[1], &new_heap_size)) {
        return enif_make_badarg(env);
    }
    if(hqueue_size(hqueue_nif->hqueue) > new_heap_size) {
        return make_error(env, priv, priv->atom_too_small);
    }

    if((old_heap_size = hqueue_resize_heap(hqueue_nif->hqueue, new_heap_size)) == 0) {
        return enif_make_badarg(env);
    }
    ret = enif_make_uint64(env, old_heap_size);

    return ret;
}
/* set_max_elems(Queue, NewMax) -> OldMax | {error, too_small}
 *
 * Changes the hard element cap, shrinking the heap if it currently
 * exceeds the new cap. Refuses a cap below the live element count. */
static ERL_NIF_TERM
hqueue_nif_set_max_elems(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;
    uint32_t requested;
    uint32_t previous;

    if(argc != 2
            || !enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)
            || !check_pid(env, hqueue_nif)
            || !enif_get_uint(env, argv[1], &requested)) {
        return enif_make_badarg(env);
    }

    /* Refuse a cap smaller than what is already queued. */
    if(hqueue_size(hqueue_nif->hqueue) > requested) {
        return make_error(env, priv, priv->atom_too_small);
    }

    previous = hqueue_set_max_elems(hqueue_nif->hqueue, requested);
    if(previous == 0) {
        return enif_make_badarg(env);
    }

    return enif_make_uint64(env, previous);
}
/* NIF load callback: allocates the module-private state (interned atoms
 * plus the resource type) and registers the `hqueue` resource.
 * Returns 0 on success, non-zero to abort loading. */
static int
load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
int flags = ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER;
ErlNifResourceType* res;
hqueue_priv* new_priv = (hqueue_priv*) enif_alloc(sizeof(hqueue_priv));
if(new_priv == NULL) {
return 1;
}
/* hqueue_nif_free is the destructor run when the resource is GC'd. */
res = enif_open_resource_type(
env, NULL, "hqueue", hqueue_nif_free, flags, NULL);
if(res == NULL) {
enif_free(new_priv);
return 1;
}
new_priv->res_hqueue = res;
/* Intern the atoms used in return values and option parsing. */
new_priv->atom_ok = make_atom(env, "ok");
new_priv->atom_error = make_atom(env, "error");
new_priv->atom_value = make_atom(env, "value");
new_priv->atom_empty = make_atom(env, "empty");
new_priv->atom_full = make_atom(env, "full");
new_priv->atom_max_elems = make_atom(env, "max_elems");
new_priv->atom_heap_size = make_atom(env, "heap_size");
new_priv->atom_too_small = make_atom(env, "too_small");
*priv = (void*) new_priv;
return 0;
}
/* Hot-code-upgrade callback: re-runs load to build fresh private state.
 * NOTE(review): the previous *old_priv is not freed here — verify the
 * old module's unload handles it, otherwise this leaks on upgrade. */
static int
upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
return load(env, priv, info);
}
/* NIF unload callback: releases the private state allocated in load. */
static void
unload(ErlNifEnv* env, void* priv)
{
enif_free(priv);
return;
}
/* Dispatch table: {erlang_name, arity, c_implementation}. */
static ErlNifFunc funcs[] = {
{"new", 1, hqueue_nif_new},
{"extract_max", 1, hqueue_nif_extract_max},
{"insert", 3, hqueue_nif_insert},
{"size", 1, hqueue_nif_size},
{"heap_size", 1, hqueue_nif_heap_size},
{"max_elems", 1, hqueue_nif_max_elems},
{"set_max_elems", 2, hqueue_nif_set_max_elems},
{"to_list", 1, hqueue_nif_to_list},
{"scale_by", 2, hqueue_nif_scale_by},
{"resize_heap", 2, hqueue_nif_resize_heap}
};
/* Binds the table and lifecycle callbacks to the `hqueue` Erlang module. */
ERL_NIF_INIT(hqueue, funcs, &load, NULL, &upgrade, &unload);

+ 5
- 0
c_src/couchdb_hqueue/c_src/hqueue_nif.d 查看文件

@ -0,0 +1,5 @@
c_src/hqueue_nif.o: c_src/hqueue_nif.c c_src/hqueue.h \
/usr/lib/erlang/erts-10.6.2/include/erl_nif.h \
/usr/lib/erlang/erts-10.6.2/include/erl_drv_nif.h \
/usr/lib/erlang/erts-10.6.2/include/erl_int_sizes_config.h \
/usr/lib/erlang/erts-10.6.2/include/erl_nif_api_funcs.h

+ 72
- 0
c_src/couchdb_hqueue/c_src/valgrind_sample.c 查看文件

@ -0,0 +1,72 @@
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "hqueue.h"
// Simple test script to stress the public HQueue API.
// Primary use case is for running this under Valgrind.
/* Stress the public hqueue API: fill past the initial heap size, scale,
 * extract one element, then free — intended to run under Valgrind. */
int main(void)
{
int str_len = 100;
int iterations = 1000;
uint32_t max_elems = 1024;
uint32_t heap_size = 64;
hqueue_t* hq = hqueue_new(max_elems, heap_size);
double priority;
double priority_res;
char* val;
char* val_res;
int i;
assert(max_elems == hqueue_max_elems(hq));
assert(heap_size == hqueue_heap_size(hq));
for(i = 0; i < iterations; i++) {
priority = 1234.4321 * i;
/* Each queued value is a heap-allocated string freed by hqueue_free2. */
val = (char*) malloc(str_len + 1);
if(val == NULL) {
return 1;
}
assert(hqueue_size(hq) == i);
if(snprintf(val, str_len + 1, "Fun string #%d\n", i)) {
if(!hqueue_insert(hq, priority, val)) {
return 1;
}
} else {
return 1;
}
}
hqueue_scale_by(hq, 3.7);
// Added 1000 elements, so heap size should have expanded to 1024
assert(max_elems == hqueue_max_elems(hq));
assert(max_elems == hqueue_heap_size(hq));
/* Extract one element by hand; the rest are released via free(). */
if(!hqueue_extract_max(hq, &priority_res, (void**) &val_res)) {
return 1;
}
free(val_res);
hqueue_free2(hq, free);
return 0;
}

+ 318
- 0
c_src/couchdb_hqueue/hqueue.c 查看文件

@ -0,0 +1,318 @@
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "hqueue.h"
/* Binary max-heap keyed on a double priority. The heap array uses
 * one-based indexing (slot 0 unused) so parent/child arithmetic is
 * simply k/2 and 2k. */
struct hqueue
{
int version;
uint32_t idx;        // count of live elements; also index of the last one
uint32_t max_elems;  // hard cap on stored elements
uint32_t heap_size;  // currently allocated heap slots
hqnode_t* heap; // one based index
};
/* One heap slot: a priority and the caller-owned payload pointer. */
struct hqnode
{
double priority;
void* value;
};
// Swap the heap nodes at (one-based) positions i and j.
static inline void
hqueue_exchange(hqueue_t* hqueue, int i, int j)
{
    hqnode_t* heap = hqueue->heap;
    hqnode_t scratch = heap[i];

    heap[i] = heap[j];
    heap[j] = scratch;
}
// True when node i has strictly lower priority than node j.
static inline int
hqueue_less(hqueue_t* hqueue, int i, int j)
{
    const hqnode_t* heap = hqueue->heap;

    return heap[i].priority < heap[j].priority ? 1 : 0;
}
/* Sift the node at position k upward, swapping with its parent (k/2)
 * until the max-heap property holds. Called after an insert at the end. */
static void
hqueue_fix_up(hqueue_t* hqueue, int k)
{
while(k > 1 && hqueue_less(hqueue, k/2, k)) {
hqueue_exchange(hqueue, k/2, k);
k = k/2;
}
return;
}
/* Sift the node at position k downward, swapping with its larger child
 * until the max-heap property holds. Called after extracting the root. */
static void
hqueue_fix_down(hqueue_t* hqueue, int k)
{
int j;
int n = hqueue->idx;
while(2*k <= n) {
j = 2*k;
/* Pick the larger of the two children. */
if(j < n && hqueue_less(hqueue, j, j+1)) {
j++;
}
if(!hqueue_less(hqueue, k, j)) {
break;
}
hqueue_exchange(hqueue, k, j);
k = j;
}
return;
}
hqueue_t*
hqueue_new(uint32_t max_elems, uint32_t heap_size)
{
hqueue_t* hqueue = NULL;
size_t total_heap_size;
if(max_elems == 0 || heap_size == 0) {
return NULL;
}
if(max_elems < heap_size) {
heap_size = max_elems;
}
hqueue = HQUEUE_ALLOC(sizeof(hqueue_t));
if(hqueue == NULL) {
return NULL;
}
memset(hqueue, '\0', sizeof(hqueue_t));
hqueue->version = HQ_VERSION;
hqueue->max_elems = max_elems;
hqueue->heap_size = heap_size;
hqueue->idx = 0;
total_heap_size = sizeof(hqnode_t) * (hqueue->heap_size+1);
hqueue->heap = (hqnode_t*) HQUEUE_ALLOC(total_heap_size);
if(hqueue->heap == NULL ) {
HQUEUE_FREE(hqueue);
return NULL;
}
memset(hqueue->heap, '\0', total_heap_size);
return hqueue;
}
/* Release the queue structure itself. Does NOT free stored values —
 * use hqueue_free2 when the payloads need freeing too. */
void
hqueue_free(hqueue_t* hqueue)
{
HQUEUE_FREE(hqueue->heap);
HQUEUE_FREE(hqueue);
return;
}
/* Release the queue, applying free_node to every live payload first.
 * Asserts that slots past idx hold no value (they must be NULL). */
void
hqueue_free2(hqueue_t* hqueue, void (*free_node)(void* node))
{
uint32_t i;
for(i = 1; i < hqueue->heap_size + 1; i++) {
if(i <= hqueue->idx) {
free_node(hqueue->heap[i].value);
} else {
assert(hqueue->heap[i].value == NULL && "inactive elements must be NULL");
}
}
hqueue_free(hqueue);
return;
}
// Extraction order is undefined for entries with duplicate priorities
/* Pop the highest-priority element into *priority / *value.
 * Returns 1 on success, 0 when the queue is empty. */
int
hqueue_extract_max(hqueue_t* hqueue, double* priority, void** value)
{
if(hqueue->idx <= 0) {
return 0;
}
/* Move the root to the end, take it, then restore heap order. */
hqueue_exchange(hqueue, 1, hqueue->idx);
*priority = hqueue->heap[hqueue->idx].priority;
*value = hqueue->heap[hqueue->idx].value;
hqueue->heap[hqueue->idx].value = NULL;
hqueue->idx--; // heap uses one based index, so we decrement after
hqueue_fix_down(hqueue, 1);
return 1;
}
/* Read slot idx (one-based, no bounds check) without removing it. */
void
hqueue_get_elem(hqueue_t* hqueue, uint32_t idx, double *priority, void** value)
{
    const hqnode_t* node = &hqueue->heap[idx];

    *priority = node->priority;
    *value = node->value;
}
/* Grow the heap when the next insert would not fit, doubling (capped at
 * max_elems). Returns 1 when the heap is usable, 0 on alloc failure.
 *
 * BUG FIX: the old code computed `idx * HQ_SCALE_FACTOR` directly, which
 * wraps around for idx > UINT32_MAX / HQ_SCALE_FACTOR and could request
 * a heap smaller than idx (making hqueue_resize_heap fail). The growth
 * target is now computed with an overflow-safe comparison. Also avoids
 * the `idx + 1` wrap when idx == UINT32_MAX. */
static int
hqueue_maybe_resize(hqueue_t* hqueue)
{
    uint32_t min_resize;

    if(hqueue->idx >= hqueue->heap_size) {
        if(hqueue->idx > hqueue->max_elems / HQ_SCALE_FACTOR) {
            /* Doubling would exceed (or overflow past) the cap. */
            min_resize = hqueue->max_elems;
        } else {
            min_resize = hqueue->idx * HQ_SCALE_FACTOR;
        }
        return hqueue_resize_heap(hqueue, min_resize);
    }
    return 1;
}
/* Insert value with the given priority. Returns 1 on success, 0 when
 * the queue is at max_elems or the heap could not grow. */
int
hqueue_insert(hqueue_t* hqueue, double priority, void* value)
{
if(hqueue->idx >= hqueue->max_elems) {
return 0;
}
/* Grow the backing array if the next slot would not fit. */
if(!hqueue_maybe_resize(hqueue)) {
return 0;
}
hqueue->idx++; // heap uses one based index, so we increment first
hqueue->heap[hqueue->idx].priority = priority;
hqueue->heap[hqueue->idx].value = value;
hqueue_fix_up(hqueue, hqueue->idx);
return 1;
}
/* Number of live elements currently stored. */
uint32_t
hqueue_size(hqueue_t* hqueue)
{
return hqueue->idx;
}
/* Number of currently allocated heap slots. */
uint32_t
hqueue_heap_size(hqueue_t* hqueue)
{
return hqueue->heap_size;
}
/* Hard cap on the number of elements. */
uint32_t
hqueue_max_elems(hqueue_t* hqueue)
{
return hqueue->max_elems;
}
/* Multiply every live priority by factor. For non-negative factors the
 * relative ordering is preserved, so the heap shape stays valid. */
void
hqueue_scale_by(hqueue_t* hqueue, double factor)
{
    uint32_t pos;

    for(pos = 1; pos <= hqueue->idx && pos <= hqueue->heap_size; pos++) {
        hqueue->heap[pos].priority = hqueue->heap[pos].priority * factor;
    }
}
/* Reallocate the heap to new_heap_size slots, moving the live prefix
 * (slots 1..idx). Returns the previous heap size on success, 0 when the
 * new size cannot hold the live elements or allocation fails.
 *
 * BUG FIX: the old loop condition (i <= idx && i <= old_heap_size) made
 * the else/assert branch unreachable, so the "inactive slots are NULL"
 * invariant was never actually checked. The loop now walks every old
 * slot: live ones are moved, inactive ones are asserted NULL. The byte
 * count is also computed in size_t to avoid 32-bit overflow. */
uint32_t
hqueue_resize_heap(hqueue_t* hqueue, uint32_t new_heap_size)
{
    uint32_t old_heap_size;
    size_t total_heap_size;
    hqnode_t* tmp_heap;
    uint32_t i;

    if(hqueue->idx > new_heap_size) {
        return 0;
    }

    total_heap_size = sizeof(hqnode_t) * ((size_t) new_heap_size + 1);
    old_heap_size = hqueue->heap_size;

    if((tmp_heap = (hqnode_t*) HQUEUE_ALLOC(total_heap_size)) == NULL) {
        return 0;
    }
    memset(tmp_heap, '\0', total_heap_size);

    for(i = 1; i <= old_heap_size; i++) {
        if(i <= hqueue->idx) {
            tmp_heap[i] = hqueue->heap[i];
            hqueue->heap[i].value = NULL;
        } else {
            assert(hqueue->heap[i].value == NULL &&
                "unexpected non-NULL element during heap resize");
        }
    }

    HQUEUE_FREE(hqueue->heap);
    hqueue->heap = tmp_heap;
    hqueue->heap_size = new_heap_size;

    return old_heap_size;
}
/* Change the element cap, shrinking the heap first if it exceeds the
 * new cap. Returns the previous cap on success, 0 if the shrink failed
 * (e.g. more live elements than new_max_elems). */
int
hqueue_set_max_elems(hqueue_t* hqueue, uint32_t new_max_elems)
{
uint32_t old_max_elems;
if(hqueue->heap_size > new_max_elems) {
if(!hqueue_resize_heap(hqueue, new_max_elems)) {
return 0;
}
}
old_max_elems = hqueue->max_elems;
hqueue->max_elems = new_max_elems;
return old_max_elems;
}

+ 60
- 0
c_src/couchdb_hqueue/hqueue.h 查看文件

@ -0,0 +1,60 @@
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#pragma once
#include <stdint.h>
/* Public API for a fixed-capacity max-priority queue backed by a
 * one-based binary heap keyed on double priorities. */
#define HQ_VERSION 0
#define HQ_SCALE_FACTOR 2 // heap expansion scale factor
// Override the default memory allocator to use the Erlang versions.
// This bubbles up memory usage for the NIF into Erlang stats.
#ifdef HQ_ENIF_ALLOC
#include "erl_nif.h"
#define HQUEUE_ALLOC enif_alloc
#define HQUEUE_FREE enif_free
#else
#define HQUEUE_ALLOC malloc
#define HQUEUE_FREE free
#endif
typedef struct hqnode hqnode_t;
typedef struct hqueue hqueue_t;
/* Constructors/destructors; hqueue_free2 also applies free_node to
 * every live payload. */
hqueue_t* hqueue_new(uint32_t max_elems, uint32_t heap_size);
void hqueue_free(hqueue_t* hqueue);
void hqueue_free2(hqueue_t* hqueue, void (*free_node)(void* node));
/* Both return 1 on success, 0 on failure (queue full / empty). */
int hqueue_insert(hqueue_t* hqueue, double priority, void* val);
int hqueue_extract_max(hqueue_t* hqueue, double* priority, void** value);
/* Peek at slot idx (one-based, unchecked) without removing it. */
void hqueue_get_elem(hqueue_t* hqueue, uint32_t idx, double *priority,
void** value);
uint32_t hqueue_size(hqueue_t* hqueue);
uint32_t hqueue_heap_size(hqueue_t* hqueue);
uint32_t hqueue_max_elems(hqueue_t* hqueue);
/* Return previous value on success, 0 on failure. */
int hqueue_set_max_elems(hqueue_t* hqueue, uint32_t new_max_elems);
void hqueue_scale_by(hqueue_t* hqueue, double factor);
uint32_t hqueue_resize_heap(hqueue_t* hqueue, uint32_t new_heap_size);

+ 601
- 0
c_src/couchdb_hqueue/hqueue_nif.c 查看文件

@ -0,0 +1,601 @@
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include "hqueue.h"
/* Module-private state created in load(): interned atoms plus the
 * resource type handle for hqueue instances. */
typedef struct
{
ERL_NIF_TERM atom_ok;
ERL_NIF_TERM atom_error;
ERL_NIF_TERM atom_value;
ERL_NIF_TERM atom_empty;
ERL_NIF_TERM atom_full;
ERL_NIF_TERM atom_max_elems;
ERL_NIF_TERM atom_heap_size;
ERL_NIF_TERM atom_too_small;
ErlNifResourceType* res_hqueue;
} hqueue_priv;
/* One queued Erlang term, kept alive in its own private env so it
 * survives across NIF calls. */
typedef struct
{
ErlNifEnv* env;
ERL_NIF_TERM value;
} hqnode_nif_t;
/* The resource object wrapping a C hqueue_t; `p` is the owning process
 * (all operations are restricted to it). */
typedef struct
{
int version;
uint64_t gen;
hqueue_t* hqueue;
ErlNifPid p;
} hqueue_nif_t;
/* Defaults used when new/1 options omit max_elems / heap_size. */
static const uint32_t default_max_elems = UINT32_MAX-1;
static const uint32_t default_heap_size = 1024;
/* Return the atom named `name`, reusing an existing atom when possible
 * to avoid growing the atom table. */
static inline ERL_NIF_TERM
make_atom(ErlNifEnv* env, const char* name)
{
    ERL_NIF_TERM atom;

    if(!enif_make_existing_atom(env, name, &atom, ERL_NIF_LATIN1)) {
        atom = enif_make_atom(env, name);
    }
    return atom;
}
/* Build {ok, Value}. */
static inline ERL_NIF_TERM
make_ok(ErlNifEnv* env, hqueue_priv* priv, ERL_NIF_TERM value)
{
return enif_make_tuple2(env, priv->atom_ok, value);
}
/* Build {error, Reason}. */
static inline ERL_NIF_TERM
make_error(ErlNifEnv* env, hqueue_priv* priv, ERL_NIF_TERM reason)
{
return enif_make_tuple2(env, priv->atom_error, reason);
}
/* Return 1 when the calling process is the one that created the queue,
 * 0 otherwise. Used to enforce single-process ownership.
 * NOTE(review): this compares the ErlNifPid's internal `pid` term via
 * enif_compare; newer OTP offers enif_compare_pids for this — confirm
 * the field access is safe on all targeted OTP releases. */
static inline int
check_pid(ErlNifEnv* env, hqueue_nif_t* hqueue_nif)
{
ErlNifPid pid;
enif_self(env, &pid);
if(enif_compare(pid.pid, hqueue_nif->p.pid) == 0) {
return 1;
}
return 0;
}
/* Free a queued node: its private term env first, then the node. */
void
hqueue_nif_node_free(hqnode_nif_t* hqnode_nif)
{
enif_free_env(hqnode_nif->env);
enif_free(hqnode_nif);
return;
}
/* void* adapter so hqueue_free2 can free queued nodes. */
void
hqueue_nif_node_free_ext(void* node)
{
hqueue_nif_node_free((hqnode_nif_t*) node);
return;
}
/* Allocate a queue node with its own private term environment.
 * Returns NULL on allocation failure.
 *
 * BUG FIX: the old code called enif_alloc(sizeof(hqnode_nif_t*)) — the
 * size of a POINTER (typically 8 bytes), not of the struct — so writing
 * the members overflowed the allocation. It also memset a potentially
 * NULL pointer. */
hqnode_nif_t*
hqueue_nif_node_alloc()
{
    hqnode_nif_t* node = (hqnode_nif_t*) enif_alloc(sizeof(hqnode_nif_t));

    if(node == NULL) {
        return NULL;
    }
    memset(node, 0, sizeof(hqnode_nif_t));
    node->env = enif_alloc_env();
    return node;
}
/* Parse an option of the form {Atom, UInt}: when `value` is a 2-tuple
 * whose first element equals `atom` and whose second is an unsigned
 * integer, store it in *p and return 1; otherwise return 0. */
static int
get_uint_param(ErlNifEnv* env, ERL_NIF_TERM value, ERL_NIF_TERM atom, uint32_t* p)
{
    const ERL_NIF_TERM* elems;
    int arity;

    if(!enif_get_tuple(env, value, &arity, &elems) || arity != 2) {
        return 0;
    }

    if(enif_compare(elems[0], atom) != 0) {
        return 0;
    }

    return enif_get_uint(env, elems[1], p) ? 1 : 0;
}
/* Allocate the resource object and its underlying hqueue_t, recording
 * the calling process as owner. Returns NULL if the hqueue cannot be
 * created (the half-built resource is released). */
static inline hqueue_nif_t*
hqueue_nif_create_int(ErlNifEnv* env, hqueue_priv* priv, uint32_t max_elems,
uint32_t heap_size)
{
hqueue_nif_t* hqueue_nif = NULL;
assert(priv != NULL && "missing private data member");
hqueue_nif = (hqueue_nif_t*) enif_alloc_resource(
priv->res_hqueue, sizeof(hqueue_nif_t));
memset(hqueue_nif, 0, sizeof(hqueue_nif_t));
hqueue_nif->version = HQ_VERSION;
hqueue_nif->hqueue = hqueue_new(max_elems, heap_size);
if(hqueue_nif->hqueue == NULL ) {
/* Releasing triggers hqueue_nif_free on a resource whose hqueue
 * pointer is NULL — the destructor must tolerate that. */
enif_release_resource(hqueue_nif);
return NULL;
}
enif_self(env, &(hqueue_nif->p));
return hqueue_nif;
}
/* new(Options) -> {ok, Queue}
 * Options is a list of {max_elems, N} / {heap_size, N} tuples; unknown
 * options are badarg. Defaults apply for omitted options. */
static ERL_NIF_TERM
hqueue_nif_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
hqueue_priv* priv = enif_priv_data(env);
hqueue_nif_t* hqueue_nif;
ERL_NIF_TERM ret;
ERL_NIF_TERM opts;
ERL_NIF_TERM value;
uint32_t max_elems = default_max_elems;
uint32_t heap_size = default_heap_size;
if(argc != 1) {
return enif_make_badarg(env);
}
opts = argv[0];
if(!enif_is_list(env, opts)) {
return enif_make_badarg(env);
}
/* Walk the option list; later options overwrite earlier ones. */
while(enif_get_list_cell(env, opts, &value, &opts)) {
if(get_uint_param(env, value, priv->atom_max_elems, &max_elems)) {
continue;
} else if(get_uint_param(env, value, priv->atom_heap_size, &heap_size)) {
continue;
} else {
return enif_make_badarg(env);
}
}
hqueue_nif = hqueue_nif_create_int(env, priv, max_elems, heap_size);
if(hqueue_nif == NULL) {
return enif_make_badarg(env);
}
ret = enif_make_resource(env, hqueue_nif);
/* The term now owns the resource; drop our reference. */
enif_release_resource(hqueue_nif);
return make_ok(env, priv, ret);
}
/* Resource destructor: frees the underlying hqueue and every queued
 * node (each node's private env included).
 *
 * BUG FIX: hqueue_nif_create_int releases the resource when
 * hqueue_new() fails, so this destructor can run with hqueue == NULL;
 * the old code dereferenced it unconditionally and crashed the VM. */
static void
hqueue_nif_free(ErlNifEnv* env, void* obj)
{
    hqueue_nif_t* hqueue_nif = (hqueue_nif_t*) obj;

    if(hqueue_nif->hqueue != NULL) {
        hqueue_free2(hqueue_nif->hqueue, hqueue_nif_node_free_ext);
    }
    return;
}
/* extract_max(Queue) -> {Priority, Value} | {error, empty}
 * Pops the highest-priority entry and copies its term into the caller's
 * env before freeing the node. */
static ERL_NIF_TERM
hqueue_nif_extract_max(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
hqueue_priv* priv = enif_priv_data(env);
hqueue_nif_t* hqueue_nif;
hqnode_nif_t* hqnode_nif;
double tmp_priority;
ERL_NIF_TERM ret;
ERL_NIF_TERM priority;
ERL_NIF_TERM value;
if(argc != 1) {
return enif_make_badarg(env);
}
if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
return enif_make_badarg(env);
}
/* Owner-only access. */
if(!check_pid(env, hqueue_nif)) {
return enif_make_badarg(env);
}
if (!hqueue_extract_max(hqueue_nif->hqueue, &tmp_priority, (void**) &hqnode_nif)) {
return make_error(env, priv, priv->atom_empty);
}
priority = enif_make_double(env, tmp_priority);
/* Copy out of the node's private env before destroying it. */
value = enif_make_copy(env, hqnode_nif->value);
ret = enif_make_tuple2(env, priority, value);
hqueue_nif_node_free(hqnode_nif);
return ret;
}
/* insert(Queue, Priority, Value) -> ok | {error, full}
 * Priority must be a float >= 0.0. Value is copied into a node-private
 * env so it outlives this call.
 *
 * BUG FIX: when hqueue_insert failed (queue full) the freshly allocated
 * node — including its ErlNifEnv — was leaked. It is now freed on the
 * failure path. Also guards against node allocation failure. */
static ERL_NIF_TERM
hqueue_nif_insert(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;
    hqnode_nif_t* hqnode_nif;
    double priority;

    if(argc != 3) {
        return enif_make_badarg(env);
    }

    if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
        return enif_make_badarg(env);
    }

    /* Owner-only access. */
    if(!check_pid(env, hqueue_nif)) {
        return enif_make_badarg(env);
    }

    if(!enif_get_double(env, argv[1], &priority) || priority < 0.0) {
        return enif_make_badarg(env);
    }

    hqnode_nif = hqueue_nif_node_alloc();
    if(hqnode_nif == NULL) {
        return enif_make_badarg(env);
    }
    hqnode_nif->value = enif_make_copy(hqnode_nif->env, argv[2]);

    if (!hqueue_insert(hqueue_nif->hqueue, priority, (void*) hqnode_nif)) {
        hqueue_nif_node_free(hqnode_nif);
        return make_error(env, priv, priv->atom_full);
    }

    return priv->atom_ok;
}
/* size(Queue) -> non_neg_integer(): number of queued entries. */
static ERL_NIF_TERM
hqueue_nif_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;

    if(argc != 1
            || !enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)
            || !check_pid(env, hqueue_nif)) {
        return enif_make_badarg(env);
    }

    return enif_make_uint64(env, hqueue_size(hqueue_nif->hqueue));
}
/* heap_size(Queue) -> non_neg_integer(): allocated heap slots. */
static ERL_NIF_TERM
hqueue_nif_heap_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;

    if(argc != 1
            || !enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)
            || !check_pid(env, hqueue_nif)) {
        return enif_make_badarg(env);
    }

    return enif_make_uint64(env, hqueue_heap_size(hqueue_nif->hqueue));
}
/* max_elems(Queue) -> non_neg_integer(): the element cap. */
static ERL_NIF_TERM
hqueue_nif_max_elems(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hqueue_priv* priv = enif_priv_data(env);
    hqueue_nif_t* hqueue_nif;

    if(argc != 1
            || !enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)
            || !check_pid(env, hqueue_nif)) {
        return enif_make_badarg(env);
    }

    return enif_make_uint64(env, hqueue_max_elems(hqueue_nif->hqueue));
}
/* to_list(Queue) -> [{Priority, Value}]
 * Non-destructive dump of the live entries in internal heap-array order
 * (NOT sorted by priority). */
static ERL_NIF_TERM
hqueue_nif_to_list(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
hqueue_priv* priv = enif_priv_data(env);
hqueue_nif_t* hqueue_nif;
hqueue_t* hqueue;
hqnode_nif_t* hqnode_nif;
double tmp_priority;
ERL_NIF_TERM ret = enif_make_list(env, 0);
ERL_NIF_TERM priority;
ERL_NIF_TERM value;
ERL_NIF_TERM tuple;
uint32_t i;
if(argc != 1) {
return enif_make_badarg(env);
}
if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
return enif_make_badarg(env);
}
/* Owner-only access. */
if(!check_pid(env, hqueue_nif)) {
return enif_make_badarg(env);
}
hqueue = hqueue_nif->hqueue;
/* Heap slots are one-based; each term is copied into the caller env. */
for (i = 1; i <= hqueue_size(hqueue); i++) {
hqueue_get_elem(hqueue, i, &tmp_priority, (void **) &hqnode_nif);
priority = enif_make_double(env, tmp_priority);
value = enif_make_copy(env, hqnode_nif->value);
tuple = enif_make_tuple2(env, priority, value);
ret = enif_make_list_cell(env, tuple, ret);
}
return ret;
}
/* scale_by(Queue, Factor) -> ok
 * Multiplies every stored priority by Factor (a float >= 0.0). */
static ERL_NIF_TERM
hqueue_nif_scale_by(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
hqueue_priv* priv = enif_priv_data(env);
hqueue_nif_t* hqueue_nif;
ERL_NIF_TERM ret;
double factor;
if(argc != 2) {
return enif_make_badarg(env);
}
if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
return enif_make_badarg(env);
}
/* Owner-only access. */
if(!check_pid(env, hqueue_nif)) {
return enif_make_badarg(env);
}
if(!enif_get_double(env, argv[1], &factor)) {
return enif_make_badarg(env);
}
/* Negative factors would invert the heap ordering; reject them. */
if(factor < 0.0) {
return enif_make_badarg(env);
}
hqueue_scale_by(hqueue_nif->hqueue, factor);
ret = priv->atom_ok;
return ret;
}
/* resize_heap(Queue, NewHeapSize) -> OldHeapSize | {error, too_small}
 * Explicitly resizes the backing heap array. */
static ERL_NIF_TERM
hqueue_nif_resize_heap(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
hqueue_priv* priv = enif_priv_data(env);
hqueue_nif_t* hqueue_nif;
ERL_NIF_TERM ret;
uint32_t new_heap_size;
uint32_t old_heap_size;
if(argc != 2) {
return enif_make_badarg(env);
}
if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
return enif_make_badarg(env);
}
/* Owner-only access. */
if(!check_pid(env, hqueue_nif)) {
return enif_make_badarg(env);
}
if(!enif_get_uint(env, argv[1], &new_heap_size)) {
return enif_make_badarg(env);
}
/* Cannot shrink below the live element count. */
if(hqueue_size(hqueue_nif->hqueue) > new_heap_size) {
return make_error(env, priv, priv->atom_too_small);
}
if((old_heap_size = hqueue_resize_heap(hqueue_nif->hqueue, new_heap_size)) == 0) {
return enif_make_badarg(env);
}
ret = enif_make_uint64(env, old_heap_size);
return ret;
}
/* set_max_elems(Queue, NewMax) -> OldMax | {error, too_small}
 * Changes the element cap, shrinking the heap when needed. */
static ERL_NIF_TERM
hqueue_nif_set_max_elems(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
hqueue_priv* priv = enif_priv_data(env);
hqueue_nif_t* hqueue_nif;
ERL_NIF_TERM ret;
uint32_t new_max_elems;
uint32_t old_max_elems;
if(argc != 2) {
return enif_make_badarg(env);
}
if(!enif_get_resource(env, argv[0], priv->res_hqueue, (void**) &hqueue_nif)) {
return enif_make_badarg(env);
}
/* Owner-only access. */
if(!check_pid(env, hqueue_nif)) {
return enif_make_badarg(env);
}
if(!enif_get_uint(env, argv[1], &new_max_elems)) {
return enif_make_badarg(env);
}
/* Refuse a cap below what is currently queued. */
if(hqueue_size(hqueue_nif->hqueue) > new_max_elems) {
return make_error(env, priv, priv->atom_too_small);
}
if ((old_max_elems = hqueue_set_max_elems(hqueue_nif->hqueue, new_max_elems)) == 0) {
return enif_make_badarg(env);
}
ret = enif_make_uint64(env, old_max_elems);
return ret;
}
/* NIF load callback: allocates module-private state (interned atoms and
 * the resource type). Returns 0 on success, non-zero to abort loading. */
static int
load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
int flags = ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER;
ErlNifResourceType* res;
hqueue_priv* new_priv = (hqueue_priv*) enif_alloc(sizeof(hqueue_priv));
if(new_priv == NULL) {
return 1;
}
/* hqueue_nif_free runs when a queue resource is garbage collected. */
res = enif_open_resource_type(
env, NULL, "hqueue", hqueue_nif_free, flags, NULL);
if(res == NULL) {
enif_free(new_priv);
return 1;
}
new_priv->res_hqueue = res;
/* Intern atoms used in return values and option parsing. */
new_priv->atom_ok = make_atom(env, "ok");
new_priv->atom_error = make_atom(env, "error");
new_priv->atom_value = make_atom(env, "value");
new_priv->atom_empty = make_atom(env, "empty");
new_priv->atom_full = make_atom(env, "full");
new_priv->atom_max_elems = make_atom(env, "max_elems");
new_priv->atom_heap_size = make_atom(env, "heap_size");
new_priv->atom_too_small = make_atom(env, "too_small");
*priv = (void*) new_priv;
return 0;
}
/* Hot-code-upgrade callback: rebuilds private state via load.
 * NOTE(review): *old_priv is not freed here — confirm the old module's
 * unload releases it, otherwise each upgrade leaks one hqueue_priv. */
static int
upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
{
return load(env, priv, info);
}
/* NIF unload callback: releases the private state allocated in load. */
static void
unload(ErlNifEnv* env, void* priv)
{
enif_free(priv);
return;
}
/* Dispatch table: {erlang_name, arity, c_implementation}. */
static ErlNifFunc funcs[] = {
{"new", 1, hqueue_nif_new},
{"extract_max", 1, hqueue_nif_extract_max},
{"insert", 3, hqueue_nif_insert},
{"size", 1, hqueue_nif_size},
{"heap_size", 1, hqueue_nif_heap_size},
{"max_elems", 1, hqueue_nif_max_elems},
{"set_max_elems", 2, hqueue_nif_set_max_elems},
{"to_list", 1, hqueue_nif_to_list},
{"scale_by", 2, hqueue_nif_scale_by},
{"resize_heap", 2, hqueue_nif_resize_heap}
};
/* Binds the table and lifecycle callbacks to the `hqueue` Erlang module. */
ERL_NIF_INIT(hqueue, funcs, &load, NULL, &upgrade, &unload);

+ 13
- 0
c_src/couchdb_hqueue/rebar.config 查看文件

@ -0,0 +1,13 @@
%% Build the hqueue NIF shared library from the local C sources.
{port_specs, [
{"../../priv/hqueue.so", ["hqueue*.c"]}
]}.
%% -DHQ_ENIF_ALLOC routes allocations through enif_alloc/enif_free so
%% NIF memory usage shows up in Erlang's allocator statistics.
{port_env, [
{"(linux|solaris|darwin|freebsd)", "CFLAGS", "$CFLAGS -g -Wall -Werror -DHQ_ENIF_ALLOC -O3"},
{"win32", "CFLAGS", "$CFLAGS /O2 /DNDEBUG /DHQ_ENIF_ALLOC /Dinline=__inline /Wall"}
%% {".*", "CFLAGS", "$CFLAGS -g -Wall -Werror -Wextra"}
]}.

+ 72
- 0
c_src/couchdb_hqueue/valgrind_sample.c 查看文件

@ -0,0 +1,72 @@
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "hqueue.h"
// Simple test script to stress the public HQueue API.
// Primary use case is for running this under Valgrind.
/* Stress the public hqueue API: fill past the initial heap size, scale,
 * extract one element, then free — intended to run under Valgrind. */
int main(void)
{
int str_len = 100;
int iterations = 1000;
uint32_t max_elems = 1024;
uint32_t heap_size = 64;
hqueue_t* hq = hqueue_new(max_elems, heap_size);
double priority;
double priority_res;
char* val;
char* val_res;
int i;
assert(max_elems == hqueue_max_elems(hq));
assert(heap_size == hqueue_heap_size(hq));
for(i = 0; i < iterations; i++) {
priority = 1234.4321 * i;
/* Each queued value is a heap string later freed by hqueue_free2. */
val = (char*) malloc(str_len + 1);
if(val == NULL) {
return 1;
}
assert(hqueue_size(hq) == i);
if(snprintf(val, str_len + 1, "Fun string #%d\n", i)) {
if(!hqueue_insert(hq, priority, val)) {
return 1;
}
} else {
return 1;
}
}
hqueue_scale_by(hq, 3.7);
// Added 1000 elements, so heap size should have expanded to 1024
assert(max_elems == hqueue_max_elems(hq));
assert(max_elems == hqueue_heap_size(hq));
/* Extract one element by hand; the rest are released via free(). */
if(!hqueue_extract_max(hq, &priority_res, (void**) &val_res)) {
return 1;
}
free(val_res);
hqueue_free2(hq, free);
return 0;
}

+ 0
- 564
c_src/cq/cq_nif.c 查看文件

@ -1,564 +0,0 @@
#include <stdio.h>
#include <unistd.h>
#include "erl_nif.h"
#include "cq_nif.h"
/* #ifndef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
# error Requires dirty schedulers
#endif */
/* Return the atom named `atom`, reusing an existing atom when possible
 * to avoid growing the atom table. */
ERL_NIF_TERM
mk_atom(ErlNifEnv* env, const char* atom)
{
    ERL_NIF_TERM term;

    if(enif_make_existing_atom(env, atom, &term, ERL_NIF_LATIN1)) {
        return term;
    }
    return enif_make_atom(env, atom);
}
/* Build {error, Reason} where Reason is the atom named by `mesg`. */
ERL_NIF_TERM
mk_error(ErlNifEnv* env, const char* mesg)
{
return enif_make_tuple2(env, mk_atom(env, "error"), mk_atom(env, mesg));
}
/* new(QueueId, QueueSize, OverflowSize) -> {ok, Ref} | {error, Reason}
 * Creates global queue slot QueueId (0..8) with ring-buffer state laid
 * out one entry per cache line.
 *
 * BUG FIX: overflow_envs was allocated with `q->queue_size` entries but
 * is indexed by the overflow ring — it must be sized by overflow_size,
 * matching overflow_terms.
 * NOTE(review): the calloc return values are still unchecked (see the
 * original TODO), and indexing the arrays with i*CACHE_LINE_SIZE only
 * stays in bounds if the element type's size is accounted for in the
 * calloc element size — confirm against cq_nif.h. */
static ERL_NIF_TERM
queue_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    cq_t *q = enif_alloc_resource(CQ_RESOURCE, sizeof(cq_t));
    if (q == NULL)
        return mk_error(env, "priv_alloc_error");

    ERL_NIF_TERM ret = enif_make_resource(env, q);
    /* enif_release_resource(ret); */

    uint32_t queue_id = 0;
    uint32_t queue_size = 0;
    uint32_t overflow_size = 0;

    if (!enif_get_uint(env, argv[0], &queue_id) ||
        !enif_get_uint(env, argv[1], &queue_size) ||
        !enif_get_uint(env, argv[2], &overflow_size))
        return mk_error(env, "badarg");

    if (queue_id > 8)
        return mk_error(env, "bad_queue_id");

    /* TODO: Check that queue_size is power of 2 */

    if (QUEUES[queue_id] != NULL)
        return mk_error(env, "queue_id_already_exists");

    q->id = queue_id;
    q->queue_size = queue_size;
    q->overflow_size = overflow_size;
    q->tail = 0;
    q->head = 0;
    q->slots_states = calloc(q->queue_size, CACHE_LINE_SIZE);
    q->slots_terms = calloc(q->queue_size, CACHE_LINE_SIZE);
    q->slots_envs = calloc(q->queue_size, CACHE_LINE_SIZE);
    q->overflow_terms = calloc(q->overflow_size, CACHE_LINE_SIZE);
    q->overflow_envs = calloc(q->overflow_size, CACHE_LINE_SIZE);
    q->push_queue = new_queue();
    q->pop_queue = new_queue();

    /* TODO: Check calloc return */

    /* Every slot gets a private term env so pushed terms survive
       across NIF calls. */
    for (int i = 0; i < q->queue_size; i++) {
        ErlNifEnv *slot_env = enif_alloc_env();
        q->slots_envs[i*CACHE_LINE_SIZE] = slot_env;
        //q->overflow_envs[i*CACHE_LINE_SIZE] = (ErlNifEnv *) enif_alloc_env();
    }

    QUEUES[q->id] = q;

    return enif_make_tuple2(env, mk_atom(env, "ok"), ret);
}
/* free(QueueId) -> ok | {error, Reason}
 * Unregisters the global queue slot. NOTE(review): per the original
 * TODO, none of the queue's allocations are actually released here —
 * every free() leaks the slots, envs and sub-queues. */
static ERL_NIF_TERM
queue_free(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
uint32_t queue_id = 0;
if (!enif_get_uint(env, argv[0], &queue_id))
return mk_error(env, "badarg");
if (queue_id > 8)
return mk_error(env, "badarg");
cq_t *q = QUEUES[queue_id];
if (q == NULL)
return mk_error(env, "bad_queue_id");
/* TODO: Free all the things! */
QUEUES[queue_id] = NULL;
return enif_make_atom(env, "ok");
}
/* Push to the head of the queue. */
/* push(QueueId, Term) -> ok | {error, Reason}
 * Hands the term to a waiting consumer directly, otherwise claims a
 * ring slot via CAS and publishes the term there.
 *
 * NOTE(review): several problems in the claim loop below —
 *  1. __sync_val_compare_and_swap returns the slot's PREVIOUS value, so
 *     ret == STATE_EMPTY means the CAS *succeeded* (we own the slot),
 *     while ret == STATE_WRITE means another writer owns it. The switch
 *     treats these the other way around.
 *  2. The `break` in `case STATE_WRITE` only exits the switch, not the
 *     `while (1)`, so the success path can never leave the loop.
 *  3. The per-push fprintf over every slot is debug output left in.
 * Confirm against cq_nif.h before relying on this code. */
static ERL_NIF_TERM
queue_push(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
uint32_t queue_id = 0;
if (!enif_get_uint(env, argv[0], &queue_id))
return mk_error(env, "badarg");
if (queue_id > 8)
return mk_error(env, "badarg");
/* Load the queue */
cq_t *q = QUEUES[queue_id];
if (q == NULL)
return mk_error(env, "bad_queue_id");
if (q->id != queue_id)
return mk_error(env, "not_identical_queue_id");
/* NOTE(review): debug dump of every slot on each push. */
for (int i = 0; i < q->queue_size; i++) {
fprintf(stderr, "queue slot %d, index %d, state %d\n",
i, i*CACHE_LINE_SIZE, q->slots_states[i*CACHE_LINE_SIZE]);
}
/* If there's consumers waiting, the queue must be empty and we
should directly pick a consumer to notify. */
ErlNifPid *waiting_consumer;
int dequeue_ret = dequeue(q->pop_queue, &waiting_consumer);
if (dequeue_ret) {
ErlNifEnv *msg_env = enif_alloc_env();
ERL_NIF_TERM copy = enif_make_copy(msg_env, argv[1]);
ERL_NIF_TERM tuple = enif_make_tuple2(msg_env, mk_atom(env, "pop"), copy);
/* NOTE(review): msg_env is leaked when enif_send fails. */
if (enif_send(env, waiting_consumer, msg_env, tuple)) {
enif_free_env(msg_env);
return mk_atom(env, "ok");
} else {
return mk_error(env, "notify_failed");
}
}
/* Increment head and attempt to claim the slot by marking it as
busy. This ensures no other thread will attempt to modify this
slot. If we cannot lock it, another thread must have */
uint64_t head = __sync_add_and_fetch(&q->head, 1);
size_t size = q->queue_size;
while (1) {
uint64_t index = SLOT_INDEX(head, size);
uint64_t ret = __sync_val_compare_and_swap(&q->slots_states[index],
STATE_EMPTY,
STATE_WRITE);
switch (ret) {
case STATE_EMPTY:
/* NOTE(review): falls through to the STATE_WRITE break. */
head = __sync_add_and_fetch(&q->head, 1);
case STATE_WRITE:
/* We acquired the write lock, go ahead with the write. */
break;
case STATE_FULL:
/* We have caught up with the tail and the buffer is
full. Block the producer until a consumer reads the
item. */
return mk_error(env, "full_not_implemented");
}
}
/* If head catches up with tail, the queue is full. Add to
overflow instead */
/* Copy term to slot-specific temporary process env. */
ERL_NIF_TERM copy = enif_make_copy(q->slots_envs[SLOT_INDEX(head, size)], argv[1]);
q->slots_terms[SLOT_INDEX(head, size)] = copy;
__sync_synchronize(); /* Or compiler memory barrier? */
/* TODO: Do we need to collect garbage? */
/* Mark the slot ready to be consumed */
if (__sync_bool_compare_and_swap(&q->slots_states[SLOT_INDEX(head, size)],
STATE_WRITE,
STATE_FULL)) {
return mk_atom(env, "ok");
} else {
return mk_error(env, "could_not_update_slots_after_insert");
}
}
/* NIF: async_pop(QueueId) -> {ok, Term} | wait_for_msg | Atom | {error, Reason}.
 *
 * Tries to consume the term at the tail of the ring. If the slot is
 * empty, the caller's pid is queued on pop_queue and a later producer
 * will enif_send it a {pop, Term} message (see queue_push); the caller
 * is expected to block in `receive` after getting 'wait_for_msg'. */
static ERL_NIF_TERM
queue_async_pop(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Load queue */
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    if (q->id != queue_id)
        return mk_error(env, "not_identical_queue_id");
    uint64_t qsize = q->queue_size;
    uint64_t tail = q->tail;
    /* Walk the buffer starting at the tail position until we either
       claim a full slot for reading or find an empty one. */
    while (1) {
        uint64_t index = SLOT_INDEX(tail, qsize);
        /* __sync_val_compare_and_swap returns the value seen *before*
           the operation: STATE_FULL here means the FULL->READ transition
           succeeded and we own the slot. (The original tested for
           STATE_READ, i.e. treated "another reader already owns it" as
           success, and on a successful swap fell into the advance branch,
           leaving the slot locked in STATE_READ forever.) */
        uint64_t ret = __sync_val_compare_and_swap(&q->slots_states[index],
                                                   STATE_FULL,
                                                   STATE_READ);
        if (ret == STATE_FULL) {
            /* Exclusive read lock acquired. */
            break;
        } else if (ret == STATE_WRITE) {
            /* A write is in progress. Spinning or helping is not
               implemented; bail out with a diagnostic atom. */
            return mk_atom(env, "write_in_progress_not_implemented");
        } else if (ret == STATE_EMPTY) {
            /* Queue looks empty: register the caller as a waiting
               consumer. The next producer checks this queue first and
               enif_sends the term instead of writing a slot. */
            ErlNifPid *pid = enif_alloc(sizeof(ErlNifPid));
            if (pid == NULL)
                return mk_error(env, "alloc_failed");
            pid = enif_self(env, pid);
            enqueue(q->pop_queue, pid);
            return mk_atom(env, "wait_for_msg");
        } else {
            /* STATE_READ: another consumer owns this slot; move on. */
            tail = __sync_add_and_fetch(&q->tail, 1);
        }
    }
    /* Copy term into the calling process env; the slot env's copy can
       now be garbage collected. */
    ERL_NIF_TERM copy = enif_make_copy(env, q->slots_terms[SLOT_INDEX(tail, qsize)]);
    /* Release the slot. The tail counter is deliberately not bumped
       here: other walkers may already have advanced it several times. */
    q->slots_terms[SLOT_INDEX(tail, qsize)] = 0;
    if (__sync_bool_compare_and_swap(&q->slots_states[SLOT_INDEX(tail, qsize)],
                                     STATE_READ,
                                     STATE_EMPTY)) {
        return enif_make_tuple2(env, mk_atom(env, "ok"), copy);
    } else {
        return mk_error(env, "could_not_update_slots_after_pop");
    }
}
/* NIF: debug(QueueId) -> {Tail, Head, [SlotState], [SlotTerm]}.
 * Non-atomic snapshot of the ring for debugging: the tail/head counters
 * plus per-slot state integers and terms (atom 'null' for empty slots). */
static ERL_NIF_TERM
queue_debug(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    ERL_NIF_TERM *slots_states = enif_alloc(sizeof(ERL_NIF_TERM) * q->queue_size);
    ERL_NIF_TERM *slots_terms = enif_alloc(sizeof(ERL_NIF_TERM) * q->queue_size);
    for (uint64_t i = 0; i < q->queue_size; i++) {
        slots_states[i] = enif_make_int(env, q->slots_states[i * CACHE_LINE_SIZE]);
        if (q->slots_terms[i * CACHE_LINE_SIZE] == 0) {
            slots_terms[i] = mk_atom(env, "null");
        } else {
            slots_terms[i] = enif_make_copy(env, q->slots_terms[i * CACHE_LINE_SIZE]);
        }
    }
    /* enif_make_list_from_array copies the array contents, so the
       scratch buffers can be released here (they leaked on every call
       in the original). */
    ERL_NIF_TERM states_list =
        enif_make_list_from_array(env, slots_states, q->queue_size);
    ERL_NIF_TERM terms_list =
        enif_make_list_from_array(env, slots_terms, q->queue_size);
    enif_free(slots_states);
    enif_free(slots_terms);
    return enif_make_tuple4(env,
                            enif_make_uint64(env, q->tail),
                            enif_make_uint64(env, q->head),
                            states_list,
                            terms_list);
}
/* NIF: debug_poppers(QueueId) -> [pid() | 'null'].
 * Debug dump of the waiting-consumer (pop) queue. Walks the linked list
 * twice: first to count nodes, then to build one term per node. Not
 * synchronized against concurrent enqueue/dequeue. */
static ERL_NIF_TERM
queue_debug_poppers(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    uint64_t pop_queue_size = 0;
    /* Pass 1: count nodes, skipping the value-less sentinel.
       NOTE(review): unlike pass 2, head is dereferenced here without
       Q_PTR masking -- if queue->head carries ABA count bits (as dequeue
       stores them) this reads through a tagged pointer; confirm. */
    cq_node_t *node = q->pop_queue->head;
    if (node->value == NULL) {
        node = node->next;
        node = Q_PTR(node);
    }
    while (node != NULL) {
        pop_queue_size++;
        node = node->next;
        node = Q_PTR(node);
    }
    ERL_NIF_TERM *pop_queue_pids = enif_alloc(sizeof(ERL_NIF_TERM) * pop_queue_size);
    /* Pass 2: collect a term per node (masked this time). */
    node = q->pop_queue->head;
    node = Q_PTR(node);
    if (node->value == NULL) {
        node = node->next;
        node = Q_PTR(node);
    }
    uint64_t i = 0;
    while (node != NULL) {
        if (node->value == 0) {
            pop_queue_pids[i] = mk_atom(env, "null");
        }
        else {
            pop_queue_pids[i] = enif_make_pid(env, node->value);
        }
        i++;
        node = node->next;
        node = Q_PTR(node);
    }
    /* enif_make_list_from_array copies, so the scratch array is freed. */
    ERL_NIF_TERM list = enif_make_list_from_array(env, pop_queue_pids, pop_queue_size);
    enif_free(pop_queue_pids);
    return list;
}
/* NIF: print_bits() -> ok.
 * Debug helper: prints the 64 bits of a zero-initialised heap word to
 * stderr, MSB first. Fixes vs. original: `1 << bit` was UB for
 * bit >= 31 (int shift), the loop printed the *pointer* value instead
 * of dereferencing it (the intended `*p1` was commented out), `%d` was
 * given a 64-bit operand, and the allocation leaked. */
static ERL_NIF_TERM
print_bits(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    uint64_t *p1 = malloc(sizeof(uint64_t));
    if (p1 == NULL)
        return mk_error(env, "alloc_failed");
    *p1 = 0;
    for (int bit = 63; bit >= 0; bit--) {
        uint64_t power = 1ULL << bit;   /* 64-bit shift, no UB */
        uint64_t word = *p1;            /* dereference, not the pointer */
        fprintf(stderr, "%d", (int)((word & power) >> bit));
    }
    fprintf(stderr, "\n");
    free(p1);
    return mk_atom(env, "ok");
}
/* Destructor for CQ_RESOURCE instances, invoked by the VM when a
 * resource's reference count drops to zero. Currently only logs.
 * NOTE(review): none of the cq_t buffers/envs are released here --
 * confirm ownership before adding frees. */
void free_resource(ErlNifEnv* env, void* arg)
{
    //cq_t *cq = (cq_t *) arg;
    fprintf(stderr, "free_resource\n");
}
/* Create an empty Michael–Scott style linked queue: head and tail both
 * point at a single value-less sentinel node. */
cq_queue_t * new_queue()
{
    cq_node_t *sentinel = enif_alloc(sizeof(cq_node_t));
    sentinel->value = NULL;
    sentinel->next = NULL;

    cq_queue_t *q = enif_alloc(sizeof(cq_queue_t));
    q->head = sentinel;
    q->tail = sentinel;
    return q;
}
/* Append `pid` to a waiting-process queue (lock-free Michael–Scott
 * enqueue; the low Q_MASK bits of node pointers carry an ABA counter).
 * Fixes vs. original: `break` executed regardless of whether the link
 * CAS succeeded (dropping nodes under contention), `next` was read
 * through the unmasked tail pointer, and pointers were printed with
 * `%lu` instead of `%p`. */
void enqueue(cq_queue_t *queue, ErlNifPid *pid)
{
    cq_node_t *node = enif_alloc(sizeof(cq_node_t));
    node->value = pid;
    node->next = NULL;
    fprintf(stderr, "node %p\n", (void *)node);
    cq_node_t *tail = NULL;
    uint64_t tail_count = 0;
    while (1) {
        tail = queue->tail;
        cq_node_t *tail_ptr = Q_PTR(tail);     /* strip counter before deref */
        tail_count = Q_COUNT(tail);
        cq_node_t *next = tail_ptr->next;      /* was tail->next (unmasked) */
        cq_node_t *next_ptr = Q_PTR(next);
        uint64_t next_count = Q_COUNT(next);
        if (tail != queue->tail)
            continue;                          /* snapshot went stale; retry */
        if (next_ptr == NULL) {
            /* Tail really was last: link the new node. Only stop when
               the CAS succeeds. */
            if (__sync_bool_compare_and_swap(&tail_ptr->next,
                                             next,
                                             Q_SET_COUNT(node, next_count+1))) {
                fprintf(stderr, "CAS(tail_ptr->next, next, (node, next_count+1)) -> true\n");
                break;
            }
        } else {
            /* Tail lags behind; help swing it forward, then retry. */
            __sync_bool_compare_and_swap(&queue->tail,
                                         tail,
                                         Q_SET_COUNT(next_ptr, next_count+1));
            fprintf(stderr, "CAS(queue->tail, tail, (next_ptr, next_count+1))\n");
        }
    }
    /* Swing tail to the new node; a failed CAS means another thread
       already helped, which is fine. */
    cq_node_t *node_with_count = Q_SET_COUNT(node, tail_count+1);
    int ret = __sync_bool_compare_and_swap(&queue->tail,
                                           tail,
                                           node_with_count);
    fprintf(stderr, "CAS(queue->tail, tail, %p) -> %d\n", (void *)node_with_count, ret);
}
/* Remove the oldest pid from a waiting-process queue (lock-free
 * Michael–Scott dequeue). Returns 1 and stores the pid in *pid on
 * success, 0 if the queue is empty.
 * Fixes vs. original: `next` was read via the unmasked `head` pointer
 * (count bits included) instead of `head_ptr`, and pointers were
 * printed with `%lu`. */
int dequeue(cq_queue_t *queue, ErlNifPid **pid)
{
    fprintf(stderr, "dequeue\n");
    cq_node_t *head, *head_ptr, *tail, *tail_ptr, *next, *next_ptr;
    while (1) {
        head = queue->head;
        head_ptr = Q_PTR(head);
        tail = queue->tail;
        tail_ptr = Q_PTR(tail);
        next = head_ptr->next;                 /* was head->next (unmasked) */
        next_ptr = Q_PTR(next);
        fprintf(stderr, "head %p, tail %p, next %p\n",
                (void *)head, (void *)tail, (void *)next);
        if (head != queue->head)
            continue;                          /* stale snapshot; retry */
        if (head_ptr == tail_ptr) {
            if (next_ptr == NULL) {
                return 0; /* Queue is empty */
            }
            /* Tail lags behind head; help advance it. */
            fprintf(stderr, "CAS(queue->tail, tail, (next_ptr, tail+1))\n");
            __sync_bool_compare_and_swap(&queue->tail,
                                         tail,
                                         Q_SET_COUNT(next_ptr, Q_COUNT(tail)+1));
        } else {
            fprintf(stderr, "next->value %p\n", (void *)next_ptr->value);
            *pid = next_ptr->value;
            fprintf(stderr, "CAS(queue->head, head, (next_ptr, head+1))\n");
            if (__sync_bool_compare_and_swap(&queue->head,
                                             head,
                                             Q_SET_COUNT(next_ptr, Q_COUNT(head)+1)))
                break;
        }
    }
    /* NOTE(review): the detached node (Q_PTR(head)) and the pid
       allocation are never freed -- see commented-out enif_free. */
    // free pid
    //enif_free(Q_PTR(head));
    return 1;
}
/* NIF load callback: allocate the global queue-id -> cq_t* table and
 * register the "cq" resource type. Returns 0 on success, -1 on failure.
 * The NIF entry points accept ids 0..8 inclusive (`queue_id > 8`
 * checks), so the table needs 9 slots: the original calloc(8, ...)
 * allowed an out-of-bounds access for id 8. Also uses sizeof(cq_t *)
 * -- the element type -- rather than sizeof(cq_t **). */
int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info) {
    QUEUES = (cq_t **) calloc(9, sizeof(cq_t *));
    if (QUEUES == NULL)
        return -1;
    ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER);
    CQ_RESOURCE = enif_open_resource_type(env, "cq", "cq",
                                          &free_resource, flags, NULL);
    if (CQ_RESOURCE == NULL)
        return -1;
    return 0;
}
/* NIF dispatch table: Erlang function name, arity, C implementation. */
static ErlNifFunc nif_funcs[] = {
    {"new" , 3, queue_new},
    {"free" , 1, queue_free},
    {"push" , 2, queue_push},
    {"async_pop", 1, queue_async_pop},
    {"debug" , 1, queue_debug},
    {"debug_poppers", 1, queue_debug_poppers},
    {"print_bits", 0, print_bits}
};
/* Register module 'cq'; load callback only, no reload/upgrade/unload. */
ERL_NIF_INIT(cq, nif_funcs, load, NULL, NULL, NULL);

+ 0
- 71
c_src/cq/cq_nif.h 查看文件

@ -1,71 +0,0 @@
/* cq_nif.h -- shared declarations for the cq concurrent-queue NIF. */
#include <stdint.h>
#include "erl_nif.h"

/* One slot per cache line to avoid false sharing between slot states. */
#define CACHE_LINE_SIZE 64

/* Ring-buffer index; __size must be a power of two. Arguments and the
 * whole expression are parenthesised so the macro composes safely inside
 * larger expressions (the original `__index & (__size - 1)` was not). */
#define SLOT_INDEX(__index, __size) ((__index) & ((__size) - 1))

/* The low 2 bits of queue-node pointers carry an ABA counter. */
#define Q_MASK 3L
#define Q_PTR(__ptr) ((cq_node_t *) (((uint64_t)(__ptr)) & (~Q_MASK)))
#define Q_COUNT(__ptr) (((uint64_t)(__ptr)) & Q_MASK)
#define Q_SET_COUNT(__ptr, __val) ((cq_node_t *) (((uint64_t)(__ptr)) | ((__val) & Q_MASK)))

/* Slot lifecycle: EMPTY -> WRITE -> FULL -> READ -> EMPTY. */
#define STATE_EMPTY 0
#define STATE_WRITE 1
#define STATE_READ 2
#define STATE_FULL 3

/* NOTE(review): this is a *definition* in a header; it relies on C
 * "common" symbols and breaks under -fno-common if included from more
 * than one translation unit. */
ErlNifResourceType* CQ_RESOURCE;

typedef struct cq_node cq_node_t;
/* Linked-list node for the waiting-producer/consumer queues. */
struct cq_node {
    ErlNifEnv *env;
    //ERL_NIF_TERM term;
    ErlNifPid *value;    /* NULL marks the sentinel node */
    cq_node_t *next;     /* tagged pointer: low bits are an ABA count */
};

/* Michael–Scott style queue; head == tail == sentinel when empty. */
typedef struct cq_queue {
    cq_node_t *head;
    cq_node_t *tail;
} cq_queue_t;

// TODO: Add padding between the fields
typedef struct cq {
    uint32_t id;              /* index into QUEUES */
    uint64_t queue_size;      /* ring capacity, power of two */
    uint64_t overflow_size;
    uint64_t head;            /* producer position */
    uint64_t tail;            /* consumer position */
    uint8_t *slots_states;    /* STATE_*, strided by CACHE_LINE_SIZE */
    ERL_NIF_TERM *slots_terms;
    ErlNifEnv **slots_envs;   /* per-slot env owning the copied term */
    cq_queue_t *push_queue;   /* blocked producers (not yet used) */
    cq_queue_t *pop_queue;    /* waiting consumers */
    uint8_t *overflow_states;
    ERL_NIF_TERM *overflow_terms;
    ErlNifEnv **overflow_envs;
} cq_t;

cq_t **QUEUES = NULL; /* Initialized on nif load */

ERL_NIF_TERM mk_atom(ErlNifEnv* env, const char* atom);
ERL_NIF_TERM mk_error(ErlNifEnv* env, const char* msg);
int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info);
void free_resource(ErlNifEnv*, void*);
cq_queue_t* new_queue(void);
void enqueue(cq_queue_t *q, ErlNifPid *pid);

+ 0
- 564
c_src/cq1/cq_nif.c 查看文件

@ -1,564 +0,0 @@
#include <stdio.h>
#include <unistd.h>
#include "erl_nif.h"
#include "cq_nif.h"
/* #ifndef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
# error Requires dirty schedulers
#endif */
/* Return the atom named `atom`, reusing an existing atom when possible
 * and creating it only when it does not exist yet. */
ERL_NIF_TERM
mk_atom(ErlNifEnv* env, const char* atom)
{
    ERL_NIF_TERM existing;
    if (enif_make_existing_atom(env, atom, &existing, ERL_NIF_LATIN1))
        return existing;
    return enif_make_atom(env, atom);
}
/* Build the conventional {error, Reason} tuple from a reason string. */
ERL_NIF_TERM
mk_error(ErlNifEnv* env, const char* mesg)
{
    ERL_NIF_TERM tag = mk_atom(env, "error");
    ERL_NIF_TERM reason = mk_atom(env, mesg);
    return enif_make_tuple2(env, tag, reason);
}
/* NIF: new(QueueId, QueueSize, OverflowSize) -> {ok, Ref} | {error, Reason}.
 *
 * Allocates and registers a cq_t under QUEUES[QueueId]. The slot arrays
 * are indexed elsewhere at element stride i*CACHE_LINE_SIZE, so each
 * array must hold queue_size*CACHE_LINE_SIZE *elements*; the original
 * calloc'd CACHE_LINE_SIZE bytes per slot, overrunning for any element
 * wider than one byte. overflow_envs was also sized with queue_size
 * instead of overflow_size. */
static ERL_NIF_TERM
queue_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    cq_t *q = enif_alloc_resource(CQ_RESOURCE, sizeof(cq_t));
    if (q == NULL)
        return mk_error(env, "priv_alloc_error");
    ERL_NIF_TERM ret = enif_make_resource(env, q);
    /* enif_release_resource(ret); */
    uint32_t queue_id = 0;
    uint32_t queue_size = 0;
    uint32_t overflow_size = 0;
    if (!enif_get_uint(env, argv[0], &queue_id) ||
        !enif_get_uint(env, argv[1], &queue_size) ||
        !enif_get_uint(env, argv[2], &overflow_size))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "bad_queue_id");
    /* TODO: Check that queue_size is power of 2 */
    if (QUEUES[queue_id] != NULL)
        return mk_error(env, "queue_id_already_exists");
    q->id = queue_id;
    q->queue_size = queue_size;
    q->overflow_size = overflow_size;
    q->tail = 0;
    q->head = 0;
    /* Element-sized allocations matching the i*CACHE_LINE_SIZE stride. */
    q->slots_states = calloc(q->queue_size * CACHE_LINE_SIZE, sizeof(uint8_t));
    q->slots_terms = calloc(q->queue_size * CACHE_LINE_SIZE, sizeof(ERL_NIF_TERM));
    q->slots_envs = calloc(q->queue_size * CACHE_LINE_SIZE, sizeof(ErlNifEnv *));
    q->overflow_terms = calloc(q->overflow_size * CACHE_LINE_SIZE, sizeof(ERL_NIF_TERM));
    q->overflow_envs = calloc(q->overflow_size * CACHE_LINE_SIZE, sizeof(ErlNifEnv *));
    q->push_queue = new_queue();
    q->pop_queue = new_queue();
    if (q->slots_states == NULL || q->slots_terms == NULL ||
        q->slots_envs == NULL || q->overflow_terms == NULL ||
        q->overflow_envs == NULL)
        return mk_error(env, "alloc_failed");
    /* One private env per slot; it owns the term copied in by push. */
    for (uint64_t i = 0; i < q->queue_size; i++) {
        ErlNifEnv *slot_env = enif_alloc_env();
        q->slots_envs[i*CACHE_LINE_SIZE] = slot_env;
    }
    QUEUES[q->id] = q;
    return enif_make_tuple2(env, mk_atom(env, "ok"), ret);
}
/* NIF: free(QueueId) -> ok | {error, Reason}.
 * Detaches the queue from the global table. As before, the underlying
 * buffers are not yet released (see TODO). */
static ERL_NIF_TERM
queue_free(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id) || queue_id > 8)
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    /* TODO: Free all the things! */
    QUEUES[queue_id] = NULL;
    return enif_make_atom(env, "ok");
}
/* NIF: push(QueueId, Term) -> ok | {error, Reason}.
 *
 * If a consumer is waiting, sends it {pop, Term} directly; otherwise
 * claims the next ring slot (EMPTY -> WRITE), copies the term into the
 * slot's env, and publishes it (WRITE -> FULL).
 *
 * Fixes vs. original: (1) the slot-claim loop's `break` only exited the
 * switch, so a successful claim spun forever; (2) the CAS result cases
 * were inverted -- STATE_EMPTY (old value) means *we* won the slot,
 * STATE_WRITE means another producer owns it; (3) the 'pop' atom was
 * built in the caller's env but embedded in a msg_env tuple (envs must
 * not be mixed); (4) msg_env leaked when enif_send failed. */
static ERL_NIF_TERM
queue_push(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "badarg");
    /* Load the queue */
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    if (q->id != queue_id)
        return mk_error(env, "not_identical_queue_id");
    /* Debug dump of slot states. */
    for (uint64_t i = 0; i < q->queue_size; i++) {
        fprintf(stderr, "queue slot %d, index %d, state %d\n",
                (int)i, (int)(i*CACHE_LINE_SIZE), q->slots_states[i*CACHE_LINE_SIZE]);
    }
    /* If there's consumers waiting, the queue must be empty and we
       should directly pick a consumer to notify. */
    ErlNifPid *waiting_consumer;
    int dequeue_ret = dequeue(q->pop_queue, &waiting_consumer);
    if (dequeue_ret) {
        ErlNifEnv *msg_env = enif_alloc_env();
        ERL_NIF_TERM copy = enif_make_copy(msg_env, argv[1]);
        /* Build the whole message in msg_env (atom included). */
        ERL_NIF_TERM tuple = enif_make_tuple2(msg_env, mk_atom(msg_env, "pop"), copy);
        if (enif_send(env, waiting_consumer, msg_env, tuple)) {
            enif_free_env(msg_env);
            return mk_atom(env, "ok");
        } else {
            enif_free_env(msg_env);   /* leaked on this path before */
            return mk_error(env, "notify_failed");
        }
    }
    /* Claim a slot: advance head, then CAS its state EMPTY -> WRITE so
       no other producer touches it. */
    uint64_t head = __sync_add_and_fetch(&q->head, 1);
    size_t size = q->queue_size;
    int claimed = 0;
    while (!claimed) {
        uint64_t index = SLOT_INDEX(head, size);
        uint64_t ret = __sync_val_compare_and_swap(&q->slots_states[index],
                                                   STATE_EMPTY,
                                                   STATE_WRITE);
        switch (ret) {
        case STATE_EMPTY:
            /* Old value was EMPTY => the swap succeeded; slot is ours. */
            claimed = 1;
            break;
        case STATE_WRITE:
            /* Another producer owns this slot; try the next one. */
            head = __sync_add_and_fetch(&q->head, 1);
            break;
        case STATE_FULL:
            /* We have caught up with the tail and the buffer is full.
               Blocking the producer is not implemented. */
            return mk_error(env, "full_not_implemented");
        }
    }
    /* Copy term to slot-specific temporary process env. */
    ERL_NIF_TERM copy = enif_make_copy(q->slots_envs[SLOT_INDEX(head, size)], argv[1]);
    q->slots_terms[SLOT_INDEX(head, size)] = copy;
    __sync_synchronize(); /* publish term before flipping the state */
    /* Mark the slot ready to be consumed */
    if (__sync_bool_compare_and_swap(&q->slots_states[SLOT_INDEX(head, size)],
                                     STATE_WRITE,
                                     STATE_FULL)) {
        return mk_atom(env, "ok");
    } else {
        return mk_error(env, "could_not_update_slots_after_insert");
    }
}
/* NIF: async_pop(QueueId) -> {ok, Term} | wait_for_msg | Atom | {error, Reason}.
 *
 * Tries to consume the term at the tail of the ring. If the slot is
 * empty, the caller's pid is queued on pop_queue and a later producer
 * will enif_send it a {pop, Term} message (see queue_push); the caller
 * is expected to block in `receive` after getting 'wait_for_msg'. */
static ERL_NIF_TERM
queue_async_pop(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Load queue */
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    if (q->id != queue_id)
        return mk_error(env, "not_identical_queue_id");
    uint64_t qsize = q->queue_size;
    uint64_t tail = q->tail;
    /* Walk the buffer starting at the tail position until we either
       claim a full slot for reading or find an empty one. */
    while (1) {
        uint64_t index = SLOT_INDEX(tail, qsize);
        /* __sync_val_compare_and_swap returns the value seen *before*
           the operation: STATE_FULL here means the FULL->READ transition
           succeeded and we own the slot. (The original tested for
           STATE_READ, i.e. treated "another reader already owns it" as
           success, and on a successful swap fell into the advance branch,
           leaving the slot locked in STATE_READ forever.) */
        uint64_t ret = __sync_val_compare_and_swap(&q->slots_states[index],
                                                   STATE_FULL,
                                                   STATE_READ);
        if (ret == STATE_FULL) {
            /* Exclusive read lock acquired. */
            break;
        } else if (ret == STATE_WRITE) {
            /* A write is in progress. Spinning or helping is not
               implemented; bail out with a diagnostic atom. */
            return mk_atom(env, "write_in_progress_not_implemented");
        } else if (ret == STATE_EMPTY) {
            /* Queue looks empty: register the caller as a waiting
               consumer. The next producer checks this queue first and
               enif_sends the term instead of writing a slot. */
            ErlNifPid *pid = enif_alloc(sizeof(ErlNifPid));
            if (pid == NULL)
                return mk_error(env, "alloc_failed");
            pid = enif_self(env, pid);
            enqueue(q->pop_queue, pid);
            return mk_atom(env, "wait_for_msg");
        } else {
            /* STATE_READ: another consumer owns this slot; move on. */
            tail = __sync_add_and_fetch(&q->tail, 1);
        }
    }
    /* Copy term into the calling process env; the slot env's copy can
       now be garbage collected. */
    ERL_NIF_TERM copy = enif_make_copy(env, q->slots_terms[SLOT_INDEX(tail, qsize)]);
    /* Release the slot. The tail counter is deliberately not bumped
       here: other walkers may already have advanced it several times. */
    q->slots_terms[SLOT_INDEX(tail, qsize)] = 0;
    if (__sync_bool_compare_and_swap(&q->slots_states[SLOT_INDEX(tail, qsize)],
                                     STATE_READ,
                                     STATE_EMPTY)) {
        return enif_make_tuple2(env, mk_atom(env, "ok"), copy);
    } else {
        return mk_error(env, "could_not_update_slots_after_pop");
    }
}
/* NIF: debug(QueueId) -> {Tail, Head, [SlotState], [SlotTerm]}.
 * Non-atomic snapshot of the ring for debugging: the tail/head counters
 * plus per-slot state integers and terms (atom 'null' for empty slots). */
static ERL_NIF_TERM
queue_debug(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    ERL_NIF_TERM *slots_states = enif_alloc(sizeof(ERL_NIF_TERM) * q->queue_size);
    ERL_NIF_TERM *slots_terms = enif_alloc(sizeof(ERL_NIF_TERM) * q->queue_size);
    for (uint64_t i = 0; i < q->queue_size; i++) {
        slots_states[i] = enif_make_int(env, q->slots_states[i * CACHE_LINE_SIZE]);
        if (q->slots_terms[i * CACHE_LINE_SIZE] == 0) {
            slots_terms[i] = mk_atom(env, "null");
        } else {
            slots_terms[i] = enif_make_copy(env, q->slots_terms[i * CACHE_LINE_SIZE]);
        }
    }
    /* enif_make_list_from_array copies the array contents, so the
       scratch buffers can be released here (they leaked on every call
       in the original). */
    ERL_NIF_TERM states_list =
        enif_make_list_from_array(env, slots_states, q->queue_size);
    ERL_NIF_TERM terms_list =
        enif_make_list_from_array(env, slots_terms, q->queue_size);
    enif_free(slots_states);
    enif_free(slots_terms);
    return enif_make_tuple4(env,
                            enif_make_uint64(env, q->tail),
                            enif_make_uint64(env, q->head),
                            states_list,
                            terms_list);
}
/* NIF: debug_poppers(QueueId) -> [pid() | 'null'].
 * Debug dump of the waiting-consumer (pop) queue. Walks the linked list
 * twice: first to count nodes, then to build one term per node. Not
 * synchronized against concurrent enqueue/dequeue. */
static ERL_NIF_TERM
queue_debug_poppers(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    uint64_t pop_queue_size = 0;
    /* Pass 1: count nodes, skipping the value-less sentinel.
       NOTE(review): unlike pass 2, head is dereferenced here without
       Q_PTR masking -- if queue->head carries ABA count bits (as dequeue
       stores them) this reads through a tagged pointer; confirm. */
    cq_node_t *node = q->pop_queue->head;
    if (node->value == NULL) {
        node = node->next;
        node = Q_PTR(node);
    }
    while (node != NULL) {
        pop_queue_size++;
        node = node->next;
        node = Q_PTR(node);
    }
    ERL_NIF_TERM *pop_queue_pids = enif_alloc(sizeof(ERL_NIF_TERM) * pop_queue_size);
    /* Pass 2: collect a term per node (masked this time). */
    node = q->pop_queue->head;
    node = Q_PTR(node);
    if (node->value == NULL) {
        node = node->next;
        node = Q_PTR(node);
    }
    uint64_t i = 0;
    while (node != NULL) {
        if (node->value == 0) {
            pop_queue_pids[i] = mk_atom(env, "null");
        }
        else {
            pop_queue_pids[i] = enif_make_pid(env, node->value);
        }
        i++;
        node = node->next;
        node = Q_PTR(node);
    }
    /* enif_make_list_from_array copies, so the scratch array is freed. */
    ERL_NIF_TERM list = enif_make_list_from_array(env, pop_queue_pids, pop_queue_size);
    enif_free(pop_queue_pids);
    return list;
}
/* NIF: print_bits() -> ok.
 * Debug helper: prints the 64 bits of a zero-initialised heap word to
 * stderr, MSB first. Fixes vs. original: `1 << bit` was UB for
 * bit >= 31 (int shift), the loop printed the *pointer* value instead
 * of dereferencing it (the intended `*p1` was commented out), `%d` was
 * given a 64-bit operand, and the allocation leaked. */
static ERL_NIF_TERM
print_bits(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    uint64_t *p1 = malloc(sizeof(uint64_t));
    if (p1 == NULL)
        return mk_error(env, "alloc_failed");
    *p1 = 0;
    for (int bit = 63; bit >= 0; bit--) {
        uint64_t power = 1ULL << bit;   /* 64-bit shift, no UB */
        uint64_t word = *p1;            /* dereference, not the pointer */
        fprintf(stderr, "%d", (int)((word & power) >> bit));
    }
    fprintf(stderr, "\n");
    free(p1);
    return mk_atom(env, "ok");
}
/* Destructor for CQ_RESOURCE instances, invoked by the VM when a
 * resource's reference count drops to zero. Currently only logs.
 * NOTE(review): none of the cq_t buffers/envs are released here --
 * confirm ownership before adding frees. */
void free_resource(ErlNifEnv* env, void* arg)
{
    //cq_t *cq = (cq_t *) arg;
    fprintf(stderr, "free_resource\n");
}
/* Create an empty Michael–Scott style linked queue: head and tail both
 * point at a single value-less sentinel node. */
cq_queue_t * new_queue()
{
    cq_node_t *sentinel = enif_alloc(sizeof(cq_node_t));
    sentinel->value = NULL;
    sentinel->next = NULL;

    cq_queue_t *q = enif_alloc(sizeof(cq_queue_t));
    q->head = sentinel;
    q->tail = sentinel;
    return q;
}
/* Append `pid` to a waiting-process queue (lock-free Michael–Scott
 * enqueue; the low Q_MASK bits of node pointers carry an ABA counter).
 * Fixes vs. original: `break` executed regardless of whether the link
 * CAS succeeded (dropping nodes under contention), `next` was read
 * through the unmasked tail pointer, and pointers were printed with
 * `%lu` instead of `%p`. */
void enqueue(cq_queue_t *queue, ErlNifPid *pid)
{
    cq_node_t *node = enif_alloc(sizeof(cq_node_t));
    node->value = pid;
    node->next = NULL;
    fprintf(stderr, "node %p\n", (void *)node);
    cq_node_t *tail = NULL;
    uint64_t tail_count = 0;
    while (1) {
        tail = queue->tail;
        cq_node_t *tail_ptr = Q_PTR(tail);     /* strip counter before deref */
        tail_count = Q_COUNT(tail);
        cq_node_t *next = tail_ptr->next;      /* was tail->next (unmasked) */
        cq_node_t *next_ptr = Q_PTR(next);
        uint64_t next_count = Q_COUNT(next);
        if (tail != queue->tail)
            continue;                          /* snapshot went stale; retry */
        if (next_ptr == NULL) {
            /* Tail really was last: link the new node. Only stop when
               the CAS succeeds. */
            if (__sync_bool_compare_and_swap(&tail_ptr->next,
                                             next,
                                             Q_SET_COUNT(node, next_count+1))) {
                fprintf(stderr, "CAS(tail_ptr->next, next, (node, next_count+1)) -> true\n");
                break;
            }
        } else {
            /* Tail lags behind; help swing it forward, then retry. */
            __sync_bool_compare_and_swap(&queue->tail,
                                         tail,
                                         Q_SET_COUNT(next_ptr, next_count+1));
            fprintf(stderr, "CAS(queue->tail, tail, (next_ptr, next_count+1))\n");
        }
    }
    /* Swing tail to the new node; a failed CAS means another thread
       already helped, which is fine. */
    cq_node_t *node_with_count = Q_SET_COUNT(node, tail_count+1);
    int ret = __sync_bool_compare_and_swap(&queue->tail,
                                           tail,
                                           node_with_count);
    fprintf(stderr, "CAS(queue->tail, tail, %p) -> %d\n", (void *)node_with_count, ret);
}
/* Remove the oldest pid from a waiting-process queue (lock-free
 * Michael–Scott dequeue). Returns 1 and stores the pid in *pid on
 * success, 0 if the queue is empty.
 * Fixes vs. original: `next` was read via the unmasked `head` pointer
 * (count bits included) instead of `head_ptr`, and pointers were
 * printed with `%lu`. */
int dequeue(cq_queue_t *queue, ErlNifPid **pid)
{
    fprintf(stderr, "dequeue\n");
    cq_node_t *head, *head_ptr, *tail, *tail_ptr, *next, *next_ptr;
    while (1) {
        head = queue->head;
        head_ptr = Q_PTR(head);
        tail = queue->tail;
        tail_ptr = Q_PTR(tail);
        next = head_ptr->next;                 /* was head->next (unmasked) */
        next_ptr = Q_PTR(next);
        fprintf(stderr, "head %p, tail %p, next %p\n",
                (void *)head, (void *)tail, (void *)next);
        if (head != queue->head)
            continue;                          /* stale snapshot; retry */
        if (head_ptr == tail_ptr) {
            if (next_ptr == NULL) {
                return 0; /* Queue is empty */
            }
            /* Tail lags behind head; help advance it. */
            fprintf(stderr, "CAS(queue->tail, tail, (next_ptr, tail+1))\n");
            __sync_bool_compare_and_swap(&queue->tail,
                                         tail,
                                         Q_SET_COUNT(next_ptr, Q_COUNT(tail)+1));
        } else {
            fprintf(stderr, "next->value %p\n", (void *)next_ptr->value);
            *pid = next_ptr->value;
            fprintf(stderr, "CAS(queue->head, head, (next_ptr, head+1))\n");
            if (__sync_bool_compare_and_swap(&queue->head,
                                             head,
                                             Q_SET_COUNT(next_ptr, Q_COUNT(head)+1)))
                break;
        }
    }
    /* NOTE(review): the detached node (Q_PTR(head)) and the pid
       allocation are never freed -- see commented-out enif_free. */
    // free pid
    //enif_free(Q_PTR(head));
    return 1;
}
/* NIF load callback: allocate the global queue-id -> cq_t* table and
 * register the "cq" resource type. Returns 0 on success, -1 on failure.
 * The NIF entry points accept ids 0..8 inclusive (`queue_id > 8`
 * checks), so the table needs 9 slots: the original calloc(8, ...)
 * allowed an out-of-bounds access for id 8. Also uses sizeof(cq_t *)
 * -- the element type -- rather than sizeof(cq_t **). */
int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info) {
    QUEUES = (cq_t **) calloc(9, sizeof(cq_t *));
    if (QUEUES == NULL)
        return -1;
    ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER);
    CQ_RESOURCE = enif_open_resource_type(env, "cq", "cq",
                                          &free_resource, flags, NULL);
    if (CQ_RESOURCE == NULL)
        return -1;
    return 0;
}
/* NIF dispatch table: Erlang function name, arity, C implementation. */
static ErlNifFunc nif_funcs[] = {
    {"new" , 3, queue_new},
    {"free" , 1, queue_free},
    {"push" , 2, queue_push},
    {"async_pop", 1, queue_async_pop},
    {"debug" , 1, queue_debug},
    {"debug_poppers", 1, queue_debug_poppers},
    {"print_bits", 0, print_bits}
};
/* Register module 'cq'; load callback only, no reload/upgrade/unload. */
ERL_NIF_INIT(cq, nif_funcs, load, NULL, NULL, NULL);

+ 0
- 71
c_src/cq1/cq_nif.h 查看文件

@ -1,71 +0,0 @@
/* cq_nif.h -- shared declarations for the cq concurrent-queue NIF. */
#include <stdint.h>
#include "erl_nif.h"

/* One slot per cache line to avoid false sharing between slot states. */
#define CACHE_LINE_SIZE 64

/* Ring-buffer index; __size must be a power of two. Arguments and the
 * whole expression are parenthesised so the macro composes safely inside
 * larger expressions (the original `__index & (__size - 1)` was not). */
#define SLOT_INDEX(__index, __size) ((__index) & ((__size) - 1))

/* The low 2 bits of queue-node pointers carry an ABA counter. */
#define Q_MASK 3L
#define Q_PTR(__ptr) ((cq_node_t *) (((uint64_t)(__ptr)) & (~Q_MASK)))
#define Q_COUNT(__ptr) (((uint64_t)(__ptr)) & Q_MASK)
#define Q_SET_COUNT(__ptr, __val) ((cq_node_t *) (((uint64_t)(__ptr)) | ((__val) & Q_MASK)))

/* Slot lifecycle: EMPTY -> WRITE -> FULL -> READ -> EMPTY. */
#define STATE_EMPTY 0
#define STATE_WRITE 1
#define STATE_READ 2
#define STATE_FULL 3

/* NOTE(review): this is a *definition* in a header; it relies on C
 * "common" symbols and breaks under -fno-common if included from more
 * than one translation unit. */
ErlNifResourceType* CQ_RESOURCE;

typedef struct cq_node cq_node_t;
/* Linked-list node for the waiting-producer/consumer queues. */
struct cq_node {
    ErlNifEnv *env;
    //ERL_NIF_TERM term;
    ErlNifPid *value;    /* NULL marks the sentinel node */
    cq_node_t *next;     /* tagged pointer: low bits are an ABA count */
};

/* Michael–Scott style queue; head == tail == sentinel when empty. */
typedef struct cq_queue {
    cq_node_t *head;
    cq_node_t *tail;
} cq_queue_t;

// TODO: Add padding between the fields
typedef struct cq {
    uint32_t id;              /* index into QUEUES */
    uint64_t queue_size;      /* ring capacity, power of two */
    uint64_t overflow_size;
    uint64_t head;            /* producer position */
    uint64_t tail;            /* consumer position */
    uint8_t *slots_states;    /* STATE_*, strided by CACHE_LINE_SIZE */
    ERL_NIF_TERM *slots_terms;
    ErlNifEnv **slots_envs;   /* per-slot env owning the copied term */
    cq_queue_t *push_queue;   /* blocked producers (not yet used) */
    cq_queue_t *pop_queue;    /* waiting consumers */
    uint8_t *overflow_states;
    ERL_NIF_TERM *overflow_terms;
    ErlNifEnv **overflow_envs;
} cq_t;

cq_t **QUEUES = NULL; /* Initialized on nif load */

ERL_NIF_TERM mk_atom(ErlNifEnv* env, const char* atom);
ERL_NIF_TERM mk_error(ErlNifEnv* env, const char* msg);
int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info);
void free_resource(ErlNifEnv*, void*);
cq_queue_t* new_queue(void);
void enqueue(cq_queue_t *q, ErlNifPid *pid);

+ 0
- 26
c_src/cq1/rebar.config 查看文件

@ -1,26 +0,0 @@
%% rebar port-compiler configuration for the cq1 NIF: compile every C
%% and C++ source in this directory into ../../priv/cq1.so.
{port_specs, [
    {"../../priv/cq1.so", [
        "*.c",
        "*.cc"
    ]}
]}.
%% Platform-specific compiler/linker flags, kept for reference but
%% currently disabled.
%% {port_env, [
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)",
%% "CFLAGS", "$CFLAGS -Ic_src/ -g -Wall -flto -Werror -O3"},
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)",
%% "CXXFLAGS", "$CXXFLAGS -Ic_src/ -g -Wall -flto -Werror -O3"},
%%
%% {"(linux|solaris|freebsd|netbsd|openbsd|dragonfly|darwin|gnu)",
%% "LDFLAGS", "$LDFLAGS -flto -lstdc++"},
%%
%% %% OS X Leopard flags for 64-bit
%% {"darwin9.*-64$", "CXXFLAGS", "-m64"},
%% {"darwin9.*-64$", "LDFLAGS", "-arch x86_64"},
%%
%% %% OS X Snow Leopard flags for 32-bit
%% {"darwin10.*-32$", "CXXFLAGS", "-m32"},
%% {"darwin10.*-32$", "LDFLAGS", "-arch i386"},
%%
%% {"win32", "CXXFLAGS", "$CXXFLAGS /O2 /DNDEBUG"}
%% ]}.

+ 0
- 564
c_src/cq2/cq_nif.c 查看文件

@ -1,564 +0,0 @@
#include <stdio.h>
#include <unistd.h>
#include "erl_nif.h"
#include "cq_nif.h"
/* #ifndef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
# error Requires dirty schedulers
#endif */
/* Return the atom named `atom`, reusing an existing atom when possible
 * and creating it only when it does not exist yet. */
ERL_NIF_TERM
mk_atom(ErlNifEnv* env, const char* atom)
{
    ERL_NIF_TERM existing;
    if (enif_make_existing_atom(env, atom, &existing, ERL_NIF_LATIN1))
        return existing;
    return enif_make_atom(env, atom);
}
/* Build the conventional {error, Reason} tuple from a reason string. */
ERL_NIF_TERM
mk_error(ErlNifEnv* env, const char* mesg)
{
    ERL_NIF_TERM tag = mk_atom(env, "error");
    ERL_NIF_TERM reason = mk_atom(env, mesg);
    return enif_make_tuple2(env, tag, reason);
}
/* NIF: new(QueueId, QueueSize, OverflowSize) -> {ok, Ref} | {error, Reason}.
 *
 * Allocates and registers a cq_t under QUEUES[QueueId]. The slot arrays
 * are indexed elsewhere at element stride i*CACHE_LINE_SIZE, so each
 * array must hold queue_size*CACHE_LINE_SIZE *elements*; the original
 * calloc'd CACHE_LINE_SIZE bytes per slot, overrunning for any element
 * wider than one byte. overflow_envs was also sized with queue_size
 * instead of overflow_size. */
static ERL_NIF_TERM
queue_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    cq_t *q = enif_alloc_resource(CQ_RESOURCE, sizeof(cq_t));
    if (q == NULL)
        return mk_error(env, "priv_alloc_error");
    ERL_NIF_TERM ret = enif_make_resource(env, q);
    /* enif_release_resource(ret); */
    uint32_t queue_id = 0;
    uint32_t queue_size = 0;
    uint32_t overflow_size = 0;
    if (!enif_get_uint(env, argv[0], &queue_id) ||
        !enif_get_uint(env, argv[1], &queue_size) ||
        !enif_get_uint(env, argv[2], &overflow_size))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "bad_queue_id");
    /* TODO: Check that queue_size is power of 2 */
    if (QUEUES[queue_id] != NULL)
        return mk_error(env, "queue_id_already_exists");
    q->id = queue_id;
    q->queue_size = queue_size;
    q->overflow_size = overflow_size;
    q->tail = 0;
    q->head = 0;
    /* Element-sized allocations matching the i*CACHE_LINE_SIZE stride. */
    q->slots_states = calloc(q->queue_size * CACHE_LINE_SIZE, sizeof(uint8_t));
    q->slots_terms = calloc(q->queue_size * CACHE_LINE_SIZE, sizeof(ERL_NIF_TERM));
    q->slots_envs = calloc(q->queue_size * CACHE_LINE_SIZE, sizeof(ErlNifEnv *));
    q->overflow_terms = calloc(q->overflow_size * CACHE_LINE_SIZE, sizeof(ERL_NIF_TERM));
    q->overflow_envs = calloc(q->overflow_size * CACHE_LINE_SIZE, sizeof(ErlNifEnv *));
    q->push_queue = new_queue();
    q->pop_queue = new_queue();
    if (q->slots_states == NULL || q->slots_terms == NULL ||
        q->slots_envs == NULL || q->overflow_terms == NULL ||
        q->overflow_envs == NULL)
        return mk_error(env, "alloc_failed");
    /* One private env per slot; it owns the term copied in by push. */
    for (uint64_t i = 0; i < q->queue_size; i++) {
        ErlNifEnv *slot_env = enif_alloc_env();
        q->slots_envs[i*CACHE_LINE_SIZE] = slot_env;
    }
    QUEUES[q->id] = q;
    return enif_make_tuple2(env, mk_atom(env, "ok"), ret);
}
/* NIF: free(QueueId) -> ok | {error, Reason}.
 * Detaches the queue from the global table. As before, the underlying
 * buffers are not yet released (see TODO). */
static ERL_NIF_TERM
queue_free(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id) || queue_id > 8)
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    /* TODO: Free all the things! */
    QUEUES[queue_id] = NULL;
    return enif_make_atom(env, "ok");
}
/* NIF: push(QueueId, Term) -> ok | {error, Reason}.
 *
 * If a consumer is waiting, sends it {pop, Term} directly; otherwise
 * claims the next ring slot (EMPTY -> WRITE), copies the term into the
 * slot's env, and publishes it (WRITE -> FULL).
 *
 * Fixes vs. original: (1) the slot-claim loop's `break` only exited the
 * switch, so a successful claim spun forever; (2) the CAS result cases
 * were inverted -- STATE_EMPTY (old value) means *we* won the slot,
 * STATE_WRITE means another producer owns it; (3) the 'pop' atom was
 * built in the caller's env but embedded in a msg_env tuple (envs must
 * not be mixed); (4) msg_env leaked when enif_send failed. */
static ERL_NIF_TERM
queue_push(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "badarg");
    /* Load the queue */
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    if (q->id != queue_id)
        return mk_error(env, "not_identical_queue_id");
    /* Debug dump of slot states. */
    for (uint64_t i = 0; i < q->queue_size; i++) {
        fprintf(stderr, "queue slot %d, index %d, state %d\n",
                (int)i, (int)(i*CACHE_LINE_SIZE), q->slots_states[i*CACHE_LINE_SIZE]);
    }
    /* If there's consumers waiting, the queue must be empty and we
       should directly pick a consumer to notify. */
    ErlNifPid *waiting_consumer;
    int dequeue_ret = dequeue(q->pop_queue, &waiting_consumer);
    if (dequeue_ret) {
        ErlNifEnv *msg_env = enif_alloc_env();
        ERL_NIF_TERM copy = enif_make_copy(msg_env, argv[1]);
        /* Build the whole message in msg_env (atom included). */
        ERL_NIF_TERM tuple = enif_make_tuple2(msg_env, mk_atom(msg_env, "pop"), copy);
        if (enif_send(env, waiting_consumer, msg_env, tuple)) {
            enif_free_env(msg_env);
            return mk_atom(env, "ok");
        } else {
            enif_free_env(msg_env);   /* leaked on this path before */
            return mk_error(env, "notify_failed");
        }
    }
    /* Claim a slot: advance head, then CAS its state EMPTY -> WRITE so
       no other producer touches it. */
    uint64_t head = __sync_add_and_fetch(&q->head, 1);
    size_t size = q->queue_size;
    int claimed = 0;
    while (!claimed) {
        uint64_t index = SLOT_INDEX(head, size);
        uint64_t ret = __sync_val_compare_and_swap(&q->slots_states[index],
                                                   STATE_EMPTY,
                                                   STATE_WRITE);
        switch (ret) {
        case STATE_EMPTY:
            /* Old value was EMPTY => the swap succeeded; slot is ours. */
            claimed = 1;
            break;
        case STATE_WRITE:
            /* Another producer owns this slot; try the next one. */
            head = __sync_add_and_fetch(&q->head, 1);
            break;
        case STATE_FULL:
            /* We have caught up with the tail and the buffer is full.
               Blocking the producer is not implemented. */
            return mk_error(env, "full_not_implemented");
        }
    }
    /* Copy term to slot-specific temporary process env. */
    ERL_NIF_TERM copy = enif_make_copy(q->slots_envs[SLOT_INDEX(head, size)], argv[1]);
    q->slots_terms[SLOT_INDEX(head, size)] = copy;
    __sync_synchronize(); /* publish term before flipping the state */
    /* Mark the slot ready to be consumed */
    if (__sync_bool_compare_and_swap(&q->slots_states[SLOT_INDEX(head, size)],
                                     STATE_WRITE,
                                     STATE_FULL)) {
        return mk_atom(env, "ok");
    } else {
        return mk_error(env, "could_not_update_slots_after_insert");
    }
}
/* NIF: async_pop(QueueId) -> {ok, Term} | wait_for_msg | Atom | {error, Reason}.
 *
 * Tries to consume the term at the tail of the ring. If the slot is
 * empty, the caller's pid is queued on pop_queue and a later producer
 * will enif_send it a {pop, Term} message (see queue_push); the caller
 * is expected to block in `receive` after getting 'wait_for_msg'. */
static ERL_NIF_TERM
queue_async_pop(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Load queue */
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    if (queue_id > 8)
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    if (q->id != queue_id)
        return mk_error(env, "not_identical_queue_id");
    uint64_t qsize = q->queue_size;
    uint64_t tail = q->tail;
    /* Walk the buffer starting at the tail position until we either
       claim a full slot for reading or find an empty one. */
    while (1) {
        uint64_t index = SLOT_INDEX(tail, qsize);
        /* __sync_val_compare_and_swap returns the value seen *before*
           the operation: STATE_FULL here means the FULL->READ transition
           succeeded and we own the slot. (The original tested for
           STATE_READ, i.e. treated "another reader already owns it" as
           success, and on a successful swap fell into the advance branch,
           leaving the slot locked in STATE_READ forever.) */
        uint64_t ret = __sync_val_compare_and_swap(&q->slots_states[index],
                                                   STATE_FULL,
                                                   STATE_READ);
        if (ret == STATE_FULL) {
            /* Exclusive read lock acquired. */
            break;
        } else if (ret == STATE_WRITE) {
            /* A write is in progress. Spinning or helping is not
               implemented; bail out with a diagnostic atom. */
            return mk_atom(env, "write_in_progress_not_implemented");
        } else if (ret == STATE_EMPTY) {
            /* Queue looks empty: register the caller as a waiting
               consumer. The next producer checks this queue first and
               enif_sends the term instead of writing a slot. */
            ErlNifPid *pid = enif_alloc(sizeof(ErlNifPid));
            if (pid == NULL)
                return mk_error(env, "alloc_failed");
            pid = enif_self(env, pid);
            enqueue(q->pop_queue, pid);
            return mk_atom(env, "wait_for_msg");
        } else {
            /* STATE_READ: another consumer owns this slot; move on. */
            tail = __sync_add_and_fetch(&q->tail, 1);
        }
    }
    /* Copy term into the calling process env; the slot env's copy can
       now be garbage collected. */
    ERL_NIF_TERM copy = enif_make_copy(env, q->slots_terms[SLOT_INDEX(tail, qsize)]);
    /* Release the slot. The tail counter is deliberately not bumped
       here: other walkers may already have advanced it several times. */
    q->slots_terms[SLOT_INDEX(tail, qsize)] = 0;
    if (__sync_bool_compare_and_swap(&q->slots_states[SLOT_INDEX(tail, qsize)],
                                     STATE_READ,
                                     STATE_EMPTY)) {
        return enif_make_tuple2(env, mk_atom(env, "ok"), copy);
    } else {
        return mk_error(env, "could_not_update_slots_after_pop");
    }
}
static ERL_NIF_TERM
queue_debug(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Debug dump of a queue: {Tail, Head, [SlotState], [SlotTerm]}. */
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    /* QUEUES holds 8 entries, so valid ids are 0..7.  The original
       accepted 8, which read one element past the end of the array. */
    if (queue_id >= 8)
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    ERL_NIF_TERM *slots_states = enif_alloc(sizeof(ERL_NIF_TERM) * q->queue_size);
    ERL_NIF_TERM *slots_terms = enif_alloc(sizeof(ERL_NIF_TERM) * q->queue_size);
    if (slots_states == NULL || slots_terms == NULL) {
        if (slots_states != NULL) enif_free(slots_states);
        if (slots_terms != NULL) enif_free(slots_terms);
        return mk_error(env, "alloc_failed");
    }
    for (uint64_t i = 0; i < q->queue_size; i++) {
        /* Slot state/term arrays are strided one cache line apart
           (see CACHE_LINE_SIZE) to avoid false sharing. */
        slots_states[i] = enif_make_int(env, q->slots_states[i * CACHE_LINE_SIZE]);
        if (q->slots_terms[i * CACHE_LINE_SIZE] == 0) {
            slots_terms[i] = mk_atom(env, "null");
        } else {
            slots_terms[i] = enif_make_copy(env, q->slots_terms[i * CACHE_LINE_SIZE]);
        }
    }
    ERL_NIF_TERM result = enif_make_tuple4(env,
                              enif_make_uint64(env, q->tail),
                              enif_make_uint64(env, q->head),
                              enif_make_list_from_array(env, slots_states, q->queue_size),
                              enif_make_list_from_array(env, slots_terms, q->queue_size));
    /* enif_make_list_from_array copies the terms into the list, so the
       scratch arrays can be released (the original leaked both). */
    enif_free(slots_states);
    enif_free(slots_terms);
    return result;
}
static ERL_NIF_TERM
queue_debug_poppers(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Debug dump of the pids blocked waiting for a pop on this queue. */
    uint32_t queue_id = 0;
    if (!enif_get_uint(env, argv[0], &queue_id))
        return mk_error(env, "badarg");
    if (queue_id >= 8)   /* valid ids are 0..7; 8 was out of bounds */
        return mk_error(env, "badarg");
    cq_t *q = QUEUES[queue_id];
    if (q == NULL)
        return mk_error(env, "bad_queue_id");
    /* Pass 1: count waiting consumers.  Node links carry a 2-bit
       modification count in their low bits, so every pointer must be
       stripped with Q_PTR before dereferencing — the original forgot
       this for the head pointer in this pass. */
    uint64_t pop_queue_size = 0;
    cq_node_t *node = Q_PTR(q->pop_queue->head);
    if (node->value == NULL) {           /* skip the sentinel node */
        node = Q_PTR(node->next);
    }
    while (node != NULL) {
        pop_queue_size++;
        node = Q_PTR(node->next);
    }
    if (pop_queue_size == 0)
        return enif_make_list(env, 0);   /* avoid a zero-byte alloc */
    /* Pass 2: collect the pids into a list. */
    ERL_NIF_TERM *pop_queue_pids = enif_alloc(sizeof(ERL_NIF_TERM) * pop_queue_size);
    if (pop_queue_pids == NULL)
        return mk_error(env, "alloc_failed");
    node = Q_PTR(q->pop_queue->head);
    if (node->value == NULL) {
        node = Q_PTR(node->next);
    }
    uint64_t i = 0;
    while (node != NULL && i < pop_queue_size) {
        if (node->value == 0) {
            pop_queue_pids[i] = mk_atom(env, "null");
        } else {
            pop_queue_pids[i] = enif_make_pid(env, node->value);
        }
        i++;
        node = Q_PTR(node->next);
    }
    ERL_NIF_TERM list = enif_make_list_from_array(env, pop_queue_pids, pop_queue_size);
    enif_free(pop_queue_pids);
    return list;
}
static ERL_NIF_TERM
print_bits(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Debug helper: print the 64 bits of a zeroed word, MSB first. */
    uint64_t *p1 = malloc(8);
    if (p1 == NULL)
        return mk_error(env, "alloc_failed");
    *p1 = 0;
    for (int bit = 63; bit >= 0; bit--) {
        /* 1ULL: the original shifted the 32-bit literal 1, and
           "1 << 63" is undefined behaviour. */
        uint64_t power = 1ULL << bit;
        /* Inspect the pointed-to value — the original printed bits of
           the pointer itself (the commented-out line was correct). */
        uint64_t word = *p1;
        fprintf(stderr, "%d", (int)((word & power) >> bit));
    }
    fprintf(stderr, "\n");
    free(p1);   /* malloc'd, so release with free(); original leaked it */
    return mk_atom(env, "ok");
}
/* Destructor registered with enif_open_resource_type for "cq"
   resources.  NOTE(review): currently only logs — it does not release
   the queue's slots or envs; looks like a stub. */
void free_resource(ErlNifEnv* env, void* arg)
{
    fprintf(stderr, "free_resource\n");
}
/* Allocate an empty Michael-Scott style queue: head and tail both
   point at a dummy sentinel node whose value is NULL. */
cq_queue_t * new_queue()
{
    cq_queue_t *q = enif_alloc(sizeof(cq_queue_t));
    cq_node_t *sentinel = enif_alloc(sizeof(cq_node_t));
    sentinel->next = NULL;
    sentinel->value = NULL;
    q->head = sentinel;
    q->tail = sentinel;
    return q;
}
/* Append `pid` to the lock-free queue of waiting consumers
   (Michael & Scott algorithm).  Queue links carry a 2-bit modification
   count in their low pointer bits (Q_PTR/Q_COUNT) to narrow the ABA
   window, so raw pointers must be stripped before dereferencing. */
void enqueue(cq_queue_t *queue, ErlNifPid *pid)
{
    cq_node_t *node = enif_alloc(sizeof(cq_node_t));
    node->value = pid;
    node->next = NULL;
    fprintf(stderr, "node %p\n", (void *)node);   /* %p, not %lu: pointer */
    cq_node_t *tail = NULL;
    uint64_t tail_count = 0;
    while (1) {
        tail = queue->tail;
        cq_node_t *tail_ptr = Q_PTR(tail);
        tail_count = Q_COUNT(tail);
        /* Read the link through the *stripped* pointer — the original
           dereferenced the raw counted pointer. */
        cq_node_t *next = tail_ptr->next;
        uint64_t next_count = Q_COUNT(next);
        if (tail == queue->tail) {              /* snapshot consistent? */
            if (Q_PTR(next) == NULL) {
                /* Tail points at the last node: try to link our node
                   after it.  Leave the loop ONLY when the CAS succeeds.
                   The original broke unconditionally (only the debug
                   print was guarded by the CAS), so a lost race could
                   drop the node entirely. */
                if (__sync_bool_compare_and_swap(&tail_ptr->next,
                                                 next,
                                                 Q_SET_COUNT(node, next_count+1))) {
                    break;
                }
            } else {
                /* Tail is lagging: help advance it, then retry. */
                __sync_bool_compare_and_swap(&queue->tail,
                                             tail,
                                             Q_SET_COUNT(Q_PTR(next), next_count+1));
            }
        }
    }
    /* Swing tail to the freshly linked node.  It is fine if another
       thread already advanced it — this CAS simply fails. */
    __sync_bool_compare_and_swap(&queue->tail,
                                 tail,
                                 Q_SET_COUNT(node, tail_count+1));
}
/* Pop the oldest waiting consumer into *pid.  Returns 1 on success and
   0 when the queue is empty (Michael & Scott dequeue with counted
   pointers — see enqueue for the pointer-tagging scheme). */
int dequeue(cq_queue_t *queue, ErlNifPid **pid)
{
    fprintf(stderr, "dequeue\n");
    cq_node_t *head, *head_ptr, *tail, *tail_ptr, *next, *next_ptr;
    while (1) {
        head = queue->head;
        head_ptr = Q_PTR(head);
        tail = queue->tail;
        tail_ptr = Q_PTR(tail);
        /* Follow the link through the stripped pointer: the raw `head`
           may carry count bits in its low bits (the original
           dereferenced it unmasked). */
        next = head_ptr->next;
        next_ptr = Q_PTR(next);
        /* %p with a void* cast — the original passed pointers to %lu,
           which is undefined behaviour. */
        fprintf(stderr, "head %p, tail %p, next %p\n",
                (void *)head, (void *)tail, (void *)next);
        if (head == queue->head) {               /* snapshot consistent? */
            if (head_ptr == tail_ptr) {
                if (next_ptr == NULL) {
                    return 0; /* Queue is empty */
                }
                /* Tail is lagging behind head: help advance it. */
                __sync_bool_compare_and_swap(&queue->tail,
                                             tail,
                                             Q_SET_COUNT(next_ptr, Q_COUNT(tail)+1));
            } else {
                /* Read the value before the CAS — after a successful
                   swap another dequeuer may reuse the node. */
                *pid = next_ptr->value;
                if (__sync_bool_compare_and_swap(&queue->head,
                                                 head,
                                                 Q_SET_COUNT(next_ptr, Q_COUNT(head)+1)))
                    break;
            }
        }
    }
    /* TODO(review): the retired head node is never freed here — queue
       nodes leak until the process exits.  Left as in the original. */
    return 1;
}
/* NIF load callback: allocate the global id->queue table and register
   the "cq" resource type.  Returns 0 on success, -1 to abort loading. */
int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info) {
    /* Global array mapping queue id (0..7) to its cq_t pointer.  The
       elements are of type cq_t *, so size by sizeof(cq_t *) — the
       original's sizeof(cq_t **) only worked by coincidence (both are
       pointer-sized). */
    QUEUES = (cq_t **) calloc(8, sizeof(cq_t *));
    if (QUEUES == NULL)
        return -1;
    ErlNifResourceFlags flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER);
    CQ_RESOURCE = enif_open_resource_type(env, "cq", "cq",
                                          &free_resource, flags, NULL);
    if (CQ_RESOURCE == NULL)
        return -1;
    return 0;
}
/* NIF dispatch table: Erlang function name/arity -> C implementation. */
static ErlNifFunc nif_funcs[] = {
    {"new" , 3, queue_new},
    {"free" , 1, queue_free},
    {"push" , 2, queue_push},
    {"async_pop", 1, queue_async_pop},
    {"debug" , 1, queue_debug},
    {"debug_poppers", 1, queue_debug_poppers},
    {"print_bits", 0, print_bits}
};

/* Register module `cq` with load callback only (no upgrade/unload). */
ERL_NIF_INIT(cq, nif_funcs, load, NULL, NULL, NULL);

+ 0
- 71
c_src/cq2/cq_nif.h 查看文件

@ -1,71 +0,0 @@
#include <stdint.h>
#include "erl_nif.h"

/* Slot arrays are strided by one cache line to avoid false sharing. */
#define CACHE_LINE_SIZE 64

/* Ring index for a monotonically increasing position.  Queue sizes
   must be powers of two for the mask to work.  All macro arguments are
   now parenthesized so expressions such as SLOT_INDEX(a + b, n)
   expand correctly (the originals were unhygienic). */
#define SLOT_INDEX(__index, __size) ((__index) & ((__size) - 1))

/* Queue-node pointers carry a 2-bit modification count in their low
   bits (nodes are at least 4-byte aligned) to narrow the ABA window.
   Strip with Q_PTR before dereferencing. */
#define Q_MASK 3L
#define Q_PTR(__ptr) ((cq_node_t *) (((uint64_t)(__ptr)) & (~Q_MASK)))
#define Q_COUNT(__ptr) (((uint64_t)(__ptr)) & Q_MASK)
#define Q_SET_COUNT(__ptr, __val) ((cq_node_t *) (((uint64_t)(__ptr)) | ((__val) & Q_MASK)))

/* Per-slot state machine for the ring buffer. */
#define STATE_EMPTY 0
#define STATE_WRITE 1
#define STATE_READ 2
#define STATE_FULL 3

/* NOTE(review): these globals are *defined* (not declared extern) in a
   header; including this file from more than one translation unit
   would violate the one-definition rule.  Kept as-is to preserve the
   existing interface. */
ErlNifResourceType* CQ_RESOURCE;

typedef struct cq_node cq_node_t;
struct cq_node {
    ErlNifEnv *env;
    //ERL_NIF_TERM term;
    ErlNifPid *value;    /* NULL marks the sentinel node */
    cq_node_t *next;     /* counted pointer: strip with Q_PTR */
};

typedef struct cq_queue {
    cq_node_t *head;
    cq_node_t *tail;
} cq_queue_t;

// TODO: Add padding between the fields
typedef struct cq {
    uint32_t id;
    uint64_t queue_size;      /* number of ring slots (power of two) */
    uint64_t overflow_size;
    uint64_t head;            /* producer position */
    uint64_t tail;            /* consumer position */
    uint8_t *slots_states;    /* STATE_* per slot, cache-line strided */
    ERL_NIF_TERM *slots_terms;
    ErlNifEnv **slots_envs;
    cq_queue_t *push_queue;
    cq_queue_t *pop_queue;    /* consumers blocked on an empty queue */
    uint8_t *overflow_states;
    ERL_NIF_TERM *overflow_terms;
    ErlNifEnv **overflow_envs;
} cq_t;

cq_t **QUEUES = NULL; /* Initialized on nif load */

ERL_NIF_TERM mk_atom(ErlNifEnv* env, const char* atom);
ERL_NIF_TERM mk_error(ErlNifEnv* env, const char* msg);
int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info);
void free_resource(ErlNifEnv*, void*);
cq_queue_t* new_queue(void);
void enqueue(cq_queue_t *q, ErlNifPid *pid);

+ 80
- 0
c_src/enlfq/Makefile 查看文件

@ -0,0 +1,80 @@
# Build the enlfq NIF shared library into ../priv/enlfq.so.
PROJECT = enlfq
# NOTE(review): CXXFLAGS is assigned unconditionally here, so the
# platform-specific `CXXFLAGS ?=` defaults further down never apply.
CXXFLAGS = -std=c++11 -O2 -Wextra -Werror -Wno-missing-field-initializers -fno-rtti -fno-exceptions
LDLIBS = -lstdc++
# Based on c_src.mk from erlang.mk by Loic Hoguin <essen@ninenines.eu>
CURDIR := $(shell pwd)
BASEDIR := $(abspath $(CURDIR)/..)
# NOTE(review): PROJECT is already set to "enlfq" above, so this
# fallback assignment is a no-op.
PROJECT ?= $(notdir $(BASEDIR))
PROJECT := $(strip $(PROJECT))
# Locate ERTS and erl_interface headers/libs by querying the local erl.
ERTS_INCLUDE_DIR ?= $(shell erl -noshell -s init stop -eval "io:format(\"~ts/erts-~ts/include/\", [code:root_dir(), erlang:system_info(version)]).")
ERL_INTERFACE_INCLUDE_DIR ?= $(shell erl -noshell -s init stop -eval "io:format(\"~ts\", [code:lib_dir(erl_interface, include)]).")
ERL_INTERFACE_LIB_DIR ?= $(shell erl -noshell -s init stop -eval "io:format(\"~ts\", [code:lib_dir(erl_interface, lib)]).")
C_SRC_DIR = $(CURDIR)
C_SRC_OUTPUT ?= $(CURDIR)/../priv/$(PROJECT).so
# System type and C compiler/flags.
UNAME_SYS := $(shell uname -s)
ifeq ($(UNAME_SYS), Darwin)
CC ?= cc
CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
else ifeq ($(UNAME_SYS), FreeBSD)
CC ?= cc
CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
CXXFLAGS ?= -O3 -finline-functions -Wall
else ifeq ($(UNAME_SYS), Linux)
CC ?= gcc
CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
CXXFLAGS ?= -O3 -finline-functions -Wall
endif
CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
LDFLAGS += -shared
# Verbosity.  Set V=1 on the command line for full command echo.
c_verbose_0 = @echo " C " $(?F);
c_verbose = $(c_verbose_$(V))
cpp_verbose_0 = @echo " CPP " $(?F);
cpp_verbose = $(cpp_verbose_$(V))
link_verbose_0 = @echo " LD " $(@F);
link_verbose = $(link_verbose_$(V))
# Every C/C++ source under this directory is compiled into the NIF.
SOURCES := $(shell find $(C_SRC_DIR) -type f \( -name "*.c" -o -name "*.C" -o -name "*.cc" -o -name "*.cpp" \))
OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
$(C_SRC_OUTPUT): $(OBJECTS)
	@mkdir -p $(BASEDIR)/priv/
	$(link_verbose) $(CC) $(OBJECTS) $(LDFLAGS) $(LDLIBS) -o $(C_SRC_OUTPUT)
%.o: %.c
	$(COMPILE_C) $(OUTPUT_OPTION) $<
%.o: %.cc
	$(COMPILE_CPP) $(OUTPUT_OPTION) $<
%.o: %.C
	$(COMPILE_CPP) $(OUTPUT_OPTION) $<
%.o: %.cpp
	$(COMPILE_CPP) $(OUTPUT_OPTION) $<
clean:
	@rm -f $(C_SRC_OUTPUT) $(OBJECTS)

+ 3637
- 0
c_src/enlfq/concurrentqueue.h
文件差異過大導致無法顯示
查看文件


+ 84
- 0
c_src/enlfq/enlfq.cc 查看文件

@ -0,0 +1,84 @@
#include "enlfq.h"
#include "enlfq_nif.h"
#include "nif_utils.h"
#include "concurrentqueue.h"
/* One queued element: the term is copied into its own private env so
   it can outlive the pushing process's environment. */
struct q_item {
    ErlNifEnv *env;
    ERL_NIF_TERM term;
};

/* Resource payload: wraps the moodycamel lock-free MPMC queue. */
struct squeue {
    moodycamel::ConcurrentQueue<q_item> *queue;
};
// Resource destructor: drain every remaining item, releasing the
// private env that owns each copied term, then delete the queue.
void nif_enlfq_free(ErlNifEnv *, void *obj) {
    squeue *inst = static_cast<squeue *>(obj);
    if (inst == nullptr) {
        return;
    }
    q_item leftover;
    while (inst->queue->try_dequeue(leftover)) {
        enif_free_env(leftover.env);
    }
    delete inst->queue;
}
/* new/0: allocate a queue resource and return {ok, QueueRef}. */
ERL_NIF_TERM nif_enlfq_new(ErlNifEnv *env, int, const ERL_NIF_TERM *) {
    shared_data *data = static_cast<shared_data *>(enif_priv_data(env));
    squeue *qinst = static_cast<squeue *>(enif_alloc_resource(data->resQueueInstance, sizeof(squeue)));
    /* Check the allocation *before* touching it — the original wrote
       qinst->queue first and only then tested qinst for NULL. */
    if (qinst == NULL)
        return make_error(env, "enif_alloc_resource failed");
    qinst->queue = new moodycamel::ConcurrentQueue<q_item>;
    ERL_NIF_TERM term = enif_make_resource(env, qinst);
    /* The term now holds the reference; drop ours so the destructor
       runs when Erlang garbage-collects the last reference. */
    enif_release_resource(qinst);
    return enif_make_tuple2(env, ATOMS.atomOk, term);
}
// push/2: copy the term into its own private env and enqueue it.
// Always returns `true` (the queue is unbounded).
ERL_NIF_TERM nif_enlfq_push(ErlNifEnv *env, int, const ERL_NIF_TERM argv[]) {
    shared_data *priv = static_cast<shared_data *>(enif_priv_data(env));
    squeue *handle = nullptr;
    if (!enif_get_resource(env, argv[0], priv->resQueueInstance,
                           reinterpret_cast<void **>(&handle))) {
        return enif_make_badarg(env);
    }
    q_item entry;
    entry.env = enif_alloc_env();
    entry.term = enif_make_copy(entry.env, argv[1]);
    handle->queue->enqueue(entry);
    return ATOMS.atomTrue;
}
// pop/1: dequeue one item.  Returns {ok, Term} with the term copied
// into the caller's env, or the atom `empty` when nothing is queued.
ERL_NIF_TERM nif_enlfq_pop(ErlNifEnv *env, int, const ERL_NIF_TERM argv[]) {
    shared_data *priv = static_cast<shared_data *>(enif_priv_data(env));
    squeue *handle = nullptr;
    if (!enif_get_resource(env, argv[0], priv->resQueueInstance,
                           reinterpret_cast<void **>(&handle))) {
        return enif_make_badarg(env);
    }
    q_item entry;
    if (!handle->queue->try_dequeue(entry)) {
        return ATOMS.atomEmpty;
    }
    ERL_NIF_TERM copied = enif_make_copy(env, entry.term);
    enif_free_env(entry.env);   // release the private env owning the original
    return enif_make_tuple2(env, ATOMS.atomOk, copied);
}

+ 10
- 0
c_src/enlfq/enlfq.h 查看文件

@ -0,0 +1,10 @@
#pragma once

#include "erl_nif.h"

/* NIF entry points implemented in enlfq.cc; C linkage so the dispatch
   table in enlfq_nif.cc can reference them. */
extern "C" {
    /* Resource destructor for a queue instance. */
    void nif_enlfq_free(ErlNifEnv *env, void *obj);
    /* new/0 -> {ok, QueueRef} */
    ERL_NIF_TERM nif_enlfq_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
    /* push/2 -> true */
    ERL_NIF_TERM nif_enlfq_push(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
    /* pop/1 -> {ok, Term} | empty */
    ERL_NIF_TERM nif_enlfq_pop(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
}

+ 57
- 0
c_src/enlfq/enlfq_nif.cc 查看文件

@ -0,0 +1,57 @@
#include <string.h>
#include "enlfq_nif.h"
#include "enlfq.h"
#include "nif_utils.h"
/* Atom names interned once at load time (see on_nif_load). */
const char kAtomOk[] = "ok";
const char kAtomError[] = "error";
const char kAtomTrue[] = "true";
//const char kAtomFalse[] = "false";
//const char kAtomUndefined[] = "undefined";
const char kAtomEmpty[] = "empty";

/* Global atom cache shared by all NIF calls. */
atoms ATOMS;
/* Register the queue resource type.  TAKEOVER lets a hot code upgrade
   adopt resources created by the previous module instance. */
void open_resources(ErlNifEnv *env, shared_data *data) {
    ErlNifResourceFlags flags = static_cast<ErlNifResourceFlags>(ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER);
    data->resQueueInstance = enif_open_resource_type(env, NULL, "enlfq_instance", nif_enlfq_free, flags, NULL);
}
/* NIF load: intern the atoms, allocate the shared private data and
   register the resource type.  Returns 0 on success, -1 to reject. */
int on_nif_load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM) {
    ATOMS.atomOk = make_atom(env, kAtomOk);
    ATOMS.atomError = make_atom(env, kAtomError);
    ATOMS.atomTrue = make_atom(env, kAtomTrue);
    ATOMS.atomEmpty = make_atom(env, kAtomEmpty);
    shared_data *data = static_cast<shared_data *>(enif_alloc(sizeof(shared_data)));
    if (data == NULL)
        return -1;
    open_resources(env, data);
    /* Fail the load if the resource type could not be registered — the
       original ignored this and would have crashed later in new/0. */
    if (data->resQueueInstance == NULL) {
        enif_free(data);
        return -1;
    }
    *priv_data = data;
    return 0;
}
// Release the shared private data allocated in on_nif_load.
void on_nif_unload(ErlNifEnv *, void *priv_data) {
    enif_free(static_cast<shared_data *>(priv_data));
}
// Hot code upgrade: build fresh private data and re-register the
// resource type (TAKEOVER lets it adopt live resources).
int on_nif_upgrade(ErlNifEnv *env, void **priv, void **, ERL_NIF_TERM) {
    shared_data *fresh = static_cast<shared_data *>(enif_alloc(sizeof(shared_data)));
    open_resources(env, fresh);
    *priv = fresh;
    return 0;
}
/* NIF dispatch table: Erlang name/arity -> C implementation. */
static ErlNifFunc nif_funcs[] =
        {
                {"new", 0, nif_enlfq_new},
                {"push", 2, nif_enlfq_push},
                {"pop", 1, nif_enlfq_pop}
        };

/* Register module `enlfq` with load/upgrade/unload callbacks. */
ERL_NIF_INIT(enlfq, nif_funcs, on_nif_load, NULL, on_nif_upgrade, on_nif_unload)

+ 19
- 0
c_src/enlfq/enlfq_nif.h 查看文件

@ -0,0 +1,19 @@
#pragma once

#include "erl_nif.h"

/* Atoms interned once at load time and reused by every NIF call. */
struct atoms
{
    ERL_NIF_TERM atomOk;
    ERL_NIF_TERM atomError;
    ERL_NIF_TERM atomTrue;
    // ERL_NIF_TERM atomFalse;
    // ERL_NIF_TERM atomUndefined;
    ERL_NIF_TERM atomEmpty;
};

/* Per-module private data, installed via *priv_data in on_nif_load. */
struct shared_data
{
    ErlNifResourceType* resQueueInstance;
};

extern atoms ATOMS;

+ 27
- 0
c_src/enlfq/nif_utils.cc 查看文件

@ -0,0 +1,27 @@
#include "nif_utils.h"
#include "enlfq_nif.h"
#include <string.h>
// Return the atom named `name`, reusing an existing entry in the VM's
// atom table when possible instead of creating a duplicate.
ERL_NIF_TERM make_atom(ErlNifEnv* env, const char* name)
{
    ERL_NIF_TERM atom;
    return enif_make_existing_atom(env, name, &atom, ERL_NIF_LATIN1)
               ? atom
               : enif_make_atom(env, name);
}
// Build a binary term containing `length` bytes copied from `buff`.
ERL_NIF_TERM make_binary(ErlNifEnv* env, const char* buff, size_t length)
{
    ERL_NIF_TERM result;
    unsigned char *dest = enif_make_new_binary(env, length, &result);
    memcpy(dest, buff, length);
    return result;
}
// Wrap a C string as the tuple {error, <<"...">>}.
ERL_NIF_TERM make_error(ErlNifEnv* env, const char* error)
{
    size_t len = strlen(error);
    return enif_make_tuple2(env, ATOMS.atomError, make_binary(env, error, len));
}

+ 6
- 0
c_src/enlfq/nif_utils.h 查看文件

@ -0,0 +1,6 @@
#pragma once
#include "erl_nif.h"
ERL_NIF_TERM make_atom(ErlNifEnv* env, const char* name);
ERL_NIF_TERM make_error(ErlNifEnv* env, const char* error);
ERL_NIF_TERM make_binary(ErlNifEnv* env, const char* buff, size_t length);

+ 7
- 0
c_src/enlfq/rebar.config 查看文件

@ -0,0 +1,7 @@
{port_specs, [
{"../../priv/enlfq.so", ["*.cc"]}
]}.

+ 172
- 0
c_src/etsq/etsq.cpp 查看文件

@ -0,0 +1,172 @@
#include "etsq.h"
/* Global name -> QueueInfo registry and the rwlock guarding it
   (created in load(), destroyed in unload()). */
ErlNifRWLock *qinfo_map_rwlock;
QInfoMap qinfo_map;
// Look up the QueueInfo registered under `name`, or NULL when absent.
// Not thread safe on its own: the caller must hold qinfo_map_rwlock
// (read or write) around the call.
QueueInfo* get_q_info(char* name)
{
    QInfoMap::iterator found = qinfo_map.find(name);
    return (found == qinfo_map.end()) ? NULL : found->second;
}
// Create a queue named `name` and register it in the global map.
// Takes ownership of `name`: the map key stores the raw pointer.
// NOTE(review): existence is checked by the caller *before* this write
// lock is taken, so two racing creators can both reach the insert; the
// losing QueueInfo (and its name) would leak since map::insert ignores
// duplicate keys -- confirm whether callers serialize queue creation.
void new_q(char* name)
{
    //std::cout<<"Create: " << name<<std::endl;
    WriteLock write_lock(qinfo_map_rwlock);
    QueueInfo *queue_info = new QueueInfo(name);
    qinfo_map.insert(QInfoMapPair(name, queue_info));
    //std::cout<<"Created: " << name<<std::endl;
}
bool push(char* name, ErlTerm *erl_term)
{
QueueInfo *pqueue_info = NULL;
ReadLock read_lock(qinfo_map_rwlock);
if (NULL != (pqueue_info = get_q_info(name)))
{
Mutex mutex(pqueue_info->pmutex);
pqueue_info->queue.push(erl_term);
return true;
}
return false;
}
// Returns new ErlTerm. Caller should delete it
// Pop (or peek, when read_only) the front of queue `name`.
// Return contract:
//   NULL                      - no queue with this name exists
//   ErlTerm wrapping {error,empty} - the queue exists but is empty
//   otherwise                 - the front term: a fresh copy when
//                               read_only, else the dequeued original.
// In every non-NULL case the caller owns the returned ErlTerm.
ErlTerm* pop(char* name, bool read_only)
{
    QueueInfo *pqueue_info = NULL;
    ReadLock read_lock(qinfo_map_rwlock);
    if (NULL != (pqueue_info = get_q_info(name)))
    {
        Mutex mutex(pqueue_info->pmutex);
        if (!pqueue_info->queue.empty())
        {
            ErlTerm *erl_term = pqueue_info->queue.front();
            if(read_only)
            {
                // Peek: hand back a deep copy, leaving the queue intact.
                return new ErlTerm(erl_term);
            }
            pqueue_info->queue.pop();
            return erl_term;
        }
        return new ErlTerm("empty");
    }
    return NULL;
}
/* new/1: create a named queue.  Returns ok | {error, already_exists}.
   The name buffer outlives this call because the registry map keys on
   the raw pointer (new_q takes ownership). */
static ERL_NIF_TERM new_queue(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    int size = 100;
    /* new char[size]: the original wrote `new char(size)`, which
       allocates a SINGLE char (value-initialized to 100) and then let
       enif_get_atom write up to `size` bytes into it — a heap buffer
       overflow. */
    char *name = new char[size];
    enif_get_atom(env, argv[0], name, size, ERL_NIF_LATIN1);
    {
        QueueInfo *pqueue_info = NULL;
        ReadLock read_lock(qinfo_map_rwlock);
        if (NULL != (pqueue_info = get_q_info(name)))
        {
            /* Not handed to the map — release it (original leaked). */
            delete[] name;
            return enif_make_error(env, "already_exists");
        }
    }
    new_q(name);
    return enif_make_atom(env, "ok");
}
// info/1: report a queue's name and current length as a proplist,
// e.g. [{name, Q}, {size, N}].  Badarg when the queue does not exist.
static ERL_NIF_TERM info(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    char name[100];
    enif_get_atom(env, argv[0], name, 100, ERL_NIF_LATIN1);
    int queue_size = 0;
    {
        ReadLock map_guard(qinfo_map_rwlock);
        QueueInfo *info_ptr = get_q_info(name);
        if (info_ptr == NULL) {
            return enif_make_badarg(env);
        }
        queue_size = info_ptr->queue.size();
    }
    ERL_NIF_TERM name_entry =
        enif_make_tuple2(env, enif_make_atom(env, "name"), enif_make_atom(env, name));
    ERL_NIF_TERM size_entry =
        enif_make_tuple2(env, enif_make_atom(env, "size"), enif_make_int(env, queue_size));
    return enif_make_list2(env, name_entry, size_entry);
}
// push_back/2: wrap argv[1] in a heap ErlTerm (deep copy into its own
// env) and enqueue it.  Badarg when the queue does not exist.
static ERL_NIF_TERM push_back(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    char name[100];
    enif_get_atom(env, argv[0], name, 100, ERL_NIF_LATIN1);
    ErlTerm *wrapped = new ErlTerm(argv[1]);
    if (!push(name, wrapped)) {
        delete wrapped;   // queue missing: reclaim the wrapper
        return enif_make_badarg(env);
    }
    return enif_make_atom(env, "ok");
}
// pop_front/1: dequeue the front term and copy it into the caller's
// env.  Returns {error, empty} (via pop) when the queue is empty and
// badarg when the queue does not exist.
static ERL_NIF_TERM pop_front(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    char name[100];
    enif_get_atom(env, argv[0], name, 100, ERL_NIF_LATIN1);
    ErlTerm *front = pop(name, false);
    if (front == NULL) {
        return enif_make_badarg(env);
    }
    ERL_NIF_TERM result = enif_make_copy(env, front->term);
    delete front;   // we own the popped wrapper
    return result;
}
// get_front/1: like pop_front but non-destructive — peeks at the front
// term (pop with read_only=true returns a private copy we must free).
static ERL_NIF_TERM get_front(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    char name[100];
    enif_get_atom(env, argv[0], name, 100, ERL_NIF_LATIN1);
    ErlTerm *front = pop(name, true);
    if (front == NULL) {
        return enif_make_badarg(env);
    }
    ERL_NIF_TERM result = enif_make_copy(env, front->term);
    delete front;
    return result;
}
// Accept only load_info == 1, the interface version this NIF expects.
static int is_ok_load_info(ErlNifEnv* env, ERL_NIF_TERM load_info)
{
    int version;
    if (!enif_get_int(env, load_info, &version)) {
        return 0;
    }
    return version == 1;
}
// Module load: validate load_info, then create the rwlock that guards
// the global queue registry.  Returns 0 on success, -1 to reject.
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    if (!is_ok_load_info(env, load_info)) {
        return -1;
    }
    qinfo_map_rwlock = enif_rwlock_create((char*)"qinfo");
    return 0;
}
// Hot-upgrade hook: nothing to migrate; just re-validate load_info.
static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
{
    return is_ok_load_info(env, load_info) ? 0 : -1;
}
// Module unload: tear down the registry rwlock created in load().
static void unload(ErlNifEnv* env, void* priv_data)
{
    enif_rwlock_destroy(qinfo_map_rwlock);
}
/* NIF dispatch table: Erlang name/arity -> C++ implementation. */
static ErlNifFunc nif_funcs[] = {
    {"new", 1, new_queue},
    {"info", 1, info},
    {"push_back", 2, push_back},
    {"pop_front", 1, pop_front},
    {"get_front", 1, get_front}
};

/* Register module `etsq` with load/upgrade/unload callbacks. */
ERL_NIF_INIT(etsq, nif_funcs, load, NULL, upgrade, unload)

+ 130
- 0
c_src/etsq/etsq.h 查看文件

@ -0,0 +1,130 @@
/*
* etsq.h
*
* Created on: Mar 21, 2016
* Author: Vinod
*/
#ifndef ETSQ_H_
#define ETSQ_H_
#include <iostream> // std::cin, std::cout
#include <map> // std::map
#include <queue> // std::queue
#include <string.h>
#include "erl_nif.h"
/* Build the tuple {error, Reason} with Reason as an atom. */
#define enif_make_error(env, error) enif_make_tuple2(env, \
        enif_make_atom(env, "error"), enif_make_atom(env, error))

/* Ordering functor so the registry map can key on raw char* by string
   content rather than by pointer identity. */
struct cmp_str
{
    bool operator()(char *a, char *b) const
    {
        return strcmp(a, b) < 0;
    }
};
// RAII wrapper pairing an ERL_NIF_TERM with the private
// process-independent env that owns it; the env (and thus the term)
// is released in the destructor.
// NOTE(review): no copy constructor from ErlTerm& or copy assignment
// is declared, so the compiler-generated ones would alias term_env and
// double-free it -- confirm ErlTerm is only ever handled via pointers.
class ErlTerm
{
public:
    ErlNifEnv *term_env;   // private env owning `term`
    ERL_NIF_TERM term;
public:
    // Deep-copy an arbitrary term into a fresh private env.
    ErlTerm(ERL_NIF_TERM erl_nif_term)
    {
        term_env = enif_alloc_env();
        this->term = enif_make_copy(term_env, erl_nif_term);
    }
    // Deep-copy another wrapped term (used by read-only pop/peek).
    ErlTerm(ErlTerm *erl_term)
    {
        term_env = enif_alloc_env();
        this->term = enif_make_copy(term_env, erl_term->term);
    }
    // Wrap a plain integer.
    ErlTerm(int value)
    {
        term_env = enif_alloc_env();
        this->term = enif_make_int(term_env, value);
    }
    // Wrap {error, Reason} built from a C-string atom name.
    ErlTerm(const char *error)
    {
        term_env = enif_alloc_env();
        this->term = enif_make_error(term_env, error);
    }
    ~ErlTerm()
    {
        enif_free_env(term_env);
        term_env = NULL;
    }
};
/* FIFO of heap-allocated ErlTerm pointers. */
typedef std::queue<ErlTerm*> ErlQueue;

/* A named queue plus the mutex serializing access to it. */
class QueueInfo
{
public:
    ErlNifMutex* pmutex;
    ErlQueue queue;
public:
    QueueInfo(char* name)
    {
        pmutex = enif_mutex_create(name);
    }
    // NOTE(review): any ErlTerm* still enqueued at destruction is not
    // deleted here and leaks -- confirm queues are drained first.
    ~QueueInfo()
    {
        enif_mutex_destroy(pmutex);
    }
};

/* Registry: queue name (owned char*, content-ordered via cmp_str)
   mapped to its QueueInfo. */
typedef std::map<char *, QueueInfo*, cmp_str> QInfoMap;
typedef std::pair<char *, QueueInfo*> QInfoMapPair;
// Class to handle Read lock
// RAII guard: takes the rwlock's read side for the enclosing scope.
class ReadLock
{
    ErlNifRWLock *pread_lock;
public:
    ReadLock(ErlNifRWLock *pread_lock)
    {
        this->pread_lock = pread_lock;
        enif_rwlock_rlock(this->pread_lock);
    };
    ~ReadLock()
    {
        enif_rwlock_runlock(pread_lock);
    };
};
// Class to handle Write lock
// RAII guard: takes the rwlock's exclusive write side for the scope.
class WriteLock
{
    ErlNifRWLock *pwrite_lock;
public:
    WriteLock(ErlNifRWLock *pwrite_lock)
    {
        this->pwrite_lock = pwrite_lock;
        enif_rwlock_rwlock(this->pwrite_lock);
    };
    ~WriteLock()
    {
        enif_rwlock_rwunlock(pwrite_lock);
    };
};
// Class to handle Mutex lock and unlock
// RAII guard: holds an ErlNifMutex for the enclosing scope.
class Mutex
{
    ErlNifMutex *pmtx;
public:
    Mutex(ErlNifMutex *pmtx)
    {
        this->pmtx = pmtx;
        enif_mutex_lock(this->pmtx);
    };
    ~Mutex()
    {
        enif_mutex_unlock(pmtx);
    };
};
#endif /* ETSQ_H_ */

+ 7
- 0
c_src/etsq/rebar.config 查看文件

@ -0,0 +1,7 @@
{port_specs, [
{"../../priv/etsq.so", ["*.cpp"]}
]}.

+ 103
- 0
c_src/gb_lru/binary.h 查看文件

@ -0,0 +1,103 @@
#include <iostream>
#include <algorithm>
#include <string.h>
// Lightweight view/owner of a byte buffer, ordered first by size and
// then by content (used as an associative-container key).
// Ownership: `allocated` is true only for buffers created by copy();
// every other constructor wraps external storage without copying.
class Binary {
public:
    unsigned char *bin;   // payload (not necessarily NUL-terminated)
    size_t size;          // payload length in bytes
    bool allocated;       // true when this object owns `bin`

    Binary() : bin(NULL), size(0), allocated(false) { }

    // Wrap a NUL-terminated C string; no copy, no ownership taken.
    Binary(const char *data) {
        bin = (unsigned char *) data;
        size = strlen(data);
        allocated = false;
    }

    // Shallow copy: shares the buffer and never takes ownership.
    // NOTE(review): the copy dangles if the owning Binary is destroyed
    // first -- confirm callers keep the source alive.
    Binary(const Binary &b) {
        bin = b.bin;
        size = b.size;
        allocated = false;
    }

    ~Binary() {
        if (allocated) {
            // copy() allocates with new[]; the original paired raw
            // `operator new` with scalar `delete`, which is undefined
            // behaviour.  Allocation and release now match.
            delete[] bin;
        }
    }

    // NOTE(review): assumes `bin` is NUL-terminated, which copy() does
    // not guarantee -- safe only for C-string-backed Binaries.
    operator std::string() {
        return (const char *) bin;
    }

    friend std::ostream & operator<<(std::ostream & str, Binary const &b) {
        return str << b.bin;
    }

    // Ordering: a shorter binary sorts first; equal sizes fall back to
    // a byte-wise memcmp.
    bool operator<(const Binary &b) {
        if(size < b.size) {
            return true;
        } else if (size > b.size) {
            return false;
        } else {
            return memcmp(bin,b.bin,size) < 0;
        }
    }

    bool operator<(Binary &b) {
        if(size < b.size) {
            return true;
        } else if (size > b.size) {
            return false;
        } else {
            return memcmp(bin,b.bin,size) < 0;
        }
    }

    bool operator>(const Binary &b) {
        if(size > b.size) {
            return true;
        } else if (size < b.size) {
            return false;
        } else {
            return memcmp(bin,b.bin,size) > 0;
        }
    }

    bool operator== (const Binary &b) {
        if (size == b.size ) {
            return memcmp(bin,b.bin, std::min(size, b.size)) == 0;
        } else {
            return false;
        }
    }

    operator std::string() const {
        return (const char*) bin;
    }

    // Re-point at a NUL-terminated C string (no copy, no ownership).
    // NOTE(review): does not release a previously copy()'d buffer.
    Binary& set_data(const char *data) {
        bin = (unsigned char *) data;
        size = strlen(data);
        return *this;
    }

    // Deep-copy `insize` bytes; this Binary owns the new buffer.
    void copy(char *inbin, size_t insize) {
        bin = new unsigned char[insize];
        allocated = true;
        size = insize;
        memcpy(bin, inbin, size);
    }
};
// Free-function ordering for two const Binary values, matching the
// member operators: shorter sorts first, equal sizes compare bytes.
inline bool operator < (const Binary &a, const Binary &b) {
    if (a.size != b.size) {
        return a.size < b.size;
    }
    return memcmp(a.bin, b.bin, std::min(a.size, b.size)) < 0;
}

+ 2394
- 0
c_src/gb_lru/btree.h
文件差異過大導致無法顯示
查看文件


+ 349
- 0
c_src/gb_lru/btree_container.h 查看文件

@ -0,0 +1,349 @@
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef UTIL_BTREE_BTREE_CONTAINER_H__
#define UTIL_BTREE_BTREE_CONTAINER_H__
#include <iosfwd>
#include <utility>
#include "btree.h"
namespace btree {
// A common base class for btree_set, btree_map, btree_multiset and
// btree_multimap.
// Thin facade that delegates every operation to the wrapped btree
// implementation instance (tree_).
template <typename Tree>
class btree_container {
  typedef btree_container<Tree> self_type;

 public:
  typedef typename Tree::params_type params_type;
  typedef typename Tree::key_type key_type;
  typedef typename Tree::value_type value_type;
  typedef typename Tree::key_compare key_compare;
  typedef typename Tree::allocator_type allocator_type;
  typedef typename Tree::pointer pointer;
  typedef typename Tree::const_pointer const_pointer;
  typedef typename Tree::reference reference;
  typedef typename Tree::const_reference const_reference;
  typedef typename Tree::size_type size_type;
  typedef typename Tree::difference_type difference_type;
  typedef typename Tree::iterator iterator;
  typedef typename Tree::const_iterator const_iterator;
  typedef typename Tree::reverse_iterator reverse_iterator;
  typedef typename Tree::const_reverse_iterator const_reverse_iterator;

 public:
  // Default constructor.
  btree_container(const key_compare &comp, const allocator_type &alloc)
      : tree_(comp, alloc) {
  }

  // Copy constructor.
  btree_container(const self_type &x)
      : tree_(x.tree_) {
  }

  // Iterator routines.
  iterator begin() { return tree_.begin(); }
  const_iterator begin() const { return tree_.begin(); }
  iterator end() { return tree_.end(); }
  const_iterator end() const { return tree_.end(); }
  reverse_iterator rbegin() { return tree_.rbegin(); }
  const_reverse_iterator rbegin() const { return tree_.rbegin(); }
  reverse_iterator rend() { return tree_.rend(); }
  const_reverse_iterator rend() const { return tree_.rend(); }

  // Lookup routines.
  iterator lower_bound(const key_type &key) {
    return tree_.lower_bound(key);
  }
  const_iterator lower_bound(const key_type &key) const {
    return tree_.lower_bound(key);
  }
  iterator upper_bound(const key_type &key) {
    return tree_.upper_bound(key);
  }
  const_iterator upper_bound(const key_type &key) const {
    return tree_.upper_bound(key);
  }
  std::pair<iterator,iterator> equal_range(const key_type &key) {
    return tree_.equal_range(key);
  }
  std::pair<const_iterator,const_iterator> equal_range(const key_type &key) const {
    return tree_.equal_range(key);
  }

  // Utility routines.
  void clear() {
    tree_.clear();
  }
  void swap(self_type &x) {
    tree_.swap(x.tree_);
  }
  void dump(std::ostream &os) const {
    tree_.dump(os);
  }
  void verify() const {
    tree_.verify();
  }

  // Size routines.
  size_type size() const { return tree_.size(); }
  size_type max_size() const { return tree_.max_size(); }
  bool empty() const { return tree_.empty(); }
  size_type height() const { return tree_.height(); }
  size_type internal_nodes() const { return tree_.internal_nodes(); }
  size_type leaf_nodes() const { return tree_.leaf_nodes(); }
  size_type nodes() const { return tree_.nodes(); }
  size_type bytes_used() const { return tree_.bytes_used(); }
  static double average_bytes_per_value() {
    return Tree::average_bytes_per_value();
  }
  double fullness() const { return tree_.fullness(); }
  double overhead() const { return tree_.overhead(); }

  // Element-wise equality: same size and pairwise-equal contents in
  // iteration order.
  bool operator==(const self_type& x) const {
    if (size() != x.size()) {
      return false;
    }
    for (const_iterator i = begin(), xi = x.begin(); i != end(); ++i, ++xi) {
      if (*i != *xi) {
        return false;
      }
    }
    return true;
  }

  bool operator!=(const self_type& other) const {
    return !operator==(other);
  }

 protected:
  Tree tree_;
};
// Stream a btree container by dumping its tree structure to `os`.
template <typename T>
inline std::ostream& operator<<(std::ostream &os, const btree_container<T> &b) {
  b.dump(os);
  return os;
}
// A common base class for btree_set and safe_btree_set.
// Adds the unique-key lookup/insert/erase operations on top of
// btree_container.
template <typename Tree>
class btree_unique_container : public btree_container<Tree> {
  typedef btree_unique_container<Tree> self_type;
  typedef btree_container<Tree> super_type;

 public:
  typedef typename Tree::key_type key_type;
  typedef typename Tree::value_type value_type;
  typedef typename Tree::size_type size_type;
  typedef typename Tree::key_compare key_compare;
  typedef typename Tree::allocator_type allocator_type;
  typedef typename Tree::iterator iterator;
  typedef typename Tree::const_iterator const_iterator;

 public:
  // Default constructor.
  btree_unique_container(const key_compare &comp = key_compare(),
                         const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
  }

  // Copy constructor.
  btree_unique_container(const self_type &x)
      : super_type(x) {
  }

  // Range constructor.
  template <class InputIterator>
  btree_unique_container(InputIterator b, InputIterator e,
                         const key_compare &comp = key_compare(),
                         const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
    insert(b, e);
  }

  // Lookup routines.
  iterator find(const key_type &key) {
    return this->tree_.find_unique(key);
  }
  const_iterator find(const key_type &key) const {
    return this->tree_.find_unique(key);
  }
  // Count is 0 or 1 for a unique container.
  size_type count(const key_type &key) const {
    return this->tree_.count_unique(key);
  }

  // Insertion routines.
  std::pair<iterator,bool> insert(const value_type &x) {
    return this->tree_.insert_unique(x);
  }
  iterator insert(iterator position, const value_type &x) {
    return this->tree_.insert_unique(position, x);
  }
  template <typename InputIterator>
  void insert(InputIterator b, InputIterator e) {
    this->tree_.insert_unique(b, e);
  }

  // Deletion routines.
  int erase(const key_type &key) {
    return this->tree_.erase_unique(key);
  }
  // Erase the specified iterator from the btree. The iterator must be valid
  // (i.e. not equal to end()).  Return an iterator pointing to the node after
  // the one that was erased (or end() if none exists).
  iterator erase(const iterator &iter) {
    return this->tree_.erase(iter);
  }
  void erase(const iterator &first, const iterator &last) {
    this->tree_.erase(first, last);
  }
};
// A common base class for btree_map and safe_btree_map.
// Adds operator[] with lazy value construction on top of the unique
// container.
template <typename Tree>
class btree_map_container : public btree_unique_container<Tree> {
  typedef btree_map_container<Tree> self_type;
  typedef btree_unique_container<Tree> super_type;

 public:
  typedef typename Tree::key_type key_type;
  typedef typename Tree::data_type data_type;
  typedef typename Tree::value_type value_type;
  typedef typename Tree::mapped_type mapped_type;
  typedef typename Tree::key_compare key_compare;
  typedef typename Tree::allocator_type allocator_type;

 private:
  // A pointer-like object which only generates its value when
  // dereferenced. Used by operator[] to avoid constructing an empty data_type
  // if the key already exists in the map.
  struct generate_value {
    generate_value(const key_type &k)
        : key(k) {
    }
    value_type operator*() const {
      return std::make_pair(key, data_type());
    }
    const key_type &key;
  };

 public:
  // Default constructor.
  btree_map_container(const key_compare &comp = key_compare(),
                      const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
  }

  // Copy constructor.
  btree_map_container(const self_type &x)
      : super_type(x) {
  }

  // Range constructor.
  template <class InputIterator>
  btree_map_container(InputIterator b, InputIterator e,
                      const key_compare &comp = key_compare(),
                      const allocator_type &alloc = allocator_type())
      : super_type(b, e, comp, alloc) {
  }

  // Insertion routines.
  // Inserts a default-constructed value only when `key` is absent.
  data_type& operator[](const key_type &key) {
    return this->tree_.insert_unique(key, generate_value(key)).first->second;
  }
};
// A common base class for btree_multiset and btree_multimap.
// Same shape as btree_unique_container but delegates to the *_multi
// tree operations, which allow duplicate keys.
template <typename Tree>
class btree_multi_container : public btree_container<Tree> {
  typedef btree_multi_container<Tree> self_type;
  typedef btree_container<Tree> super_type;

 public:
  typedef typename Tree::key_type key_type;
  typedef typename Tree::value_type value_type;
  typedef typename Tree::size_type size_type;
  typedef typename Tree::key_compare key_compare;
  typedef typename Tree::allocator_type allocator_type;
  typedef typename Tree::iterator iterator;
  typedef typename Tree::const_iterator const_iterator;

 public:
  // Default constructor.
  btree_multi_container(const key_compare &comp = key_compare(),
                        const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
  }

  // Copy constructor.
  btree_multi_container(const self_type &x)
      : super_type(x) {
  }

  // Range constructor.
  template <class InputIterator>
  btree_multi_container(InputIterator b, InputIterator e,
                        const key_compare &comp = key_compare(),
                        const allocator_type &alloc = allocator_type())
      : super_type(comp, alloc) {
    insert(b, e);
  }

  // Lookup routines.
  iterator find(const key_type &key) {
    return this->tree_.find_multi(key);
  }
  const_iterator find(const key_type &key) const {
    return this->tree_.find_multi(key);
  }
  size_type count(const key_type &key) const {
    return this->tree_.count_multi(key);
  }

  // Insertion routines.
  iterator insert(const value_type &x) {
    return this->tree_.insert_multi(x);
  }
  iterator insert(iterator position, const value_type &x) {
    return this->tree_.insert_multi(position, x);
  }
  template <typename InputIterator>
  void insert(InputIterator b, InputIterator e) {
    this->tree_.insert_multi(b, e);
  }

  // Deletion routines.
  // Erases every element matching `key`; returns the number removed.
  int erase(const key_type &key) {
    return this->tree_.erase_multi(key);
  }
  // Erase the specified iterator from the btree. The iterator must be valid
  // (i.e. not equal to end()).  Return an iterator pointing to the node after
  // the one that was erased (or end() if none exists).
  iterator erase(const iterator &iter) {
    return this->tree_.erase(iter);
  }
  void erase(const iterator &first, const iterator &last) {
    this->tree_.erase(first, last);
  }
};
} // namespace btree
#endif // UTIL_BTREE_BTREE_CONTAINER_H__

+ 130
- 0
c_src/gb_lru/btree_map.h 查看文件

@ -0,0 +1,130 @@
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// A btree_map<> implements the STL unique sorted associative container
// interface and the pair associative container interface (a.k.a map<>) using a
// btree. A btree_multimap<> implements the STL multiple sorted associative
// container interface and the pair associative container interface (a.k.a
// multimap<>) using a btree. See btree.h for details of the btree
// implementation and caveats.
#ifndef UTIL_BTREE_BTREE_MAP_H__
#define UTIL_BTREE_BTREE_MAP_H__
#include <algorithm>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "btree.h"
#include "btree_container.h"
namespace btree {
// The btree_map class is needed mainly for its constructors.
template <typename Key, typename Value,
typename Compare = std::less<Key>,
typename Alloc = std::allocator<std::pair<const Key, Value> >,
int TargetNodeSize = 256>
// std::map-compatible container backed by a btree. Exists mainly to pin the
// template parameters and re-expose the constructors of its base.
class btree_map : public btree_map_container<
btree<btree_map_params<Key, Value, Compare, Alloc, TargetNodeSize> > > {
typedef btree_map<Key, Value, Compare, Alloc, TargetNodeSize> self_type;
typedef btree_map_params<
Key, Value, Compare, Alloc, TargetNodeSize> params_type;
typedef btree<params_type> btree_type;
typedef btree_map_container<btree_type> super_type;
public:
typedef typename btree_type::key_compare key_compare;
typedef typename btree_type::allocator_type allocator_type;
public:
// Default constructor.
btree_map(const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: super_type(comp, alloc) {
}
// Copy constructor.
btree_map(const self_type &x)
: super_type(x) {
}
// Range constructor.
template <class InputIterator>
btree_map(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: super_type(b, e, comp, alloc) {
}
};
// ADL-findable swap so std::swap-style code picks the O(1) member swap.
template <typename K, typename V, typename C, typename A, int N>
inline void swap(btree_map<K, V, C, A, N> &x,
btree_map<K, V, C, A, N> &y) {
x.swap(y);
}
// The btree_multimap class is needed mainly for its constructors.
template <typename Key, typename Value,
typename Compare = std::less<Key>,
typename Alloc = std::allocator<std::pair<const Key, Value> >,
int TargetNodeSize = 256>
// std::multimap-compatible container backed by a btree. Like btree_map,
// exists mainly for its constructors.
class btree_multimap : public btree_multi_container<
btree<btree_map_params<Key, Value, Compare, Alloc, TargetNodeSize> > > {
typedef btree_multimap<Key, Value, Compare, Alloc, TargetNodeSize> self_type;
typedef btree_map_params<
Key, Value, Compare, Alloc, TargetNodeSize> params_type;
typedef btree<params_type> btree_type;
typedef btree_multi_container<btree_type> super_type;
public:
typedef typename btree_type::key_compare key_compare;
typedef typename btree_type::allocator_type allocator_type;
typedef typename btree_type::data_type data_type;
typedef typename btree_type::mapped_type mapped_type;
public:
// Default constructor.
btree_multimap(const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: super_type(comp, alloc) {
}
// Copy constructor.
btree_multimap(const self_type &x)
: super_type(x) {
}
// Range constructor.
template <class InputIterator>
btree_multimap(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: super_type(b, e, comp, alloc) {
}
};
// ADL-findable swap for btree_multimap; delegates to the member swap.
template <typename K, typename V, typename C, typename A, int N>
inline void swap(btree_multimap<K, V, C, A, N> &x,
btree_multimap<K, V, C, A, N> &y) {
x.swap(y);
}
} // namespace btree
#endif // UTIL_BTREE_BTREE_MAP_H__

+ 619
- 0
c_src/gb_lru/btreelru_nif.cpp 查看文件

@ -0,0 +1,619 @@
#include <iostream>
#include <new>
#include <string>
#include <vector>
#include "erl_nif.h"
#include "erlterm.h"
#include "lru.h"
using namespace std;
namespace { /* anonymous namespace starts */
/* Payload wrapped in every NIF resource handed to Erlang.
 * `allocated` lets close/1 mark the object destroyed so the resource
 * destructor does not delete it a second time.
 * NOTE(review): `emtx` is declared but never created, locked or destroyed
 * anywhere in this file — access to the LRU is not synchronized. */
typedef struct _obj_resource {
bool allocated;
void *object;
ErlNifMutex *emtx;
} object_resource;
ErlNifResourceFlags resource_flags = (ErlNifResourceFlags)(ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER);
ErlNifResourceType* lruResource;
ErlNifResourceType* iteratorResource;
/* atoms initialized once in load() */
ERL_NIF_TERM atom_ok;
ERL_NIF_TERM atom_key;
ERL_NIF_TERM atom_error;
ERL_NIF_TERM atom_invalid;
ERL_NIF_TERM atom_value;
ERL_NIF_TERM atom_max_size;
ERL_NIF_TERM atom_tab;
ERL_NIF_TERM atom_lru_old;
/* forward declarations for the resource destructors */
void lru_dtor(ErlNifEnv* env, void *lru);
void iterator_dtor(ErlNifEnv* env, void *it);
/* NIF load callback: registers the two resource types (LRU handle and
 * iterator handle) and interns the atoms used throughout the module.
 * Returns 0 for success.
 * NOTE(review): the enif_open_resource_type results are not checked for
 * NULL; a failed registration would surface later as a crash. */
int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info){
lruResource = enif_open_resource_type(env,
"btreelru_nif",
"lru",
lru_dtor,
resource_flags,
NULL);
iteratorResource = enif_open_resource_type(env,
"btreelru_nif",
"iterator",
iterator_dtor,
resource_flags,
NULL);
atom_ok = enif_make_atom(env, "ok");
atom_key = enif_make_atom(env, "key");
atom_error = enif_make_atom(env, "error");
atom_invalid = enif_make_atom(env, "invalid");
atom_value = enif_make_atom(env, "value");
atom_max_size = enif_make_atom(env, "max_size");
atom_tab = enif_make_atom(env, "tab");
atom_lru_old = enif_make_atom(env, "lru_old");
return 0;
}
/* Code-reload callback: nothing to migrate, report success. */
int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info){
return 0;
}
/* Code-upgrade callback: no private state to carry over, report success. */
int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data,ERL_NIF_TERM load_info){
return 0;
}
/* Resource destructor for LRU handles: frees the tree unless close/1
 * already did (allocated == false guards double delete). */
void lru_dtor(ErlNifEnv* env, void* _lru_btree) {
object_resource *lru_btree = (object_resource*) _lru_btree;
if (lru_btree->allocated)
delete (LRUBtree<ErlTerm,ErlTerm>*) lru_btree->object;
}
/* Resource destructor for iterator handles: frees the heap-allocated
 * btree iterator unless iterate_next already released it. */
void iterator_dtor(ErlNifEnv* env, void* _lru_iterator) {
object_resource *lru_iterator = (object_resource*) _lru_iterator;
if (lru_iterator->allocated)
delete (LRUBtree<ErlTerm,ErlTerm>::iterator*) lru_iterator->object;
}
/* Per-node cleanup hook passed to LRUBtree: releases the private NIF env
 * that owns the node's key and value terms (allocated in write/3). */
void node_free(LRUBtree<ErlTerm,ErlTerm> *bt_lru, LRUNode<ErlTerm,ErlTerm> *node) {
enif_free_env((ErlNifEnv*)node->kvenv);
return;
}
/* Eviction hook: if a pid was registered, send it {lru_old, Key, Value}
 * before the oldest node is dropped. `currenv` is the env of the NIF call
 * that triggered the eviction, so the terms are readable there.
 * NOTE(review): enif_send is called with msg_env == NULL — presumably
 * relying on the message being copied from `env`; confirm against the
 * erl_nif documentation for the targeted OTP release. */
void node_kickout(LRUBtree<ErlTerm,ErlTerm> *bt_lru, LRUNode<ErlTerm,ErlTerm> *node, void *currenv) {
ErlNifEnv *env = (ErlNifEnv *) currenv;
if (bt_lru->pid_set) {
enif_send(env, &bt_lru->pid, NULL, enif_make_tuple3(env, atom_lru_old, node->key.t, node->data.t));
}
return;
}
/* next(Lru, Key) -> {NextKey, NextValue} | {error, invalid}
 *
 * Looks Key up and returns the entry that follows it in the recency
 * chain (node->next), with both terms copied into the caller's env. */
ERL_NIF_TERM next(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    object_resource *res;

    if (argc != 2)
        return enif_make_badarg(env);
    if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
        return enif_make_badarg(env);

    LRUBtree<ErlTerm,ErlTerm> *tree = (LRUBtree<ErlTerm,ErlTerm> *) res->object;

    ErlTerm lookup;
    lookup.t = argv[1];

    LRUNode<ErlTerm,ErlTerm> *entry = tree->get(lookup);
    if (entry != NULL)
        entry = entry->next;
    if (entry == NULL)
        return enif_make_tuple2(env, atom_error, atom_invalid);

    ERL_NIF_TERM out_key = enif_make_copy(env, entry->key.t);
    ERL_NIF_TERM out_val = enif_make_copy(env, entry->data.t);
    return enif_make_tuple2(env, out_key, out_val);
}
/* prev(Lru, Key) -> {PrevKey, PrevValue} | {error, invalid}
 *
 * Looks Key up and returns the entry linked before it in the recency
 * chain (node->prev); terms are copied into the caller's env. */
ERL_NIF_TERM prev(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
LRUNode<ErlTerm,ErlTerm> *node;
ErlTerm key;
ErlTerm value;
if (argc != 2) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;
key.t = argv[1];
node = bt_lru->get(key);
if (!node)
return enif_make_tuple2(env, atom_error, atom_invalid);
node = node->prev;
if (!node)
return enif_make_tuple2(env, atom_error, atom_invalid);
key.t = enif_make_copy(env, node->key.t);
value.t = enif_make_copy(env, node->data.t);
return enif_make_tuple2(env, key.t, value.t);
}
/* create(MaxSize) -> {ok, Lru} | {error, max_size} | {error, allocation}
 *
 * Allocates a new LRUBtree and wraps it in a NIF resource; the resource
 * is owned by the returned term (we release our reference immediately).
 * Fixes vs. original: arity check added for consistency with the other
 * NIFs; plain `new` never returns NULL (it throws), so allocate with
 * std::nothrow to make the error branch reachable; the "alloction" atom
 * typo is corrected; the unused emtx field is initialized. */
ERL_NIF_TERM create(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    unsigned long max_size;
    object_resource *lru;
    LRUBtree<ErlTerm,ErlTerm> *bt_lru;
    ERL_NIF_TERM lru_term;

    if (argc != 1) {
        return enif_make_badarg(env);
    }
    /* get max_size */
    if (enif_get_ulong(env, argv[0], &max_size) < 1){
        return enif_make_tuple2(env, atom_error, atom_max_size);
    }
    /* nothrow: allocation failure must reach the error tuple instead of
     * raising std::bad_alloc through the NIF boundary */
    if (!(bt_lru = new(std::nothrow) LRUBtree<ErlTerm,ErlTerm>(max_size, node_free, node_kickout))) {
        return enif_make_tuple2(env, atom_error, enif_make_atom(env, "allocation"));
    }
    lru = (object_resource *) enif_alloc_resource(lruResource, sizeof(object_resource));
    lru->object = bt_lru;
    lru->allocated = true;
    lru->emtx = NULL; /* declared in object_resource but never used; keep deterministic */
    lru_term = enif_make_resource(env, lru);
    enif_release_resource(lru);
    return enif_make_tuple2(env, atom_ok, lru_term);
}
/* seek(Lru, Key) -> {{Key, Value}, Iterator} | {error, invalid}
 *
 * Positions a new heap-allocated btree iterator at the first entry whose
 * key is >= Key (lower_bound) and returns it as a resource together with
 * the entry it points at.
 * NOTE(review): unlike the other NIFs there is no argc check here, and
 * the returned K/V terms are NOT copied with enif_make_copy — they live
 * in the node's private env; confirm this is safe for the caller.
 * The iterator is invalidated by any mutation of the table. */
ERL_NIF_TERM seek(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
object_resource *it;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
LRUBtree<ErlTerm,ErlTerm>::iterator *bt_it_;
LRUBtree<ErlTerm,ErlTerm>::iterator bt_it;
ErlTerm key;
ERL_NIF_TERM it_term;
ERL_NIF_TERM kv;
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
key.t = argv[1];
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *)lru->object;
bt_it = bt_lru->bmap.lower_bound(key);
if ( bt_it == bt_lru->bmap.end() ) {
return enif_make_tuple2(env, atom_error, atom_invalid);
}
/* persist the stack iterator on the heap so it can outlive this call */
bt_it_ = new LRUBtree<ErlTerm,ErlTerm>::iterator;
*bt_it_ = bt_it;
it = (object_resource *) enif_alloc_resource(iteratorResource, sizeof(object_resource));
it->object = bt_it_;
it->allocated = true;
it_term = enif_make_resource(env, it);
enif_release_resource(it);
kv = enif_make_tuple2(env, bt_it->second->key.t, bt_it->second->data.t);
return enif_make_tuple2(env, kv, it_term);
}
/* iterate_next(Lru, Iterator) -> {ok, {Key, Value}} | {error, invalid}
 *
 * Advances the iterator resource created by seek/2. When the end of the
 * table is reached the heap iterator is freed and the resource is marked
 * dead, so further calls return {error, invalid}.
 * NOTE(review): the K/V terms are not enif_make_copy'd into the caller's
 * env, and the iterator is invalid after any table mutation — verify both
 * assumptions against the callers. */
ERL_NIF_TERM iterate_next(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
object_resource *it;
LRUBtree<ErlTerm,ErlTerm>::iterator *bt_it_;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
ERL_NIF_TERM kv;
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[1], iteratorResource, (void **) &it)) {
return enif_make_badarg(env);
}
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *)lru->object;
bt_it_ = (LRUBtree<ErlTerm,ErlTerm>::iterator *) it->object;
if (bt_it_ == NULL)
return enif_make_tuple2(env, atom_error, atom_invalid);
(*bt_it_)++;
if ( *bt_it_ == bt_lru->bmap.end() ) {
/* exhausted: release the iterator now rather than waiting for GC */
it->allocated = false;
delete bt_it_;
it->object = NULL;
return enif_make_tuple2(env, atom_error, atom_invalid);
}
kv = enif_make_tuple2(env, (*bt_it_)->second->key.t, (*bt_it_)->second->data.t);
return enif_make_tuple2(env, atom_ok, kv);
}
/* close(Lru) -> ok
 *
 * Deletes the underlying tree immediately; `allocated = false` stops the
 * resource destructor from deleting it again when the term is GC'd.
 * NOTE(review): lru->object is left dangling — any other NIF called with
 * this handle after close/1 is a use-after-free. There is no guard. */
ERL_NIF_TERM close(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
if (argc != 1) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *)lru->object;
lru->allocated = false;
delete bt_lru;
return atom_ok;
}
/* read(Lru, Key) -> {ok, {Key, Value}} | {error, invalid}
 *
 * Plain lookup; the stored key and value are copied into the caller's
 * env before being returned. */
ERL_NIF_TERM read(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    object_resource *res;

    if (argc != 2)
        return enif_make_badarg(env);
    if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
        return enif_make_badarg(env);

    LRUBtree<ErlTerm,ErlTerm> *tree = (LRUBtree<ErlTerm,ErlTerm> *) res->object;

    ErlTerm lookup;
    lookup.t = argv[1];

    LRUNode<ErlTerm,ErlTerm> *entry = tree->get(lookup);
    if (entry == NULL)
        return enif_make_tuple2(env, atom_error, atom_invalid);

    ERL_NIF_TERM pair = enif_make_tuple2(env,
                                         enif_make_copy(env, entry->key.t),
                                         enif_make_copy(env, entry->data.t));
    return enif_make_tuple2(env, atom_ok, pair);
}
/* remove(Lru, Key) -> ok
 *
 * Erases Key from the table (a no-op for a missing key) and always
 * returns ok. */
ERL_NIF_TERM remove(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    object_resource *res;

    if (argc != 2)
        return enif_make_badarg(env);
    if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
        return enif_make_badarg(env);

    ErlTerm victim;
    victim.t = argv[1];
    ((LRUBtree<ErlTerm,ErlTerm> *) res->object)->erase(victim);
    return atom_ok;
}
/* oldest(Lru) -> {Key, Value} | {error, invalid}
 *
 * Returns the least recently written entry (the next eviction candidate),
 * copied into the caller's env; {error, invalid} when the table is empty. */
ERL_NIF_TERM oldest(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
LRUNode<ErlTerm,ErlTerm> *node;
ERL_NIF_TERM key;
ERL_NIF_TERM value;
if (argc != 1) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;
node = bt_lru->getOldest();
if (!node)
return enif_make_tuple2(env, atom_error, atom_invalid);
key = enif_make_copy(env, node->key.t);
value = enif_make_copy(env, node->data.t);
return enif_make_tuple2(env, key, value);
}
/* latest(Lru) -> {Key, Value} | {error, invalid}
 *
 * Returns the most recently written entry, copied into the caller's env;
 * {error, invalid} when the table is empty. */
ERL_NIF_TERM latest(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
LRUNode<ErlTerm,ErlTerm> *node;
ERL_NIF_TERM key;
ERL_NIF_TERM value;
if (argc != 1) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;
// last is "last in" in the lru
node = bt_lru->getLatest();
if (!node)
return enif_make_tuple2(env, atom_error, atom_invalid);
key = enif_make_copy(env, node->key.t);
value = enif_make_copy(env, node->data.t);
return enif_make_tuple2(env, key, value);
}
/* last(Lru) -> {Key, Value} | {error, invalid}
 *
 * Returns the entry with the GREATEST key in btree order (not recency
 * order). Terms are copied into the caller's env.
 * Fix: the original dereferenced bmap.rbegin() unconditionally, which is
 * undefined behaviour on an empty table; guard with empty() first. */
ERL_NIF_TERM last(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    object_resource *lru;
    LRUBtree<ErlTerm,ErlTerm> *bt_lru;
    LRUNode<ErlTerm,ErlTerm> *node;
    ERL_NIF_TERM key;
    ERL_NIF_TERM value;

    if (argc != 1) {
        return enif_make_badarg(env);
    }
    if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
        return enif_make_badarg(env);
    }
    bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;
    /* rbegin() of an empty map must not be dereferenced */
    if (bt_lru->bmap.empty()) {
        return enif_make_tuple2(env, atom_error, atom_invalid);
    }
    node = bt_lru->bmap.rbegin()->second;
    if (!node)
        return enif_make_tuple2(env, atom_error, atom_invalid);
    key = enif_make_copy(env, node->key.t);
    value = enif_make_copy(env, node->data.t);
    return enif_make_tuple2(env, key, value);
}
/* first(Lru) -> {Key, Value} | {error, invalid}
 *
 * Returns the entry with the SMALLEST key in btree order (not recency
 * order). Terms are copied into the caller's env.
 * Fix: the original dereferenced bmap.begin() unconditionally, which is
 * undefined behaviour on an empty table; guard with empty() first. */
ERL_NIF_TERM first(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    object_resource *lru;
    LRUBtree<ErlTerm,ErlTerm> *bt_lru;
    LRUNode<ErlTerm,ErlTerm> *node;
    ERL_NIF_TERM key;
    ERL_NIF_TERM value;

    if (argc != 1) {
        return enif_make_badarg(env);
    }
    if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
        return enif_make_badarg(env);
    }
    bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;
    /* begin() of an empty map must not be dereferenced */
    if (bt_lru->bmap.empty()) {
        return enif_make_tuple2(env, atom_error, atom_invalid);
    }
    node = bt_lru->bmap.begin()->second;
    if (!node)
        return enif_make_tuple2(env, atom_error, atom_invalid);
    key = enif_make_copy(env, node->key.t);
    value = enif_make_copy(env, node->data.t);
    return enif_make_tuple2(env, key, value);
}
/* write(Lru, Key, Value) -> ok
 *
 * Inserts or overwrites an entry. Key and Value are copied into a private
 * per-entry env (kv_env) so they survive after this call returns; that env
 * is released by node_free when the entry is erased or evicted.
 * Accounting is per-entry (size = 1), not per-byte — max_size is therefore
 * a max entry count. */
ERL_NIF_TERM write(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
ErlTerm key;
ErlTerm value;
ErlNifEnv *kv_env;
size_t size;
if (argc != 3) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
bt_lru = (LRUBtree<ErlTerm, ErlTerm> *) lru->object;
kv_env = enif_alloc_env();
key.t = enif_make_copy(kv_env, argv[1]);
value.t = enif_make_copy(kv_env, argv[2]);
/* do not use the size of term
size = enif_size_term(key.t);
size += enif_size_term(value.t);
*/
/* size based on entries */
size = 1;
bt_lru->put(key, value, kv_env, env, size);
return atom_ok;
}
/* register_pid(Lru, Pid) -> ok
 *
 * Stores a local pid to receive {lru_old, Key, Value} eviction messages
 * (sent from node_kickout). Only one pid is kept. */
ERL_NIF_TERM register_pid(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
if (argc != 2) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;
if (!enif_get_local_pid(env, argv[1], &(bt_lru->pid))) {
return enif_make_badarg(env);
}
bt_lru->pid_set = true;
return atom_ok;
}
/* unregister_pid(Lru) -> ok
 *
 * Disables eviction notifications; the stored pid is simply ignored. */
ERL_NIF_TERM unregister_pid(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
if (argc != 1) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;
bt_lru->pid_set = false;
return atom_ok;
}
/* get_registered_pid(Lru) -> Pid | {error, invalid}
 *
 * Returns the currently registered eviction receiver, or {error, invalid}
 * if none is set. */
ERL_NIF_TERM get_registered_pid(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
if (argc != 1) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;
if (!bt_lru->pid_set) {
return enif_make_tuple2(env, atom_error, atom_invalid);
}
return enif_make_pid(env, &(bt_lru->pid));
}
/* get_size(Lru) -> non_neg_integer()
 *
 * Returns the current accumulated size (entry count under write/3's
 * per-entry accounting). */
ERL_NIF_TERM get_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    object_resource *res;

    if (argc != 1)
        return enif_make_badarg(env);
    if (!enif_get_resource(env, argv[0], lruResource, (void **) &res))
        return enif_make_badarg(env);

    LRUBtree<ErlTerm,ErlTerm> *tree = (LRUBtree<ErlTerm,ErlTerm> *) res->object;
    return enif_make_ulong(env, tree->getSize());
}
/* get_max_size(Lru) -> non_neg_integer()
 *
 * Returns the eviction threshold set at create/1 or via set_max_size/2. */
ERL_NIF_TERM get_max_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
if (argc != 1) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;
return enif_make_ulong(env, bt_lru->getMaxSize());
}
/* set_max_size(Lru, MaxSize) -> ok | {error, max_size}
 *
 * Updates the eviction threshold. Shrinking it does not evict immediately;
 * eviction happens one entry at a time on subsequent writes (check_size). */
ERL_NIF_TERM set_max_size(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
object_resource *lru;
unsigned long max_size;
LRUBtree<ErlTerm,ErlTerm> *bt_lru;
if (argc != 2) {
return enif_make_badarg(env);
}
if (!enif_get_resource(env, argv[0], lruResource, (void **) &lru)) {
return enif_make_badarg(env);
}
/* get max_size */
if (enif_get_ulong(env, argv[1], &max_size) < 1){
return enif_make_tuple2(env, atom_error, atom_max_size);
}
bt_lru = (LRUBtree<ErlTerm,ErlTerm> *) lru->object;
bt_lru->setMaxSize(max_size);
return atom_ok;
}
/* NIF dispatch table: name/arity pairs exported to the btree_lru module.
 * close/1 runs on a dirty I/O scheduler because deleting a large tree can
 * block for a long time. */
ErlNifFunc nif_funcs[] = {
{"create", 1, create},
{"close", 1, close, ERL_NIF_DIRTY_JOB_IO_BOUND},
{"register_pid", 2, register_pid},
{"unregister_pid", 1, unregister_pid},
{"get_registered_pid", 1, get_registered_pid},
{"get_size", 1, get_size},
{"get_max_size", 1, get_max_size},
{"set_max_size", 2, set_max_size},
{"oldest", 1, oldest},
{"latest", 1, latest},
{"last", 1, last},
{"first", 1, first},
{"read", 2, read},
{"next", 2, next},
{"prev", 2, prev},
{"seek", 2, seek},
{"iterate_next", 2, iterate_next},
{"remove", 2, remove},
{"write", 3, write}
};
} /* anonymous namespace ends */
ERL_NIF_INIT(btree_lru, nif_funcs, load, reload, upgrade, NULL)

+ 71
- 0
c_src/gb_lru/erlterm.h 查看文件

@ -0,0 +1,71 @@
#include "erl_nif.h"
// Thin value wrapper around an ERL_NIF_TERM so Erlang terms can be used
// as keys/values in C++ containers. All comparisons delegate to
// enif_compare, which defines a total order over terms.
class ErlTerm {
public:
    ERL_NIF_TERM t;

    // Route heap allocation of ErlTerm objects through the VM allocator.
    static void *operator new(size_t size) {
        return enif_alloc(size);
    }
    static void operator delete(void *block) {
        enif_free(block);
    }

    // const and non-const overloads are both kept to preserve the
    // original overload set for existing callers.
    bool operator< (const ErlTerm &term) {
        return enif_compare(t, term.t) < 0;
    }
    bool operator< (ErlTerm &term) {
        return enif_compare(t, term.t) < 0;
    }
    bool operator> (const ErlTerm &term) {
        return enif_compare(t, term.t) > 0;
    }
    bool operator> (ErlTerm &term) {
        return enif_compare(t, term.t) > 0;
    }
    bool operator== (const ErlTerm &term) {
        return enif_compare(t, term.t) == 0;
    }
    bool operator== (ErlTerm &term) {
        return enif_compare(t, term.t) == 0;
    }
};
// Free-function ordering over const references; required by containers
// (e.g. btree_map) that compare two const ErlTerm values.
inline bool operator < (const ErlTerm &a, const ErlTerm &b) {
    return enif_compare(a.t, b.t) < 0;
}
#if 0
// extend std::hash to understand ErlTerm used by hashmap not btree
namespace std {
template <>
struct hash<ErlTerm>
{
size_t operator()(const ErlTerm& term) const
{
return (size_t) enif_hash_term(term.t);
}
};
}
#endif

+ 266
- 0
c_src/gb_lru/lru.h 查看文件

@ -0,0 +1,266 @@
#include "btree_map.h"
#include <algorithm>
#include <iostream>
#include "murmurhash2.h"
#include "binary.h"
#include "erl_nif.h"
// extend std::hash to understand Binary type
// extend std::hash to understand Binary type
// Hashes the raw byte payload with MurmurHash2 and a fixed seed (4242).
// NOTE(review): Binary is declared in binary.h (not visible here);
// presumably `bin`/`size` are its payload pointer and byte length.
namespace std {
template <>
struct hash<Binary>
{
size_t operator()(const Binary& b) const
{
return MurmurHash2(b.bin, b.size, 4242);
}
};
}
// One cache entry: key/value pair plus the intrusive links of the recency
// chain maintained by LRUBtree. `kvenv` is an opaque per-entry resource
// (an ErlNifEnv in this project) released by the tree's node_free hook;
// `size` is this entry's contribution to the tree's size accounting.
template <typename K, typename V>
struct LRUNode
{
K key;
V data;
void *kvenv;
LRUNode<K,V> *prev;
LRUNode<K,V> *next;
size_t size;
LRUNode(void *kvenv = NULL, size_t size=0) : kvenv(kvenv), prev(NULL), next(NULL), size(size) { }
/*
static void *LRUNode<ErlTerm,ErlTerm>::operator new(size_t size) {
return enif_alloc(size);
}
static void operator delete(void *block) {
enif_free(block);
}
*/
// Debug helper: print up to 10 keys following this node; flags a cycle
// if the chain is longer than the probe limit.
void printChain() {
LRUNode<K,V>* node;
int i=11;
std::cout << "(";
for(node = this; node && i; node = node->next, i--) {
std::cout << node->key << " -> ";
}
if (node) {
std::cout << " loop detection end ";
} else {
std::cout << " end ";
}
std::cout << ")" << std::endl;
}
// Debug helper: print the neighbours' keys.
void printNextPrevKey() {
std::cout << "(";
printNextKey();
printPrevKey();
std::cout << ")";
}
void printNextKey() {
if (next) {
std::cout << "next key " << next->key << " ";
}
}
void printPrevKey() {
if (prev) {
std::cout << "prev key " << prev->key << " ";
}
}
};
// LRU cache: a btree_map index (key -> node*) plus an intrusive doubly
// linked list ordering nodes by recency. `latest` is the most recently
// written node, `oldest` the next eviction candidate. Eviction happens in
// put() via check_size() once the accumulated size exceeds max_size.
//
// Fix vs. original: get()/erase()/put() used bmap[key] for LOOKUP, but
// btree_map::operator[] (like std::map's) INSERTS a default-constructed
// entry — a NULL node pointer — for every missed key, silently growing
// the index forever. All lookups now use bmap.find(), which is
// side-effect free; external behaviour (return values) is unchanged.
template <class K,class V>
class LRUBtree {
private:
  LRUNode<K,V> *oldest;    // tail of the recency chain; evicted first
  LRUNode<K,V> *latest;    // head of the recency chain; most recently put
  unsigned long size;      // sum of per-node sizes currently held
  unsigned long max_size;  // threshold checked by check_size()
  // Optional hooks: node_free releases per-node resources (e.g. its env);
  // node_kickout is told about an eviction before the node is destroyed.
  void (*node_free)(LRUBtree<K,V> *lru, LRUNode<K,V> *node);
  void (*node_kickout)(LRUBtree<K,V> *lru, LRUNode<K,V> *node, void *call_env);
  typedef btree::btree_map<K, LRUNode<K,V>*> LRUBtree_map;

public:
  LRUBtree_map bmap;       // key -> node index
  bool pid_set = false;    // true once a notification pid was registered
  ErlNifPid pid;           // receiver of eviction notifications

  typedef typename LRUBtree_map::iterator iterator;
  typedef typename LRUBtree_map::reverse_iterator reverse_iterator;

  // Debug helper: print the key of the most recent entry.
  void printLatest() {
    if (latest) {
      std::cout << " latest " << latest->key;
    } else {
      std::cout << " no data in lru ";
    }
  }

private:
  // Unlink `node` from the recency chain (fixing oldest/latest) and run
  // node_free on it. The node is NOT removed from bmap nor deleted here;
  // callers do that.
  LRUNode<K,V>* erase(LRUNode<K,V> *node) {
    if (node->next) {
      node->next->prev = node->prev;
    }
    if (node->prev) {
      node->prev->next = node->next;
    }
    if (node == oldest) {
      oldest = node->prev;
    }
    if (node == latest) {
      latest = node->next;
    }
    if (node_free) {
      node_free(this, node);
    }
    node->next = NULL;
    node->prev = NULL;
    return node;
  }

  // Debug helper: print the key of the oldest entry.
  void printOldest() {
    if(oldest) {
      std::cout << " oldest " << oldest->key;
    } else {
      std::cout << " no data in lru ";
    }
  }

  // Evict (at most) the oldest entry when over budget, notifying
  // node_kickout first so e.g. an Erlang process can be told.
  void check_size(void *call_env) {
    if (size > max_size) {
      if (oldest) { // remove check if oldest exist and rely on max_size always being positive
        if (node_kickout)
          node_kickout(this, oldest, call_env);
        erase(oldest->key);
      }
    }
  }

#define SIZE_100MB 100*1024*1024

public:
  LRUBtree(unsigned long max_size = SIZE_100MB,
           void (*node_free)(LRUBtree<K,V> *lru, LRUNode<K,V> *node) = NULL,
           void (*node_kickout)(LRUBtree<K,V> *lru, LRUNode<K,V> *node, void *call_env) = NULL)
    : oldest(NULL), latest(NULL), size(0), max_size(max_size), node_free(node_free),
      node_kickout(node_kickout) { }

  // Walk the recency chain freeing every node (and its resources).
  ~LRUBtree() {
    LRUNode<K,V> *node;
    LRUNode<K,V> *next;
    node = latest;
    while(node) {
      if (node_free) {
        node_free(this, node);
      }
      next = node->next;
      delete node;
      node = next;
    }
  }

  void printSize() {
    std::cout << "size " << size << std::endl;
  }

  unsigned long getSize() {
    return size;
  }

  unsigned long getMaxSize() {
    return max_size;
  }

  // Takes effect lazily: the next put() evicts one entry if over budget.
  void setMaxSize(unsigned long max_size) {
    this->max_size = max_size;
  }

  // Remove `key` entirely: unlink, drop from the index, fix accounting,
  // free the node. No-op when the key is absent.
  void erase(K key) {
    typename LRUBtree_map::iterator it = bmap.find(key);
    if (it == bmap.end()) {
      return;
    }
    LRUNode<K,V> *node = it->second;
    erase(node);
    bmap.erase(it);
    size -= node->size;
    delete node;
  }

  // Insert or overwrite `key`. An existing node is recycled: unlinked,
  // refreshed, and relinked at the head of the recency chain.
  inline void put(K key, V data,
                  void *kvenv = NULL, void *call_env = NULL,
                  size_t size = 1) {
    LRUNode<K,V> *node;
    typename LRUBtree_map::iterator it;

    this->size += size;
    check_size(call_env);

    it = bmap.find(key);
    if (it != bmap.end()) {
      // overwrite already existing key
      node = it->second;
      this->size -= node->size;
      erase(node);
      node->kvenv = kvenv;
      node->next = latest;
      node->size = size;
      if (node->next) {
        node->next->prev = node;
      }
      if (!oldest) {
        oldest = node;
      }
      latest = node;
      node->key = key;
      node->data = data;
    }
    else if (!oldest) {
      // first entry in an empty cache
      node = new LRUNode<K,V>;
      node->key = key;
      node->data = data;
      node->kvenv = kvenv;
      node->size = size;
      oldest = node;
      latest = node;
      bmap[node->key] = node;
    }
    else {
      // new entry in a non-empty cache: link at the head
      node = new LRUNode<K,V>;
      node->key = key;
      node->data = data;
      node->kvenv = kvenv;
      node->size = size;
      latest->prev = node;
      node->next = latest;
      latest = node;
      bmap[node->key] = node;
    }
  }

  // Lookup without recency update; NULL when absent (and, unlike the
  // original, without inserting a stub entry into the index).
  LRUNode<K,V>* get(K key) {
    typename LRUBtree_map::iterator it = bmap.find(key);
    if (it == bmap.end()) {
      return NULL;
    }
    return it->second;
  }

  LRUNode<K,V>* getOldest() {
    return oldest;
  }

  LRUNode<K,V>* getLatest() {
    return latest;
  }

  LRUNode<K,V>* getNext(LRUNode<K,V> *node) {
    return node->next;
  }

  LRUNode<K,V>* getPrev(LRUNode<K,V> *node) {
    return node->prev;
  }
};

+ 73
- 0
c_src/gb_lru/murmurhash2.h 查看文件

@ -0,0 +1,73 @@
//-----------------------------------------------------------------------------
// MurmurHash2, by Austin Appleby
//
// Note - This code makes a few assumptions about how your machine behaves -
// 1. sizeof(int) == 4
//
// And it has a few limitations -
// 1. It will not work incrementally.
// 2. Output is defined over little-endian byte order. The 4-byte block is
//    assembled byte-wise below, which (a) removes the original's
//    unaligned-pointer read (undefined behaviour on strict-alignment
//    platforms) and (b) makes the result byte-order independent. The only
//    user in this project is in-process hashing (std::hash<Binary>), so
//    values are never persisted or compared across machines.
unsigned int MurmurHash2 ( const void * key, int len, unsigned int seed )
{
    // 'm' and 'r' are mixing constants generated offline.
    // They're not really 'magic', they just happen to work well.
    const unsigned int m = 0x5bd1e995;
    const int r = 24;

    // Initialize the hash to a 'random' value
    unsigned int h = seed ^ len;

    // Mix 4 bytes at a time into the hash
    const unsigned char * data = (const unsigned char *)key;
    while(len >= 4)
    {
        // Assemble the block byte-wise (little-endian) instead of casting
        // the byte pointer to unsigned int* — safe for any alignment.
        unsigned int k = (unsigned int)data[0]
                       | ((unsigned int)data[1] << 8)
                       | ((unsigned int)data[2] << 16)
                       | ((unsigned int)data[3] << 24);

        k *= m;
        k ^= k >> r;
        k *= m;

        h *= m;
        h ^= k;

        data += 4;
        len -= 4;
    }

    // Handle the last few bytes of the input array
    switch(len)
    {
    case 3: h ^= data[2] << 16; /* fall through */
    case 2: h ^= data[1] << 8;  /* fall through */
    case 1: h ^= data[0];
            h *= m;
    };

    // Do a few final mixes of the hash to ensure the last few
    // bytes are well-incorporated.
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;

    return h;
}

+ 7
- 0
c_src/gb_lru/rebar.config 查看文件

@ -0,0 +1,7 @@
{port_specs, [
{"../../priv/btreelru_nif.so", ["btreelru_nif.cpp"]}
]}.

+ 90
- 0
c_src/native_array/native_array_nif.c 查看文件

@ -0,0 +1,90 @@
#include "erl_nif.h"
#define A_OK(env) enif_make_atom(env, "ok")
#define assert_badarg(S, Env) if (! S) { return enif_make_badarg(env); }
static ErlNifResourceType* array_handle = NULL;
static void array_handle_cleanup(ErlNifEnv* env, void* arg) {}
/* NIF load callback: registers the (unused placeholder) resource type and
 * allocates the private slot table — 1000 array pointers indexed by the
 * integer "ref" that new/get/put/delete receive from Erlang.
 * Fixes vs. original: failures of enif_open_resource_type/enif_alloc are
 * reported, and every slot is zeroed so a get/put/delete on a
 * never-allocated index sees NULL instead of uninitialized garbage. */
static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info)
{
    ErlNifResourceFlags flags = ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER;
    array_handle = enif_open_resource_type(env, "native_array_nif", "array_handle",
        &array_handle_cleanup, flags, 0);
    if (array_handle == NULL) {
        return -1;
    }
    unsigned char **slots = (unsigned char**)enif_alloc(1000 * sizeof(void*));
    if (slots == NULL) {
        return -1;
    }
    for (int i = 0; i < 1000; i++) {
        slots[i] = NULL;
    }
    *priv = slots;
    return 0;
}
/* NIF unload callback: frees the slot table itself.
 * NOTE(review): any arrays still referenced by the slots are NOT freed
 * here and leak unless delete/1 was called for each of them. */
static void unload(ErlNifEnv* env, void* priv)
{
enif_free(priv);
}
/* new(RefIndex, Length) -> ok | badarg
 *
 * Allocates a Length-byte array and stores its pointer in private slot
 * RefIndex. Fixes vs. original: RefIndex is bounds-checked against the
 * 1000-slot table created in load() (out-of-range previously wrote past
 * the table), and the enif_alloc result is checked.
 * NOTE(review): re-using an occupied slot leaks the old array; callers
 * are expected to delete/1 first. */
static ERL_NIF_TERM new_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* slot index */
    int refindex;
    assert_badarg(enif_get_int(env, argv[0], &refindex), env);
    /* reject indexes outside the 1000-entry slot table */
    assert_badarg((refindex >= 0 && refindex < 1000), env);
    /* byte length of the new array */
    unsigned long length;
    assert_badarg(enif_get_ulong(env, argv[1], &length), env);
    unsigned char* ref = enif_alloc(length);
    assert_badarg(ref, env);
    *((unsigned char**)enif_priv_data(env) + refindex) = ref;
    return A_OK(env);
}
/* get(RefIndex, Offset) -> Byte | badarg
 *
 * Reads one byte from the array in slot RefIndex. Offset is 1-based
 * (Erlang convention): byte i is read from ref[i - 1].
 * NOTE(review): neither RefIndex nor Offset is bounds-checked against the
 * slot count / array length, so out-of-range values read out of bounds. */
static ERL_NIF_TERM get_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
// ref
int refindex;
assert_badarg(enif_get_int(env, argv[0], &refindex), env);
unsigned char* ref = *((unsigned char**)enif_priv_data(env) + refindex);
assert_badarg(ref, env);
// offset
unsigned long offset;
assert_badarg(enif_get_ulong(env, argv[1], &offset), env);
return enif_make_int(env, (int)(*(ref + offset - 1)));
}
/* put(RefIndex, Offset, Value) -> ok | badarg
 *
 * Writes the low byte of Value at 1-based Offset in the array held by
 * slot RefIndex.
 * NOTE(review): unlike get_nif there is no NULL check on the slot, and
 * neither RefIndex nor Offset is bounds-checked — out-of-range input
 * writes out of bounds. */
static ERL_NIF_TERM put_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
// ref
int refindex;
assert_badarg(enif_get_int(env, argv[0], &refindex), env);
unsigned char* ref = *((unsigned char**)enif_priv_data(env) + refindex);
// offset
unsigned long offset;
assert_badarg(enif_get_ulong(env, argv[1], &offset), env);
// newval
unsigned int newval;
assert_badarg(enif_get_uint(env, argv[2], &newval), env);
// store the low byte only (arrays are byte arrays)
*(ref + offset - 1) = (unsigned char)newval;
return A_OK(env);
}
/* delete(RefIndex) -> ok | badarg
 *
 * Frees the array held in slot RefIndex.
 * Fixes vs. original: RefIndex is bounds-checked, an empty (NULL) slot is
 * rejected with badarg, and the slot is cleared after freeing so a second
 * delete (or a later get/put) cannot touch freed memory. */
static ERL_NIF_TERM delete_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* slot index */
    int refindex;
    assert_badarg(enif_get_int(env, argv[0], &refindex), env);
    assert_badarg((refindex >= 0 && refindex < 1000), env);
    unsigned char** slot = (unsigned char**)enif_priv_data(env) + refindex;
    unsigned char* ref = *slot;
    assert_badarg(ref, env);
    enif_free(ref);
    *slot = NULL;
    return A_OK(env);
}
/* Dispatch table for the native_array module: fixed-size byte arrays
 * addressed by an integer slot index. */
static ErlNifFunc nif_funcs[] = {
{"new", 2, new_nif},
{"get", 2, get_nif},
{"put", 3, put_nif},
{"delete", 1, delete_nif},
};
ERL_NIF_INIT(native_array, nif_funcs, &load, NULL, NULL, &unload)

+ 7
- 0
c_src/native_array/rebar.config 查看文件

@ -0,0 +1,7 @@
{port_specs, [
{"../../priv/native_array_nif.so", ["*.c"]}
]}.

+ 905
- 0
c_src/neural/NeuralTable.cpp 查看文件

@ -0,0 +1,905 @@
#include "NeuralTable.h"
/* !!!! A NOTE ON KEYS !!!!
* Keys should be integer values passed from the erlang emulator,
* and should be generated by a hashing function. There is no easy
* way to hash an erlang term from a NIF, but ERTS is more than
* capable of doing so.
*
* Additionally, this workaround means that traditional collision
* handling mechanisms for hash tables will not work without
* special consideration. For instance, to compare keys as you
* would by storing linked lists, you must retrieve the stored
* tuple and call enif_compare or enif_is_identical on the key
* elements of each tuple.
*/
/* Static registry of all tables keyed by their atom name, the global
 * shutdown flag read by background threads, and the mutex guarding the
 * registry. */
table_set NeuralTable::tables;
atomic<bool> NeuralTable::running(true);
ErlNifMutex *NeuralTable::table_mutex;
/* Builds one table: per-bucket term envs, rwlocks and garbage counters,
 * then starts the background GC and batch workers. `kp` is the 1-based
 * position of the key element inside stored tuples. */
NeuralTable::NeuralTable(unsigned int kp) {
for (int i = 0; i < BUCKET_COUNT; ++i) {
ErlNifEnv *env = enif_alloc_env();
env_buckets[i] = env;
locks[i] = enif_rwlock_create("neural_table");
garbage_cans[i] = 0;
reclaimable[i] = enif_make_list(env, 0);
}
start_gc();
start_batch();
key_pos = kp;
}
/* Stops the background workers first (so nothing touches the buckets),
 * then tears down every bucket's lock and env. */
NeuralTable::~NeuralTable() {
stop_batch();
stop_gc();
for (int i = 0; i < BUCKET_COUNT; ++i) {
enif_rwlock_destroy(locks[i]);
enif_free_env(env_buckets[i]);
}
}
/* ================================================================
* MakeTable
* Allocates a new table, assuming a unique atom identifier. This
* table is stored in a static container. All interactions with
* the table must be performed through the static class API.
*/
/* ================================================================
 * MakeTable
 * Allocates a new table, assuming a unique atom identifier. This
 * table is stored in a static container. All interactions with
 * the table must be performed through the static class API.
 * Registry mutation is guarded by table_mutex; returns badarg when a
 * table with that name already exists.
 * NOTE(review): the enif_get_atom_length / enif_get_uint results are not
 * checked, so a non-atom name or non-integer key_pos proceeds with
 * len/pos == 0.
 */
ERL_NIF_TERM NeuralTable::MakeTable(ErlNifEnv *env, ERL_NIF_TERM name, ERL_NIF_TERM key_pos) {
char *atom;
string key;
unsigned int len = 0,
pos = 0;
ERL_NIF_TERM ret;
// Allocate space for the name of the table
enif_get_atom_length(env, name, &len, ERL_NIF_LATIN1);
atom = (char*)enif_alloc(len + 1);
// Fetch the value of the atom and store it in a string (because I can, that's why)
enif_get_atom(env, name, atom, len + 1, ERL_NIF_LATIN1);
key = atom;
// Deallocate that space
enif_free(atom);
// Get the key position value
enif_get_uint(env, key_pos, &pos);
enif_mutex_lock(table_mutex);
if (NeuralTable::tables.find(key) != NeuralTable::tables.end()) {
// Table already exists? Bad monkey!
ret = enif_make_badarg(env);
} else {
// All good. Make the table
NeuralTable::tables[key] = new NeuralTable(pos);
ret = enif_make_atom(env, "ok");
}
enif_mutex_unlock(table_mutex);
return ret;
}
/* ================================================================
* GetTable
* Retrieves a handle to the table referenced by name, assuming
* such a table exists. If not, throw badarg.
*/
// Resolve a table handle from its name atom; NULL when no such table exists.
// NOTE(review): lookups are done without table_mutex — presumably safe because
// tables are only created, never removed, while the VM is running; verify.
NeuralTable* NeuralTable::GetTable(ErlNifEnv *env, ERL_NIF_TERM name) {
    unsigned name_len = 0;
    enif_get_atom_length(env, name, &name_len, ERL_NIF_LATIN1);
    // Copy the atom's text into a temporary buffer, then into a string key.
    char *name_buf = (char*)enif_alloc(name_len + 1);
    enif_get_atom(env, name, name_buf, name_len + 1, ERL_NIF_LATIN1);
    string table_name(name_buf);
    enif_free(name_buf);
    table_set::const_iterator entry = NeuralTable::tables.find(table_name);
    return (entry == NeuralTable::tables.end()) ? NULL : entry->second;
}
/* ================================================================
* Insert
* Inserts a tuple into the table with key.
*/
// Insert `object` under `key`, replacing any existing entry.
// Returns {ok, OldValue} when a value was displaced, otherwise 'ok'.
// The displaced term is handed to the reclaimer for GC accounting.
ERL_NIF_TERM NeuralTable::Insert(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object) {
NeuralTable *tb;
ERL_NIF_TERM ret, old;
unsigned long int entry_key = 0;
// Grab table or bail.
tb = GetTable(env, table);
if (tb == NULL) {
return enif_make_badarg(env);
}
// Get key value.
// NOTE(review): the return value is unchecked — a non-integer key leaves
// entry_key at 0 and the call operates on bucket 0; confirm callers hash first.
enif_get_ulong(env, key, &entry_key);
// Lock the key.
tb->rwlock(entry_key);
// Attempt to lookup the value. If nonempty, increment
// discarded term counter and return a copy of the
// old value
if (tb->find(entry_key, old)) {
tb->reclaim(entry_key, old);
ret = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_copy(env, old));
} else {
ret = enif_make_atom(env, "ok");
}
// Write that shit out
tb->put(entry_key, object);
// Oh, and unlock the key if you would.
tb->rwunlock(entry_key);
return ret;
}
/* ================================================================
* InsertNew
* Inserts a tuple into the table with key, assuming there is not
* a value with key already. Returns true if there was no value
* for key, or false if there was.
*/
// Insert `object` only when `key` is absent. Returns 'true' on insert,
// 'false' (and no write) when the key already holds a value.
ERL_NIF_TERM NeuralTable::InsertNew(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object) {
NeuralTable *tb;
ERL_NIF_TERM ret, old;
unsigned long int entry_key = 0;
// Get the table or bail
tb = GetTable(env, table);
if (tb == NULL) {
return enif_make_badarg(env);
}
// Get the key value
enif_get_ulong(env, key, &entry_key);
// Get write lock for the key; find+put must be atomic for check-then-act.
tb->rwlock(entry_key);
if (tb->find(entry_key, old)) {
// Key was found. Return false and do not insert
ret = enif_make_atom(env, "false");
} else {
// Key was not found. Return true and insert
tb->put(entry_key, object);
ret = enif_make_atom(env, "true");
}
// Release write lock for the key
tb->rwunlock(entry_key);
return ret;
}
/* ================================================================
* Increment
* Processes a list of update operations. Each operation specifies
* a position in the stored tuple to update and an integer to add
* to it.
*/
// Apply a list of {Pos, Incr} operations to the tuple stored under `key`,
// adding Incr to element Pos. Returns the list of updated values (reversed
// relative to `ops`), or badarg if the key is absent, a Pos is out of range,
// or a target element is not a number.
ERL_NIF_TERM NeuralTable::Increment(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) {
NeuralTable *tb;
ERL_NIF_TERM ret, old;
ERL_NIF_TERM it;
unsigned long int entry_key = 0;
// Get table handle or bail
tb = GetTable(env, table);
if (tb == NULL) {
return enif_make_badarg(env);
}
// Get key value
enif_get_ulong(env, key, &entry_key);
// Acquire read/write lock for key
tb->rwlock(entry_key);
// Try to read the value as it is
if (tb->find(entry_key, old)) {
// Value exists
ERL_NIF_TERM op_cell;
const ERL_NIF_TERM *tb_tpl;
const ERL_NIF_TERM *op_tpl;
ERL_NIF_TERM *new_tpl;
ErlNifEnv *bucket_env = tb->get_env(entry_key);
unsigned long int pos = 0;
long int incr = 0;
unsigned int ops_length = 0;
int op_arity = 0,
tb_arity = 0;
// Expand tuple to work on elements
enif_get_tuple(bucket_env, old, &tb_arity, &tb_tpl);
// Allocate space for a copy the contents of the table
// tuple and copy it in. All changes are to be made to
// the copy of the tuple.
new_tpl = (ERL_NIF_TERM*)enif_alloc(sizeof(ERL_NIF_TERM) * tb_arity);
memcpy(new_tpl, tb_tpl, sizeof(ERL_NIF_TERM) * tb_arity);
// Create empty list cell for return value.
ret = enif_make_list(env, 0);
// Set iterator to first cell of ops
it = ops;
while(!enif_is_empty_list(env, it)) {
long int value = 0;
enif_get_list_cell(env, it, &op_cell, &it); // op_cell = hd(it), it = tl(it)
enif_get_tuple(env, op_cell, &op_arity, &op_tpl); // op_arity = tuple_size(op_cell), op_tpl = [TplPos1, TplPos2]
enif_get_ulong(env, op_tpl[0], &pos); // pos = (uint64)op_tpl[0]
enif_get_long(env, op_tpl[1], &incr); // incr = (uint64)op_tpl[1]
// Is the operation trying to modify a nonexistant
// position?
if (pos <= 0 || pos > tb_arity) {
ret = enif_make_badarg(env);
goto bailout;
}
// Is the operation trying to add to a value that's
// not a number?
if (!enif_is_number(bucket_env, new_tpl[pos - 1])) {
ret = enif_make_badarg(env);
goto bailout;
}
// Update the value stored in the tuple.
// NOTE(review): reads a bucket_env-owned term via the caller env and
// assumes the element fits a long — bignums would be truncated; confirm.
enif_get_long(env, new_tpl[pos - 1], &value);
// The displaced number goes to the reclaim list for GC accounting.
tb->reclaim(entry_key, new_tpl[pos - 1]);
new_tpl[pos - 1] = enif_make_long(bucket_env, value + incr);
// Copy the new value to the head of the return list
ret = enif_make_list_cell(env, enif_make_copy(env, new_tpl[pos - 1]), ret);
}
tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity));
// Bailout allows cancelling the update opertion
// in case something goes wrong. It must always
// come after tb->put and before enif_free and
// rwunlock
bailout:
enif_free(new_tpl);
} else {
ret = enif_make_badarg(env);
}
// Release the rwlock for entry_key
tb->rwunlock(entry_key);
return ret;
}
/* ================================================================
* Unshift
* Processes a list of update operations. Each update operation is
* a tuple specifying the position of a list in the stored value to
* update and a list of values to append. Elements are shifted from
* the input list to the stored list, so:
*
* unshift([a,b,c,d]) results in [d,c,b,a]
*/
ERL_NIF_TERM NeuralTable::Unshift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) {
    NeuralTable *tb;
    ERL_NIF_TERM ret, old, it;
    unsigned long int entry_key;
    ErlNifEnv *bucket_env;
    tb = GetTable(env, table);
    if (tb == NULL) {
        return enif_make_badarg(env);
    }
    enif_get_ulong(env, key, &entry_key);
    tb->rwlock(entry_key);
    bucket_env = tb->get_env(entry_key);
    if (tb->find(entry_key, old)) {
        const ERL_NIF_TERM *old_tpl,
                           *op_tpl;
        ERL_NIF_TERM *new_tpl;
        int tb_arity = 0,
            op_arity = 0;
        unsigned long pos = 0;
        unsigned int new_length = 0;
        ERL_NIF_TERM op,
                     unshift,
                     copy_it,
                     copy_val;
        // Work on a mutable copy of the stored tuple's elements.
        enif_get_tuple(bucket_env, old, &tb_arity, &old_tpl);
        new_tpl = (ERL_NIF_TERM*)enif_alloc(sizeof(ERL_NIF_TERM) * tb_arity);
        memcpy(new_tpl, old_tpl, sizeof(ERL_NIF_TERM) * tb_arity);
        it = ops;
        ret = enif_make_list(env, 0);
        while (!enif_is_empty_list(env, it)) {
            // Each op is {Pos, ValuesToUnshift}.
            enif_get_list_cell(env, it, &op, &it);          // op = hd(it), it = tl(it)
            enif_get_tuple(env, op, &op_arity, &op_tpl);
            enif_get_ulong(env, op_tpl[0], &pos);           // tuple position to modify
            unshift = op_tpl[1];                            // values to unshift
            // Position must be within the stored tuple's bounds.
            if (pos <= 0 || pos > (unsigned long)tb_arity) {
                ret = enif_make_badarg(env);
                goto bailout;
            }
            // The payload must be a list.
            // BUGFIX: the original set badarg here but fell through and kept
            // iterating (no goto), so later ops still mutated new_tpl and the
            // badarg result could even be overwritten. Abort the whole op.
            if (!enif_is_list(env, unshift)) {
                ret = enif_make_badarg(env);
                goto bailout;
            }
            // Move values one by one onto the head of new_tpl[pos - 1],
            // copying each into the bucket's own env.
            copy_it = unshift;
            while (!enif_is_empty_list(env, copy_it)) {
                enif_get_list_cell(env, copy_it, &copy_val, &copy_it);
                new_tpl[pos - 1] = enif_make_list_cell(bucket_env, enif_make_copy(bucket_env, copy_val), new_tpl[pos - 1]);
            }
            // Report the new length of the modified list.
            enif_get_list_length(bucket_env, new_tpl[pos - 1], &new_length);
            ret = enif_make_list_cell(env, enif_make_uint(env, new_length), ret);
        }
        tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity));
        // bailout skips the store above but must still free new_tpl and
        // release the lock below.
        bailout:
        enif_free(new_tpl);
    } else {
        ret = enif_make_badarg(env);
    }
    tb->rwunlock(entry_key);
    return ret;
}
/* Shift: each op is {Pos, Count}; pops up to Count elements off the list at
 * tuple position Pos (Count < 0 drains the whole list). Returns one shifted
 * list per op. Removed terms are handed to the reclaimer. */
ERL_NIF_TERM NeuralTable::Shift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) {
    NeuralTable *tb;
    ERL_NIF_TERM ret, old, it;
    unsigned long int entry_key;
    ErlNifEnv *bucket_env;
    tb = GetTable(env, table);
    if (tb == NULL) {
        return enif_make_badarg(env);
    }
    enif_get_ulong(env, key, &entry_key);
    tb->rwlock(entry_key);
    bucket_env = tb->get_env(entry_key);
    if (tb->find(entry_key, old)) {
        const ERL_NIF_TERM *old_tpl;
        const ERL_NIF_TERM *op_tpl;
        ERL_NIF_TERM *new_tpl;
        int tb_arity = 0,
            op_arity = 0;
        unsigned long pos = 0;
        // BUGFIX: `count` was an unsigned long read with enif_get_ulong, so
        // the documented "count < 0 => drain everything" branch was dead code
        // (an unsigned value is never negative). Read it as a signed long.
        long count = 0;
        ERL_NIF_TERM op, shifted, reclaim;
        enif_get_tuple(bucket_env, old, &tb_arity, &old_tpl);
        new_tpl = (ERL_NIF_TERM*)enif_alloc(tb_arity * sizeof(ERL_NIF_TERM));
        memcpy(new_tpl, old_tpl, sizeof(ERL_NIF_TERM) * tb_arity);
        it = ops;
        ret = enif_make_list(env, 0);
        // Removed terms are tracked in the bucket's own env.
        reclaim = enif_make_list(bucket_env, 0);
        while (!enif_is_empty_list(env, it)) {
            enif_get_list_cell(env, it, &op, &it);
            enif_get_tuple(env, op, &op_arity, &op_tpl);
            enif_get_ulong(env, op_tpl[0], &pos);
            enif_get_long(env, op_tpl[1], &count);
            if (pos <= 0 || pos > (unsigned long)tb_arity) {
                ret = enif_make_badarg(env);
                goto bailout;
            }
            if (!enif_is_list(env, new_tpl[pos - 1])) {
                ret = enif_make_badarg(env);
                goto bailout;
            }
            shifted = enif_make_list(env, 0);
            if (count != 0) {
                ERL_NIF_TERM rest = new_tpl[pos - 1],
                             val;
                long taken = 0;
                // count > 0: take at most `count` cells; count < 0: drain all.
                while ((count < 0 || taken < count) && !enif_is_empty_list(bucket_env, rest)) {
                    enif_get_list_cell(bucket_env, rest, &val, &rest);
                    ++taken;
                    shifted = enif_make_list_cell(env, enif_make_copy(env, val), shifted);
                    // BUGFIX: the original consed the reclaim list with the
                    // caller's env even though `val` and `reclaim` live in
                    // bucket_env (Swap does this correctly); that mixes term
                    // environments, which erl_nif forbids.
                    reclaim = enif_make_list_cell(bucket_env, val, reclaim);
                }
                new_tpl[pos - 1] = rest;
            }
            ret = enif_make_list_cell(env, shifted, ret);
        }
        tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity));
        tb->reclaim(entry_key, reclaim);
        bailout:
        enif_free(new_tpl);
    } else {
        ret = enif_make_badarg(env);
    }
    tb->rwunlock(entry_key);
    return ret;
}
// Swap: each op is {Pos, NewValue}; replaces tuple element Pos with NewValue
// and returns the list of previous values. Replaced terms are reclaimed.
ERL_NIF_TERM NeuralTable::Swap(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) {
NeuralTable *tb;
ERL_NIF_TERM ret, old, it;
unsigned long int entry_key;
ErlNifEnv *bucket_env;
tb = GetTable(env, table);
if (tb == NULL) {
return enif_make_badarg(env);
}
enif_get_ulong(env, key, &entry_key);
tb->rwlock(entry_key);
bucket_env = tb->get_env(entry_key);
if (tb->find(entry_key, old)) {
const ERL_NIF_TERM *old_tpl;
const ERL_NIF_TERM *op_tpl;
ERL_NIF_TERM *new_tpl;
int tb_arity = 0,
op_arity = 0;
unsigned long pos = 0;
ERL_NIF_TERM op, list, shifted, reclaim;
// Work on a mutable copy of the stored tuple's elements.
enif_get_tuple(bucket_env, old, &tb_arity, &old_tpl);
new_tpl = (ERL_NIF_TERM*)enif_alloc(tb_arity * sizeof(ERL_NIF_TERM));
memcpy(new_tpl, old_tpl, sizeof(ERL_NIF_TERM) * tb_arity);
it = ops;
ret = enif_make_list(env, 0);
// Replaced terms are collected in the bucket's own env.
reclaim = enif_make_list(bucket_env, 0);
while (!enif_is_empty_list(env, it)) {
enif_get_list_cell(env, it, &op, &it);
enif_get_tuple(env, op, &op_arity, &op_tpl);
enif_get_ulong(env, op_tpl[0], &pos);
if (pos <= 0 || pos > tb_arity) {
ret = enif_make_badarg(env);
goto bailout;
}
// Old value: queue for reclamation and copy into the caller's result.
reclaim = enif_make_list_cell(bucket_env, new_tpl[pos - 1], reclaim);
ret = enif_make_list_cell(env, enif_make_copy(env, new_tpl[pos -1]), ret);
// New value must be copied into the bucket env to outlive this call.
new_tpl[pos - 1] = enif_make_copy(bucket_env, op_tpl[1]);
}
tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity));
tb->reclaim(entry_key, reclaim);
bailout:
enif_free(new_tpl);
} else {
ret = enif_make_badarg(env);
}
tb->rwunlock(entry_key);
return ret;
}
// Delete the entry under `key`. Returns a copy of the removed value, or
// 'undefined' when the key was absent. The removed term is reclaimed (its
// env storage is only freed later by gc()).
ERL_NIF_TERM NeuralTable::Delete(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key) {
NeuralTable *tb;
ERL_NIF_TERM val, ret;
unsigned long int entry_key;
tb = GetTable(env, table);
if (tb == NULL) { return enif_make_badarg(env); }
enif_get_ulong(env, key, &entry_key);
tb->rwlock(entry_key);
if (tb->erase(entry_key, val)) {
tb->reclaim(entry_key, val);
ret = enif_make_copy(env, val);
} else {
ret = enif_make_atom(env, "undefined");
}
tb->rwunlock(entry_key);
return ret;
}
// Empty: atomically clears every bucket (entries, term storage, garbage
// accounting) by holding all 64 write locks for the duration. Always 'ok'.
ERL_NIF_TERM NeuralTable::Empty(ErlNifEnv *env, ERL_NIF_TERM table) {
NeuralTable *tb;
int n = 0;
tb = GetTable(env, table);
if (tb == NULL) { return enif_make_badarg(env); }
// First, lock EVERY bucket. We want this to be an isolated operation.
// Locks are always taken in ascending bucket order, which avoids deadlock
// with other whole-table walkers (batch_drain/batch_dump).
for (n = 0; n < BUCKET_COUNT; ++n) {
enif_rwlock_rwlock(tb->locks[n]);
}
// Now clear the table
for (n = 0; n < BUCKET_COUNT; ++n) {
tb->hash_buckets[n].clear();
enif_clear_env(tb->env_buckets[n]);
tb->garbage_cans[n] = 0;
tb->reclaimable[n] = enif_make_list(tb->env_buckets[n], 0);
}
// Now unlock every bucket.
for (n = 0; n < BUCKET_COUNT; ++n) {
enif_rwlock_rwunlock(tb->locks[n]);
}
return enif_make_atom(env, "ok");
}
// Get: read-only lookup. Returns a copy of the stored value, or 'undefined'.
// Takes only the bucket's read lock, so concurrent readers do not serialize.
ERL_NIF_TERM NeuralTable::Get(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key) {
NeuralTable *tb;
ERL_NIF_TERM ret, val;
unsigned long int entry_key;
// Acquire table handle, or quit if the table doesn't exist.
tb = GetTable(env, table);
if (tb == NULL) { return enif_make_badarg(env); }
// Get key value
enif_get_ulong(env, key, &entry_key);
// Lock the key
tb->rlock(entry_key);
// Read current value
if (!tb->find(entry_key, val)) {
ret = enif_make_atom(env, "undefined");
} else {
// Copy out while still holding the lock: gc() may move the term.
ret = enif_make_copy(env, val);
}
tb->runlock(entry_key);
return ret;
}
// Dump: asynchronously collect all values (without clearing) on the batch
// thread; the caller later receives {'$neural_batch_response', Values}.
// Returns the sentinel atom '$neural_batch_wait' immediately.
ERL_NIF_TERM NeuralTable::Dump(ErlNifEnv *env, ERL_NIF_TERM table) {
NeuralTable *tb = GetTable(env, table);
ErlNifPid self;
ERL_NIF_TERM ret;
if (tb == NULL) { return enif_make_badarg(env); }
enif_self(env, &self);
tb->add_batch_job(self, &NeuralTable::batch_dump);
return enif_make_atom(env, "$neural_batch_wait");
}
// Drain: like Dump, but the batch thread also empties every bucket. The
// calling process receives {'$neural_batch_response', Values} later; the NIF
// itself returns the sentinel atom '$neural_batch_wait' immediately.
ERL_NIF_TERM NeuralTable::Drain(ErlNifEnv *env, ERL_NIF_TERM table) {
    NeuralTable *tb = GetTable(env, table);
    ErlNifPid self;
    // (Removed an unused `int ret` local left over from an earlier revision.)
    if (tb == NULL) { return enif_make_badarg(env); }
    enif_self(env, &self);
    tb->add_batch_job(self, &NeuralTable::batch_drain);
    return enif_make_atom(env, "$neural_batch_wait");
}
// Return the key position this table was created with (see MakeTable).
ERL_NIF_TERM NeuralTable::GetKeyPosition(ErlNifEnv *env, ERL_NIF_TERM table) {
NeuralTable *tb = GetTable(env, table);
if (tb == NULL) { return enif_make_badarg(env); }
return enif_make_uint(env, tb->key_pos);
}
// Manually wake the GC thread for this table. Returns 'ok' immediately;
// collection happens asynchronously on the GC thread.
// NOTE(review): the GC thread re-checks garbage_size() before collecting, so
// this signal is only a hint unless the threshold is already exceeded.
ERL_NIF_TERM NeuralTable::GarbageCollect(ErlNifEnv *env, ERL_NIF_TERM table) {
NeuralTable *tb = GetTable(env, table);
if (tb == NULL) { return enif_make_badarg(env); }
enif_cond_signal(tb->gc_cond);
return enif_make_atom(env, "ok");
}
// Return the table's current estimated garbage size in bytes (sum of all
// per-bucket garbage counters).
ERL_NIF_TERM NeuralTable::GarbageSize(ErlNifEnv *env, ERL_NIF_TERM table) {
NeuralTable *tb = GetTable(env, table);
unsigned long int size = 0;
if (tb == NULL) { return enif_make_badarg(env); }
size = tb->garbage_size();
return enif_make_ulong(env, size);
}
// GC thread entry point. Sleeps on gc_cond until the reclaimer (or a manual
// GarbageCollect) signals that the garbage estimate crossed the threshold,
// then compacts every bucket via gc(). Exits when `running` is cleared.
// NOTE(review): after shutdown is signalled the loop still performs one final
// gc() pass before exiting — presumably intentional; confirm.
void* NeuralTable::DoGarbageCollection(void *table) {
NeuralTable *tb = (NeuralTable*)table;
enif_mutex_lock(tb->gc_mutex);
while (running.load(memory_order_acquire)) {
while (running.load(memory_order_acquire) && tb->garbage_size() < RECLAIM_THRESHOLD) {
enif_cond_wait(tb->gc_cond, tb->gc_mutex);
}
tb->gc();
}
enif_mutex_unlock(tb->gc_mutex);
return NULL;
}
// Reclaimer thread entry point. Every ~50 ms it nibbles up to `max_eat`
// discarded terms from each bucket's reclaim list, adds their estimated size
// to that bucket's garbage counter, and signals the GC thread once a bucket's
// accumulated garbage crosses the threshold.
void* NeuralTable::DoReclamation(void *table) {
    const int max_eat = 5;
    NeuralTable *tb = (NeuralTable*)table;
    int i = 0, c = 0;
    unsigned long int t = 0;
    ERL_NIF_TERM tl, hd;
    ErlNifEnv *env;
    while (running.load(memory_order_acquire)) {
        for (i = 0; i < BUCKET_COUNT; ++i) {
            c = 0;
            tb->rwlock(i);
            env = tb->get_env(i);
            tl = tb->reclaimable[i];
            while (c++ < max_eat && !enif_is_empty_list(env, tl)) {
                enif_get_list_cell(env, tl, &hd, &tl);
                tb->garbage_cans[i] += estimate_size(env, hd);
            }
            // BUGFIX: persist the consumed position. The original never wrote
            // `tl` back, so the same head terms were re-measured on every
            // pass and garbage_cans grew without bound.
            tb->reclaimable[i] = tl;
            // BUGFIX: the original did `t += garbage_cans[i]` inside the eat
            // loop, repeatedly re-adding the running total; compare the
            // bucket's counter against the threshold once instead.
            t = tb->garbage_cans[i];
            tb->rwunlock(i);
            if (t >= RECLAIM_THRESHOLD) {
                enif_cond_signal(tb->gc_cond);
            }
        }
#ifdef _WIN32
        Sleep(50);
#else
        usleep(50000);
#endif
    }
    return NULL;
}
// Batch thread entry point. Waits for queued dump/drain jobs and executes
// them one at a time (batch_mutex stays held while a job runs, so jobs are
// serialized). Exits when `running` is cleared.
void* NeuralTable::DoBatchOperations(void *table) {
    NeuralTable *tb = (NeuralTable*)table;
    enif_mutex_lock(tb->batch_mutex);
    while (running.load(memory_order_acquire)) {
        while (running.load(memory_order_acquire) && tb->batch_jobs.empty()) {
            enif_cond_wait(tb->batch_cond, tb->batch_mutex);
        }
        // BUGFIX: on shutdown the wait loop exits with an empty queue; the
        // original then called front() on it, which is undefined behavior.
        if (tb->batch_jobs.empty()) {
            continue; // re-check `running` and exit cleanly
        }
        BatchJob job = tb->batch_jobs.front();
        tb->batch_jobs.pop();
        (tb->*job.fun)(job.pid);
    }
    enif_mutex_unlock(tb->batch_mutex);
    return NULL;
}
// Create GC synchronization primitives and launch the GC thread, then the
// reclaimer thread (the reclaimer signals gc_cond, so GC must exist first).
void NeuralTable::start_gc() {
int ret;
gc_mutex = enif_mutex_create("neural_table_gc");
gc_cond = enif_cond_create("neural_table_gc");
ret = enif_thread_create("neural_garbage_collector", &gc_tid, NeuralTable::DoGarbageCollection, (void*)this, NULL);
if (ret != 0) {
// Thread creation failure is only logged; the table continues without GC.
printf("[neural_gc] Can't create GC thread. Error Code: %d\r\n", ret);
}
// Start the reclaimer after the garbage collector.
ret = enif_thread_create("neural_reclaimer", &rc_tid, NeuralTable::DoReclamation, (void*)this, NULL);
if (ret != 0) {
printf("[neural_gc] Can't create reclamation thread. Error Code: %d\r\n", ret);
}
}
// Wake the GC thread so it observes `running == false`, then join both
// background threads. Caller (Shutdown) must have cleared `running` first.
// NOTE(review): gc_cond is signalled without holding gc_mutex — if the GC
// thread is between its running-check and cond_wait this wake-up can be
// missed; the reclaimer normally re-signals, but it is also shutting down.
void NeuralTable::stop_gc() {
enif_cond_signal(gc_cond);
// Join the reclaimer before the garbage collector.
enif_thread_join(rc_tid, NULL);
enif_thread_join(gc_tid, NULL);
}
// Create batch-queue synchronization primitives and launch the batch thread.
void NeuralTable::start_batch() {
int ret;
batch_mutex = enif_mutex_create("neural_table_batch");
batch_cond = enif_cond_create("neural_table_batch");
ret = enif_thread_create("neural_batcher", &batch_tid, NeuralTable::DoBatchOperations, (void*)this, NULL);
if (ret != 0) {
// Failure is only logged; Dump/Drain jobs would then queue up unserved.
printf("[neural_batch] Can't create batch thread. Error Code: %d\r\n", ret);
}
}
// Wake the batch thread so it observes `running == false`, then join it.
void NeuralTable::stop_batch() {
enif_cond_signal(batch_cond);
enif_thread_join(batch_tid, NULL);
}
// Store `tuple` under `key`, copying it into the bucket's env so it survives
// the NIF call. Caller must hold the bucket's write lock.
void NeuralTable::put(unsigned long int key, ERL_NIF_TERM tuple) {
ErlNifEnv *env = get_env(key);
hash_buckets[GET_BUCKET(key)][key] = enif_make_copy(env, tuple);
}
// Return the process-independent env owning terms in `key`'s bucket.
ErlNifEnv* NeuralTable::get_env(unsigned long int key) {
return env_buckets[GET_BUCKET(key)];
}
bool NeuralTable::find(unsigned long int key, ERL_NIF_TERM &ret) {
hash_table *bucket = &hash_buckets[GET_BUCKET(key)];
hash_table::iterator it = bucket->find(key);
if (bucket->end() == it) {
return false;
} else {
ret = it->second;
return true;
}
}
bool NeuralTable::erase(unsigned long int key, ERL_NIF_TERM &val) {
hash_table *bucket = &hash_buckets[GET_BUCKET(key)];
hash_table::iterator it = bucket->find(key);
bool ret = false;
if (it != bucket->end()) {
ret = true;
val = it->second;
bucket->erase(it);
}
return ret;
}
// Enqueue a dump/drain job for `pid` and wake the batch thread. Signalling
// after unlock is fine: the waiter re-checks the queue under the mutex.
void NeuralTable::add_batch_job(ErlNifPid pid, BatchFunction fun) {
BatchJob job;
job.pid = pid;
job.fun = fun;
enif_mutex_lock(batch_mutex);
batch_jobs.push(job);
enif_mutex_unlock(batch_mutex);
enif_cond_signal(batch_cond);
}
// Batch-thread job: collect every stored value into one list, clearing each
// bucket as it goes (bucket-at-a-time write locks, ascending order), then
// send {'$neural_batch_response', Values} to `pid`.
void NeuralTable::batch_drain(ErlNifPid pid) {
ErlNifEnv *env = enif_alloc_env();
ERL_NIF_TERM msg, value;
value = enif_make_list(env, 0);
for (int i = 0; i < BUCKET_COUNT; ++i) {
enif_rwlock_rwlock(locks[i]);
for (hash_table::iterator it = hash_buckets[i].begin(); it != hash_buckets[i].end(); ++it) {
// Copy into the message env before the bucket env is cleared below.
value = enif_make_list_cell(env, enif_make_copy(env, it->second), value);
}
enif_clear_env(env_buckets[i]);
hash_buckets[i].clear();
garbage_cans[i] = 0;
reclaimable[i] = enif_make_list(env_buckets[i], 0);
enif_rwlock_rwunlock(locks[i]);
}
msg = enif_make_tuple2(env, enif_make_atom(env, "$neural_batch_response"), value);
enif_send(NULL, &pid, env, msg);
enif_free_env(env);
}
// Batch-thread job: collect every stored value into one list without
// modifying the table (read locks only), then send
// {'$neural_batch_response', Values} to `pid`.
void NeuralTable::batch_dump(ErlNifPid pid) {
ErlNifEnv *env = enif_alloc_env();
ERL_NIF_TERM msg, value;
value = enif_make_list(env, 0);
for (int i = 0; i < BUCKET_COUNT; ++i) {
enif_rwlock_rlock(locks[i]);
for (hash_table::iterator it = hash_buckets[i].begin(); it != hash_buckets[i].end(); ++it) {
value = enif_make_list_cell(env, enif_make_copy(env, it->second), value);
}
enif_rwlock_runlock(locks[i]);
}
msg = enif_make_tuple2(env, enif_make_atom(env, "$neural_batch_response"), value);
enif_send(NULL, &pid, env, msg);
enif_free_env(env);
}
// Record a discarded bucket-env term so the reclaimer thread can add its
// size to the garbage estimate. `term` must already live in the bucket's
// env. Caller must hold the bucket's write lock.
void NeuralTable::reclaim(unsigned long int key, ERL_NIF_TERM term) {
int bucket = GET_BUCKET(key);
ErlNifEnv *env = get_env(key);
reclaimable[bucket] = enif_make_list_cell(env, term, reclaimable[bucket]);
}
// Compact every bucket: copy each live term into a fresh env, swap the envs
// and free the old one (which releases all discarded terms at once), and
// reset the garbage accounting. Runs on the GC thread; takes each bucket's
// write lock for the duration of that bucket's copy.
void NeuralTable::gc() {
ErlNifEnv *fresh = NULL,
*old = NULL;
hash_table *bucket = NULL;
hash_table::iterator it;
unsigned int gc_curr = 0;
for (; gc_curr < BUCKET_COUNT; ++gc_curr) {
bucket = &hash_buckets[gc_curr];
old = env_buckets[gc_curr];
// Allocate the replacement env before taking the lock.
fresh = enif_alloc_env();
enif_rwlock_rwlock(locks[gc_curr]);
for (it = bucket->begin(); it != bucket->end(); ++it) {
it->second = enif_make_copy(fresh, it->second);
}
garbage_cans[gc_curr] = 0;
env_buckets[gc_curr] = fresh;
reclaimable[gc_curr] = enif_make_list(fresh, 0);
// Freeing the old env drops every term not copied above.
enif_free_env(old);
enif_rwlock_rwunlock(locks[gc_curr]);
}
}
// Sum the per-bucket garbage estimates under their read locks.
// Note this is a per-bucket snapshot, not a single atomic view of the table.
unsigned long int NeuralTable::garbage_size() {
    unsigned long int total = 0;
    for (int b = 0; b < BUCKET_COUNT; ++b) {
        enif_rwlock_rlock(locks[b]);
        total += garbage_cans[b];
        enif_rwlock_runlock(locks[b]);
    }
    return total;
}

+ 121
- 0
c_src/neural/NeuralTable.h 查看文件

@ -0,0 +1,121 @@
#ifndef NEURALTABLE_H
#define NEURALTABLE_H
#include "erl_nif.h"
#include "neural_utils.h"
#include <string>
#include <stdio.h>
#include <string.h>
#include <unordered_map>
#include <queue>
#include <atomic>
#ifdef _WIN32
#include <windows.h>
#include <io.h>
#include <process.h>
#else
#include <unistd.h>
#endif
// Number of hash buckets per table; must stay a power of two so the mask
// below selects a bucket.
#define BUCKET_COUNT 64
#define BUCKET_MASK (BUCKET_COUNT - 1)
// BUGFIX: the macro argument and the whole expression are now parenthesized.
// The unparenthesized originals expanded expressions like GET_BUCKET(a + b)
// to `a + b & MASK` (wrong precedence) and made `GET_BUCKET(k)[i]`-style
// usage bind incorrectly.
#define GET_BUCKET(key) ((key) & BUCKET_MASK)
#define GET_LOCK(key) ((key) & BUCKET_MASK)
// Estimated bytes of discarded terms per table before the GC thread compacts.
#define RECLAIM_THRESHOLD 1048576
using namespace std;
class NeuralTable;
// Registry type: table-name atom text -> table instance.
typedef unordered_map<string, NeuralTable*> table_set;
// Bucket storage: pre-hashed key -> term stored in the bucket's env.
typedef unordered_map<unsigned long int, ERL_NIF_TERM> hash_table;
// Member-function pointer type for batch jobs (dump/drain).
typedef void (NeuralTable::*BatchFunction)(ErlNifPid pid);
// A sharded, lock-striped term table backed by process-independent NIF
// environments, with background garbage accounting and batch dump/drain.
// All access goes through the static API; keys are integers pre-hashed on
// the Erlang side (see the note at the top of NeuralTable.cpp).
class NeuralTable {
    public:
        static ERL_NIF_TERM MakeTable(ErlNifEnv *env, ERL_NIF_TERM name, ERL_NIF_TERM keypos);
        static ERL_NIF_TERM Insert(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object);
        static ERL_NIF_TERM InsertNew(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object);
        static ERL_NIF_TERM Delete(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key);
        static ERL_NIF_TERM Empty(ErlNifEnv *env, ERL_NIF_TERM table);
        static ERL_NIF_TERM Get(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key);
        static ERL_NIF_TERM Increment(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops);
        static ERL_NIF_TERM Shift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops);
        static ERL_NIF_TERM Unshift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops);
        static ERL_NIF_TERM Swap(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops);
        static ERL_NIF_TERM Dump(ErlNifEnv *env, ERL_NIF_TERM table);
        static ERL_NIF_TERM Drain(ErlNifEnv *env, ERL_NIF_TERM table);
        static ERL_NIF_TERM GetKeyPosition(ErlNifEnv *env, ERL_NIF_TERM table);
        static ERL_NIF_TERM GarbageCollect(ErlNifEnv *env, ERL_NIF_TERM table);
        static ERL_NIF_TERM GarbageSize(ErlNifEnv *env, ERL_NIF_TERM table);
        static NeuralTable* GetTable(ErlNifEnv *env, ERL_NIF_TERM name);
        // Background thread entry points (GC, batch worker, reclaimer).
        static void* DoGarbageCollection(void *table);
        static void* DoBatchOperations(void *table);
        static void* DoReclamation(void *table);
        // Called once from the NIF load callback.
        static void Initialize() {
            table_mutex = enif_mutex_create("neural_table_maker");
        }
        // Called from the NIF unload callback: stop threads and free tables.
        static void Shutdown() {
            running = false;
            table_set::iterator it(tables.begin());
            while (it != tables.end()) {
                delete it->second;
                tables.erase(it);
                it = tables.begin();
            }
            enif_mutex_destroy(table_mutex);
        }
        // Per-key lock striping: one rwlock per bucket.
        void rlock(unsigned long int key) { enif_rwlock_rlock(locks[GET_LOCK(key)]); }
        void runlock(unsigned long int key) { enif_rwlock_runlock(locks[GET_LOCK(key)]); }
        void rwlock(unsigned long int key) { enif_rwlock_rwlock(locks[GET_LOCK(key)]); }
        void rwunlock(unsigned long int key) { enif_rwlock_rwunlock(locks[GET_LOCK(key)]); }
        ErlNifEnv *get_env(unsigned long int key);
        bool erase(unsigned long int key, ERL_NIF_TERM &ret);
        bool find(unsigned long int key, ERL_NIF_TERM &ret);
        void put(unsigned long int key, ERL_NIF_TERM tuple);
        void batch_dump(ErlNifPid pid);
        void batch_drain(ErlNifPid pid);
        void start_gc();
        void stop_gc();
        void start_batch();
        void stop_batch();
        void gc();
        void reclaim(unsigned long int key, ERL_NIF_TERM reclaim);
        unsigned long int garbage_size();
        void add_batch_job(ErlNifPid pid, BatchFunction fun);
    protected:
        static table_set tables;
        static atomic<bool> running;
        static ErlNifMutex *table_mutex;
        // A queued dump/drain request: who to notify and what to run.
        struct BatchJob {
            ErlNifPid pid;
            BatchFunction fun;
        };
        NeuralTable(unsigned int kp);
        ~NeuralTable();
        unsigned int garbage_cans[BUCKET_COUNT];   // estimated garbage bytes per bucket
        hash_table hash_buckets[BUCKET_COUNT];     // key -> stored term
        ErlNifEnv *env_buckets[BUCKET_COUNT];      // term storage per bucket
        ERL_NIF_TERM reclaimable[BUCKET_COUNT];    // list of discarded terms per bucket
        ErlNifRWLock *locks[BUCKET_COUNT];         // per-bucket lock striping
        ErlNifCond *gc_cond;
        ErlNifMutex *gc_mutex;
        ErlNifTid gc_tid;
        ErlNifTid rc_tid;
        ErlNifCond *batch_cond;
        ErlNifMutex *batch_mutex;
        queue<BatchJob> batch_jobs;
        ErlNifTid batch_tid;
        unsigned int key_pos;                      // 1-based key position in stored tuples
};
#endif

+ 134
- 0
c_src/neural/neural.cpp 查看文件

@ -0,0 +1,134 @@
#include "erl_nif.h"
#include "NeuralTable.h"
#include <stdio.h>
// Prototypes
static ERL_NIF_TERM neural_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_put(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_put_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_increment(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_unshift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_shift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_swap(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_get(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_delete(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_garbage(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_garbage_size(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_empty(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_drain(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_dump(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM neural_key_pos(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
// Erlang-visible name/arity -> C implementation. The `do_`-prefixed entries
// are expected to be wrapped by guarded functions in neural.erl.
static ErlNifFunc nif_funcs[] =
{
{"make_table", 2, neural_new},
{"do_fetch", 2, neural_get},
{"do_delete", 2, neural_delete},
{"do_dump", 1, neural_dump},
{"do_drain", 1, neural_drain},
{"empty", 1, neural_empty},
{"insert", 3, neural_put},
{"insert_new", 3, neural_put_new},
{"do_increment", 3, neural_increment},
{"do_unshift", 3, neural_unshift},
{"do_shift", 3, neural_shift},
{"do_swap", 3, neural_swap},
{"garbage", 1, neural_garbage},
{"garbage_size", 1, neural_garbage_size},
{"key_pos", 1, neural_key_pos}
};
static ERL_NIF_TERM neural_key_pos(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
// This function is directly exposed, so no strict guards or patterns protecting us.
if (argc != 1 || !enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::GetKeyPosition(env, argv[0]);
}
// Thin delegating wrappers. Argument validation is left to the NeuralTable
// static methods (and/or guards in the Erlang wrapper module).
static ERL_NIF_TERM neural_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::MakeTable(env, argv[0], argv[1]);
}
static ERL_NIF_TERM neural_put(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::Insert(env, argv[0], argv[1], argv[2]);
}
static ERL_NIF_TERM neural_put_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::InsertNew(env, argv[0], argv[1], argv[2]);
}
static ERL_NIF_TERM neural_increment(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
// Validates shape here because increment dereferences op tuples eagerly.
if (!enif_is_atom(env, argv[0]) || !enif_is_number(env, argv[1]) || !enif_is_list(env, argv[2])) {
return enif_make_badarg(env);
}
return NeuralTable::Increment(env, argv[0], argv[1], argv[2]);
}
static ERL_NIF_TERM neural_shift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::Shift(env, argv[0], argv[1], argv[2]);
}
static ERL_NIF_TERM neural_unshift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::Unshift(env, argv[0], argv[1], argv[2]);
}
static ERL_NIF_TERM neural_swap(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]){
return NeuralTable::Swap(env, argv[0], argv[1], argv[2]);
}
static ERL_NIF_TERM neural_get(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::Get(env, argv[0], argv[1]);
}
static ERL_NIF_TERM neural_delete(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
return NeuralTable::Delete(env, argv[0], argv[1]);
}
// Wrappers for the table-wide operations; each requires the table-name atom.
static ERL_NIF_TERM neural_empty(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::Empty(env, argv[0]);
}
static ERL_NIF_TERM neural_dump(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::Dump(env, argv[0]);
}
static ERL_NIF_TERM neural_drain(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::Drain(env, argv[0]);
}
static ERL_NIF_TERM neural_garbage(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::GarbageCollect(env, argv[0]);
}
static ERL_NIF_TERM neural_garbage_size(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); }
return NeuralTable::GarbageSize(env, argv[0]);
}
// Resource destructor placeholder; no NIF resources are registered yet.
static void neural_resource_cleanup(ErlNifEnv* env, void* arg)
{
/* Delete any dynamically allocated memory stored in neural_handle */
/* neural_handle* handle = (neural_handle*)arg; */
}
// Module load: create the global table registry mutex.
static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
NeuralTable::Initialize();
return 0;
}
// Module unload: stop background threads and free every table.
static void on_unload(ErlNifEnv *env, void *priv_data) {
NeuralTable::Shutdown();
}
// No upgrade/reload callbacks are provided, so hot code upgrade of this NIF
// will fail; presumably acceptable for this library — confirm.
ERL_NIF_INIT(neural, nif_funcs, &on_load, NULL, NULL, &on_unload);

+ 46
- 0
c_src/neural/neural_utils.cpp 查看文件

@ -0,0 +1,46 @@
#include "neural_utils.h"
unsigned long int estimate_size(ErlNifEnv *env, ERL_NIF_TERM term) {
if (enif_is_atom(env, term)) {
return WORD_SIZE;
}
// Treating all numbers like longs.
if (enif_is_number(env, term)) {
return 2 * WORD_SIZE;
}
if (enif_is_binary(env, term)) {
ErlNifBinary bin;
enif_inspect_binary(env, term, &bin);
return bin.size + (6 * WORD_SIZE);
}
if (enif_is_list(env, term)) {
unsigned long int size = 0;
ERL_NIF_TERM it, curr;
it = term;
size += WORD_SIZE;
while (!enif_is_empty_list(env, it)) {
enif_get_list_cell(env, it, &curr, &it);
size += estimate_size(env, curr) + WORD_SIZE;
}
return size;
}
if (enif_is_tuple(env, term)) {
unsigned long int size = 0;
const ERL_NIF_TERM *tpl;
int arity;
enif_get_tuple(env, term, &arity, &tpl);
for (int i = 0; i < arity; ++i) {
size += estimate_size(env, tpl[i]);
}
return size;
}
// Return 1 word by default
return WORD_SIZE;
}

+ 9
- 0
c_src/neural/neural_utils.h 查看文件

@ -0,0 +1,9 @@
#ifndef NEURAL_UTILS_H
#define NEURAL_UTILS_H
#include "erl_nif.h"
// Unit used by estimate_size. NOTE(review): sizeof(int) is 4 on LP64, while
// an Erlang VM word is 8 bytes there — presumably intended as sizeof(void*);
// confirm before relying on the byte estimates.
#define WORD_SIZE sizeof(int)
// Rough recursive byte-size estimate of `term` (see neural_utils.cpp).
unsigned long int estimate_size(ErlNifEnv *env, ERL_NIF_TERM term);
#endif

+ 14
- 0
c_src/neural/rebar.config 查看文件

@ -0,0 +1,14 @@
%% Build all C++ sources in this directory into the neural NIF shared object.
{port_specs, [
{"../../priv/neural.so", ["*.cpp"]}
]}.
%% NeuralTable uses <atomic>/std::atomic, hence -std=c++11.
{port_env, [
{".*", "CXXFLAGS", "$CXXFLAGS -std=c++11 -O3"},
{".*", "LDFLAGS", "$LDFLAGS -lstdc++ -shared"}
]}.

+ 1
- 1
src/dataType/utTermSize.erl 查看文件

@ -85,7 +85,7 @@ internal_test() ->
32 = byteSize(<<$a, $b, $c>>, 8),
8 = byteSize([], 8),
24 = byteSize([0|[]], 8),
24 = byteSize([1|2], 8), % improper list
24 = byteSize([1|2], 8), % improper list
16 = byteSize({}, 8),
24 = byteSize({0}, 8),
8 = byteSize(0, 8),

+ 20
- 0
src/nifSrc/bitmap_filter/bitmap_filter.erl 查看文件

@ -0,0 +1,20 @@
%% NIF front-end for the bitmap_filter library. The real filter/1 is
%% installed by erlang:load_nif/2 from the on_load hook below.
-module(bitmap_filter).
-export([init/0, filter/1]).
-on_load(init/0).
%% Locate priv/ and load the NIF. code:priv_dir/1 returns {error, bad_name}
%% when the app is not set up under a standard layout; in that case fall
%% back to "<app root>/priv" derived from the location of this beam file.
init() ->
PrivDir = case code:priv_dir(?MODULE) of
{error, _} ->
EbinDir = filename:dirname(code:which(?MODULE)),
AppPath = filename:dirname(EbinDir),
filename:join(AppPath, "priv");
Path ->
Path
end,
erlang:load_nif(filename:join(PrivDir, "bitmap_filter"), 0).
% Hack - overriden by init, which is called in on_load.
% I couldn't find another way that the compiler or code load didn't complain about.
%% Fallback body used only if the NIF failed to load: echoes its argument.
filter(DefaultArgs) ->
DefaultArgs.

+ 77
- 0
src/nifSrc/bsn/bsn.erl 查看文件

@ -0,0 +1,77 @@
%% Dispatch facade over the two bsn NIF backends: bsn_int (in-heap,
%% linear or quadratic probing) and bsn_ext (external chaining). A queue
%% handle is a {BackendModule, Resource} pair; every call is forwarded to
%% the backend module recorded in the handle.
-module(bsn).
%% API
-export([hash/2, compare/2]).
-export([new/2, add/2, all/1, chains/1, in/2, count/1, clear/2]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%-include_lib("triq/include/triq.hrl").
-endif.
%% Create new resource, `CellsCount' is the size of the cell store.
%% A negative count is how bsn_int:new/1 is told to use quadratic
%% (rather than linear) probing.
new('int_quadric', CellsCount) when CellsCount > 0 ->
{'bsn_int', bsn_int:new(-CellsCount)};
new('int_linear', CellsCount) when CellsCount > 0 ->
{'bsn_int', bsn_int:new(CellsCount)};
new('ext', CellsCount) when CellsCount > 0 ->
{'bsn_ext', bsn_ext:new(CellsCount)}.
%% Add new element.
%% If the result is a negative integer
%% then object was already added.
%% We found this object with (result) steps.
%%
%% If the result is a positive integer
%% then object was added after (result) elements.
add({Type, Res}, Bin) ->
Type:add(Res, Bin).
%% Return all stored elements (delegated to the backend).
all({Type, Res}) ->
Type:all(Res).
%% Return backend chain structure (delegated; ext backend only concept).
chains({Type, Res}) ->
Type:chains(Res).
%% Membership test.
%% If the result is a negative integer
%% then object was found with (-result) steps.
%%
%% If the result is a positive integer
%% then object was not found with (result) steps.
in({Type, Res}, Bin) ->
Type:in(Res, Bin).
%% Remove Bin from the store (delegated to the backend).
clear({Type, Res}, Bin) ->
Type:clear(Res, Bin).
%% Return the count of elements stored in this resource.
count({Type, Res}) ->
Type:count(Res).
%% Calculate the hash of the binary
hash(Bin, Max) ->
bsn_ext:hash(Bin, Max).
%% Byte-wise binary comparison exposed by the ext NIF.
compare(Bin1, Bin2) ->
bsn_ext:compare(Bin1, Bin2).
-ifdef(TEST).
-ifdef(FORALL).
prop_compare_test_() ->
{"Binary compare testing.",
{timeout, 60,
fun() -> triq:check(prop_compare()) end}}.
prop_compare() ->
?FORALL({Xs},{binary()},
compare(Xs, Xs)).
-endif.
-endif.

+ 56
- 0
src/nifSrc/bsn/bsn_ext.erl 查看文件

@ -0,0 +1,56 @@
%% NIF stubs for the external-chaining bsn backend. All functions below
%% are replaced by native implementations when the library loads.
-module(bsn_ext).
-on_load(init/0).
-export([init/0]).
%% API
-export([hash/2, compare/2]).
-export([new/1, add/2, all/1, chains/1, in/2, count/1, clear/2]).
-define(NIF_NOT_LOADED, erlang:nif_error(nif_not_loaded)).
%% Load the bsn_ext NIF library.
%% Fix: the original did code:priv_dir('bsn') ++ "/bsn_ext", which crashes
%% with badarg when priv_dir returns {error, bad_name} (app not loaded).
%% Fall back to "<app root>/priv" derived from this beam's location, the
%% same strategy the other NIF modules in this project use.
init() ->
    PrivDir = case code:priv_dir(bsn) of
        {error, _} ->
            EbinDir = filename:dirname(code:which(?MODULE)),
            filename:join(filename:dirname(EbinDir), "priv");
        Dir ->
            Dir
    end,
    erlang:load_nif(filename:join(PrivDir, "bsn_ext"), 0).
%% Create new resource, `CellsCount' is the size of the cell store.
new(_CellsCount) ->
    ?NIF_NOT_LOADED.
%% Add new element.
%% If the result is a negative integer
%% then object was already added.
%% We found this object with (result) steps.
%%
%% If the result is a positive integer
%% then object was added after (result) elements.
add(_Res, _Bin) ->
    ?NIF_NOT_LOADED.
%% Return all stored elements.
all(_Res) ->
    ?NIF_NOT_LOADED.
%% Return the chain structure of the store.
chains(_Res) ->
    ?NIF_NOT_LOADED.
%% Membership test.
%% If the result is a negative integer
%% then object was found with (-result) steps.
%%
%% If the result is a positive integer
%% then object was not found with (result) steps.
in(_Res, _Bin) ->
    ?NIF_NOT_LOADED.
%% Return the count of elements stored in this resource.
count(_Res) ->
    ?NIF_NOT_LOADED.
%% Calculate the hash of the binary.
hash(_Bin, _Max) ->
    ?NIF_NOT_LOADED.
%% Compare two binaries.
compare(_Bin1, _Bin2) ->
    ?NIF_NOT_LOADED.
%% Remove an element.
clear(_Res, _Bin) ->
    ?NIF_NOT_LOADED.

+ 45
- 0
src/nifSrc/bsn/bsn_int.erl 查看文件

@ -0,0 +1,45 @@
%% NIF stubs for the in-heap (open addressing) bsn backend. All functions
%% below are replaced by native implementations when the library loads.
-module(bsn_int).
-on_load(init/0).
-export([init/0]).
%% API
-export([new/1, add/2, all/1, in/2, count/1, clear/2]).
-define(NIF_NOT_LOADED, erlang:nif_error(nif_not_loaded)).
%% Load the bsn_int NIF library.
%% Fix: the original did code:priv_dir('bsn') ++ "/bsn_int", which crashes
%% with badarg when priv_dir returns {error, bad_name} (app not loaded).
%% Fall back to "<app root>/priv" derived from this beam's location, the
%% same strategy the other NIF modules in this project use.
init() ->
    PrivDir = case code:priv_dir(bsn) of
        {error, _} ->
            EbinDir = filename:dirname(code:which(?MODULE)),
            filename:join(filename:dirname(EbinDir), "priv");
        Dir ->
            Dir
    end,
    erlang:load_nif(filename:join(PrivDir, "bsn_int"), 0).
%% Create new resource, `CellsCount' is the size of the cell store.
%% A negative count selects quadratic probing (see bsn:new/2).
new(_CellsCount) ->
    ?NIF_NOT_LOADED.
%% Add new element.
%% If the result is a negative integer
%% then object was already added.
%% We found this object with (result) steps.
%%
%% If the result is a positive integer
%% then object was added after (result) elements.
add(_Res, _Bin) ->
    ?NIF_NOT_LOADED.
%% Return all stored elements.
all(_Res) ->
    ?NIF_NOT_LOADED.
%% Membership test.
%% If the result is a negative integer
%% then object was found with (-result) steps.
%%
%% If the result is a positive integer
%% then object was not found with (result) steps.
in(_Res, _Bin) ->
    ?NIF_NOT_LOADED.
%% Return the count of elements stored in this resource.
count(_Res) ->
    ?NIF_NOT_LOADED.
%% Remove an element.
clear(_Res, _Bin) ->
    ?NIF_NOT_LOADED.

+ 236
- 0
src/nifSrc/bsn/bsn_measure.erl 查看文件

@ -0,0 +1,236 @@
%% Benchmark / measurement harness for the bsn backends: generates test
%% data, fills a store to a given capacity fraction, and reports lookup
%% step counts (miss/in/all) per backend.
-module(bsn_measure).
-export([test/0, test2/0, test3/0, print/0]).
-export([gen/2, check_type/4]).
-export([check_type/3, get_type/3, test_type/2]).
-export([check_degrade/0, test_filled/1]).
-ifndef(TEST).
-define(TEST, e).
-endif.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%-include_lib("triq/include/triq.hrl").
-endif.
% InOutK is (success / failure) checks.
% Return {TestCases, Elements}.
gen(ElemCount, InOutK)
when ElemCount>0 ->
Nums = lists:seq(0, erlang:round(ElemCount*100)),
filter(ElemCount, InOutK, Nums, [], []).
%% Randomly select ~ElemCount numbers to become stored elements; every
%% candidate (selected or not) becomes a test case.
%% NOTE(review): uses the deprecated 'random' module (removed in OTP 20+);
%% 'rand' is the modern replacement — confirm target OTP version.
filter(EC, InOutK, [H|T], AllAcc, ElemAcc)
when EC>0 ->
case random:uniform() of
X when X<InOutK ->
filter(EC-1, InOutK,
T, [H|AllAcc], [H|ElemAcc]);
_X ->
filter(EC, InOutK,
T, [H|AllAcc], ElemAcc)
end;
filter(_ElemCount, _InOutK, _Acc, AllAcc, ElemAcc) ->
{AllAcc, ElemAcc}.
%% Run the standard capacity sweep, summarizing step counts with average/1.
check_type(Type, Size, InOutK) ->
check_type(fun average/1, Type, Size, InOutK).
%% Same sweep but return raw step-count lists instead of averages.
get_type(Type, Size, InOutK) ->
check_type(fun(X) -> X end, Type, Size, InOutK).
%% Sweep fill factors 0.1 .. 1.0 for a fresh store per run.
check_type(OutF, Type, Size, InOutK) ->
% Build resourse
F = fun() -> bsn:new(Type, Size) end,
[do_check(OutF, F, Size, InOutK, 0.1),
do_check(OutF, F, Size, InOutK, 0.25),
do_check(OutF, F, Size, InOutK, 0.5),
do_check(OutF, F, Size, InOutK, 0.75),
do_check(OutF, F, Size, InOutK, 0.9),
do_check(OutF, F, Size, InOutK, 1)].
%% One measurement: fill to Size*CapacityK elements, probe every test
%% case, and split results into misses (positive step counts) and hits
%% (negative step counts, negated back to positive via erlang:'-'/1).
do_check(OutF, F, Size, InOutK, CapacityK) ->
Res = F(),
ElemCount = Size * CapacityK,
{CaseList, ElemList} = gen(ElemCount, InOutK),
fill_values(Res, ElemList),
VaList = check_values(Res, CaseList, []),
{MissList, InNegList} = lists:partition(fun(X) -> X>0 end, VaList),
InList = lists:map(fun erlang:'-'/1, InNegList),
AllList = InList ++ MissList,
{CapacityK,
{size, Size},
{real_count, bsn:count(Res)},
{miss, OutF(MissList)},
{in, OutF(InList)},
{all, OutF(AllList)}}.
%% Arithmetic mean rounded to 3 decimals; 'false' for an empty list.
average([]) ->
false;
average([X|Tail]) ->
average1(Tail, X, 1).
% @private
average1([X|Tail], Sum, Count) ->
average1(Tail, Sum + X, Count + 1);
average1([], Sum, Count) ->
round4(Sum / Count).
round4(X) when is_number(X) ->
erlang:round(X * 1000) / 1000;
round4(X) ->
X.
%% Probe each case, accumulating the signed step counts from bsn:in/2.
check_values(Res, [H|T], Acc) ->
X = bsn:in(Res, integer_to_binary(H)),
check_values(Res, T, [X|Acc]);
check_values(_Res, [], Acc) ->
Acc.
%% Insert until the list is exhausted or the store reports 'no_more'.
fill_values(Res, [H|T]) ->
case bsn:add(Res, integer_to_binary(H)) of
no_more ->
Res;
X ->
fill_values(Res, T)
end;
fill_values(Res, []) ->
Res.
%% As fill_values/2 but returns the list of values actually inserted.
fill_values(Res, [H|T], Acc) ->
case bsn:add(Res, integer_to_binary(H)) of
no_more ->
Acc;
X ->
fill_values(Res, T, [H|Acc])
end;
fill_values(_Res, [], Acc) ->
Acc.
%% Local helper predating the erlang:integer_to_binary/1 BIF (OTP R16+).
integer_to_binary(X) ->
erlang:list_to_binary(erlang:integer_to_list(X)).
test() ->
[{ext, check_type(ext, 100, 0.5)}
,{int_linear, check_type(int_linear, 100, 0.5)}
,{int_quadric, check_type(int_quadric, 100, 0.5)}].
%% All values.
test2() ->
[{ext, get_type(ext, 100, 0.5)}
,{int_linear, get_type(int_linear, 100, 0.5)}
,{int_quadric, get_type(int_quadric, 100, 0.5)}].
%% Counts of values.
test3() ->
F = fun anal_values/1,
[{ext, check_type(F, ext, 100, 0.5)}
,{int_linear, check_type(F, int_linear, 100, 0.5)}
,{int_quadric, check_type(F, int_quadric, 100, 0.5)}].
%% Dump test3() results as CSV-ish tables on stdout.
print() ->
do_print(test3()).
do_print([{Type, Vals}|T]) ->
io:format("Type ~w~n", [Type]),
lists:map(fun({K,
{real_count,RC},
{miss, M},
{in, I},
{all, A}}) ->
io:format("K=~w, RC=~w~n", [K, RC]),
io:format("count,miss,in,all\n"),
print_mia(lists:seq(1, 100), M, I, A),
io:format("\n")
end, Vals),
do_print(T);
do_print([]) ->
ok.
%% Walk step-counts 1..100, aligning the three {Step,Count} histograms by
%% padding missing steps with {Step,0}; rows that are all zero are skipped.
%% The clause order is significant: padding clauses re-enter with the same
%% head element until all three lists start at H.
print_mia([H|T], [{H,0}|T1], [{H,0}|T2], [{H,0}|T3]) ->
print_mia(T, T1, T2, T3);
print_mia([H|T], [{H,C1}|T1], [{H,C2}|T2], [{H,C3}|T3]) ->
io:format("~w,~w,~w,~w\n", [H, C1, C2, C3]),
print_mia(T, T1, T2, T3);
print_mia([H|_]=L, [{X,_}|_]=L1, L2, L3)
when X =/= H ->
print_mia(L, [{H,0}|L1], L2, L3);
print_mia([H|_]=L, [], L2, L3) ->
print_mia(L, [{H,0}], L2, L3);
print_mia([H|_]=L, L1, [{X,_}|_]=L2, L3)
when X =/= H ->
print_mia(L, L1, [{H,0}|L2], L3);
print_mia([H|_]=L, L1, [], L3) ->
print_mia(L, L1, [{H,0}], L3);
print_mia([H|_]=L, L1, L2, L3) ->
print_mia(L, L1, L2, [{H,0}|L3]);
print_mia([], _, _, _) ->
ok.
%% Turn a list of step counts into a {Value, Occurrences} histogram.
anal_values(L) ->
do_anal(lists:sort(L), 1, []).
do_anal([H,H|T], C, Acc) ->
do_anal([H|T], C+1, Acc);
do_anal([OldH|T], C, Acc) ->
do_anal(T, 1, [{OldH, C}|Acc]);
do_anal([], C, Acc) ->
lists:reverse(Acc).
%% Plain arithmetic mean (crashes on [] by design — caller's contract).
avg(L) -> do_avg(L, 0, 0).
do_avg([H|T], Cnt, Sum) ->
do_avg(T, Cnt+1, Sum+H);
do_avg([], Cnt, Sum) ->
Sum / Cnt.
%% Average lookup cost at full capacity across growing store sizes.
check_degrade() ->
[do_check_degrade(ext)
,do_check_degrade(int_linear)
,do_check_degrade(int_quadric)
].
do_check_degrade(Type) ->
OutF = fun avg/1,
[Type,
lists:map(fun(Size) ->
F = fun() -> bsn:new(Type, Size) end,
do_check(OutF, F, Size, 0.5, 1)
end, [10, 100, 500, 1000, 5000, 10000])].
%% Build a completely filled ext store and return the raw NIF resource.
test_filled(ElemCount) ->
Res = bsn:new(ext, ElemCount),
{CaseList, ElemList} = gen(ElemCount, 1),
Vals = fill_values(Res, ElemList, []),
{bsn_ext, R} = Res,
R.
-ifdef(TEST).
do_test_() ->
[?_assert(test_type(bsn:new(ext, 100), 100))
,?_assert(test_type(bsn:new(int_linear, 100), 100))
,?_assert(test_type(bsn:new(int_quadric, 100), 100))
].
-endif.
%% Every value actually inserted must be found (negative step count).
test_type(Res, ElemCount) ->
{CaseList, ElemList} = gen(ElemCount, 1),
Vals = fill_values(Res, ElemList, []),
%Vals = ElemList,
lists:all(fun(X) -> bsn:in(Res, integer_to_binary(X)) < 0 end, Vals).

+ 160
- 0
src/nifSrc/couchdb_hqeue/hqueue.erl 查看文件

@ -0,0 +1,160 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
%% Erlang API for the hqueue NIF: a mutable max-heap priority queue held
%% in a NIF resource. Functions ending in ?NOT_LOADED are implemented
%% natively and replaced on load.
-module(hqueue).
-on_load(init/0).
-export([
new/0,
new/1,
extract_max/1,
insert/3,
from_list/1,
from_list/2,
to_list/1,
heap_size/1,
info/1,
is_empty/1,
max_elems/1,
size/1,
resize_heap/2,
scale_by/2,
set_max_elems/2
]).
-define(NOT_LOADED, not_loaded(?LINE)).
-type hqueue() :: term().
-type hqueue_priority() :: float(). %% this should be non_neg_float()
-type hqueue_val() :: term().
-type hqueue_elem() :: {hqueue_priority(), hqueue_val()}.
-type hqueue_option() :: {max_elems, pos_integer()}
| {heap_size, pos_integer()}.
-type hqueue_stat() :: {max_elems, pos_integer()}
| {heap_size, pos_integer()}
| {size, non_neg_integer()}.
-export_type([hqueue/0]).
%% Create a queue with default options.
-spec new() -> {ok, hqueue()}.
new() ->
new([]).
%% Create a queue; options bound the element count and initial heap size.
-spec new([hqueue_option()]) -> {ok, hqueue()}.
new(_Options) ->
?NOT_LOADED.
%% Extraction order is undefined for entries with duplicate priorities
-spec extract_max(hqueue()) -> hqueue_elem() | {error, empty}.
extract_max(_HQ) ->
?NOT_LOADED.
%% Insert a value with the given priority (mutates the resource).
-spec insert(hqueue(), hqueue_priority(), hqueue_val()) -> ok | {error, full}.
insert(_HQ, _Priority, _Val) ->
?NOT_LOADED.
%% Number of elements currently stored.
-spec size(hqueue()) -> integer().
size(_HQ) ->
?NOT_LOADED.
-spec max_elems(hqueue()) -> integer().
max_elems(_HQ) ->
?NOT_LOADED.
%% Returns old max elems or error if NewMaxElems < size(HQ)
-spec set_max_elems(hqueue(), pos_integer()) -> pos_integer()
| {error, too_small}.
set_max_elems(_HQ, _NewMaxElems) ->
?NOT_LOADED.
-spec is_empty(hqueue()) -> boolean().
is_empty(HQ) ->
hqueue:size(HQ) =:= 0.
-spec to_list(hqueue()) -> [hqueue_elem()].
to_list(_HQ) ->
?NOT_LOADED.
%% Build a queue from {Priority, Value} pairs with default options.
-spec from_list([hqueue_elem()]) -> {ok, hqueue()}.
from_list(Elems) ->
from_list(Elems, []).
%% Build a queue from {Priority, Value} pairs. Note: insert errors
%% ({error, full}) are silently ignored by lists:foreach/2 here.
-spec from_list([hqueue_elem()], [hqueue_option()]) -> {ok, hqueue()}.
from_list(Elems, Options) ->
{ok, HQ} = ?MODULE:new(Options),
lists:foreach(fun({Priority, Val}) ->
?MODULE:insert(HQ, Priority, Val)
end, Elems),
{ok, HQ}.
%% Multiply every stored priority by Factor in place.
-spec scale_by(hqueue(), float()) -> ok.
scale_by(_HQ, _Factor) ->
?NOT_LOADED.
%% Returns old heap size or error if NewHeapSize < size(HQ)
-spec resize_heap(hqueue(), pos_integer()) -> pos_integer()
| {error, too_small}.
resize_heap(_HQ, _NewHeapSize) ->
?NOT_LOADED.
-spec heap_size(hqueue()) -> pos_integer().
heap_size(_HQ) ->
?NOT_LOADED.
%% Snapshot of the queue's stats as a proplist.
-spec info(hqueue()) -> [hqueue_stat()].
info(HQ) ->
[
{heap_size, hqueue:heap_size(HQ)},
{max_elems, hqueue:max_elems(HQ)},
{size, hqueue:size(HQ)}
].
%% on_load hook: resolve priv/ (with fallback for unloaded apps) and load
%% the hqueue NIF library.
init() ->
PrivDir = case code:priv_dir(?MODULE) of
{error, _} ->
EbinDir = filename:dirname(code:which(?MODULE)),
AppPath = filename:dirname(EbinDir),
filename:join(AppPath, "priv");
Path ->
Path
end,
erlang:load_nif(filename:join(PrivDir, "hqueue"), 0).
%% Raise a descriptive error when a NIF stub is called before load.
not_loaded(Line) ->
erlang:nif_error({not_loaded, [{module, ?MODULE}, {line, Line}]}).

+ 0
- 0
src/nifSrc/cq/cq.erl 查看文件


+ 51
- 0
src/nifSrc/enlfq/enlfq.erl 查看文件

@ -0,0 +1,51 @@
%% Erlang API for the enlfq NIF (lock-free concurrent queue backed by
%% moodycamel::ConcurrentQueue). Stubs are replaced on load.
-module(enlfq).
-on_load(load_nif/0).
-define(NOT_LOADED, not_loaded(?LINE)).
%% API exports
-export([new/0, push/2, pop/1]).
%%====================================================================
%% API functions
%%====================================================================
-spec(new() -> {ok, QueueRef :: reference()} | badarg | {error, Reason :: binary()}).
new() ->
?NOT_LOADED.
-spec(push(QueueRef :: reference(), Data :: any()) ->
true | {error, Reason :: binary()}).
push(_QueueRef, _Data) ->
?NOT_LOADED.
-spec(pop(QueueRef :: reference()) ->
{ok, Data :: any()} | empty | {error, Reason :: binary()}).
pop(_QueueRef) ->
?NOT_LOADED.
%%====================================================================
%% Internal functions
%%====================================================================
%% nif functions
%% on_load hook. NOTE(review): prints to stdout on every load — noisy for
%% a library; consider removing or routing through logger.
load_nif() ->
SoName = get_priv_path(?MODULE),
io:format(<<"Loading library: ~p ~n">>, [SoName]),
ok = erlang:load_nif(SoName, 0).
%% Resolve priv/File, falling back to "<app root>/priv" when the
%% application is not loaded under a standard layout.
get_priv_path(File) ->
case code:priv_dir(?MODULE) of
{error, bad_name} ->
Ebin = filename:dirname(code:which(?MODULE)),
filename:join([filename:dirname(Ebin), "priv", File]);
Dir ->
filename:join(Dir, File)
end.
%% Raise a descriptive error when a NIF stub is called before load.
not_loaded(Line) ->
erlang:nif_error({not_loaded, [{module, ?MODULE}, {line, Line}]}).

+ 71
- 0
src/nifSrc/enlfq/testing/benchmark.erl 查看文件

@ -0,0 +1,71 @@
%% Micro-benchmarks for the enlfq queue: measures loop overhead, push
%% throughput and pop throughput, serially and across N processes.
-module(benchmark).
-author("silviu.caragea").

-export([
    benchmark_serial/2,
    benchmark_concurrent/3
]).

%% Run the three phases in the calling process and print the timings.
benchmark_serial(Elements, MaxPriority) ->
    rand:uniform(), %just to init the seed
    {ok, Q} = enlfq:new(),
    {Overhead, ok} = timer:tc(fun() -> loop_noop(Elements, MaxPriority) end),
    {PushTime, ok} = timer:tc(fun() -> loop_push(Elements, Q, MaxPriority) end),
    {PopTime, ok} = timer:tc(fun() -> drain(Q) end),
    report(Overhead, PushTime, PopTime).

%% Split the element count over Procs worker processes (via multi_spawn)
%% and time each phase across all workers.
benchmark_concurrent(Procs, Elements, MaxPriority) ->
    {ok, Q} = enlfq:new(),
    PerProc = round(Elements / Procs),
    NoopFun = fun() -> loop_noop(PerProc, MaxPriority) end,
    PushFun = fun() -> loop_push(PerProc, Q, MaxPriority) end,
    DrainFun = fun() -> drain(Q) end,
    {Overhead, _} = timer:tc(fun() -> multi_spawn:do_work(NoopFun, Procs) end),
    {PushTime, _} = timer:tc(fun() -> multi_spawn:do_work(PushFun, Procs) end),
    {PopTime, _} = timer:tc(fun() -> multi_spawn:do_work(DrainFun, Procs) end),
    report(Overhead, PushTime, PopTime).

%% Print the three phase timings, converted from microseconds to ms.
report(T0, T1, T2) ->
    io:format(<<"insert overhead: ~p ms insert time: ~p ms pop time: ~p ms ~n">>,
              [T0 / 1000, T1 / 1000, T2 / 1000]).

%% Push N empty tuples onto the queue, asserting each push succeeds.
loop_push(0, _Q, _Max) ->
    ok;
loop_push(N, Q, Max) ->
    %% El = rand:uniform(Max),
    true = enlfq:push(Q, {}),
    loop_push(N - 1, Q, Max).

%% Pop until the queue reports empty.
drain(Q) ->
    case enlfq:pop(Q) of
        empty ->
            ok;
        {ok, _} ->
            drain(Q)
    end.

%% Loop N times doing nothing — baseline for the loop overhead itself.
loop_noop(0, _Max) ->
    ok;
loop_noop(N, Max) ->
    %% rand:uniform(Max),
    loop_noop(N - 1, Max).

+ 23
- 0
src/nifSrc/enlfq/testing/multi_spawn.erl 查看文件

@ -0,0 +1,23 @@
%% Fan-out helper: run a fun in Count linked processes and block until
%% every one of them has exited.
-module(multi_spawn).
-author("silviu.caragea").

-export([do_work/2]).

%% Spawn Count linked copies of Fun, then wait for Count 'EXIT' messages.
%% trap_exit is enabled so worker exits arrive as messages instead of
%% killing the caller.
do_work(Fun, Count) ->
    process_flag(trap_exit, true),
    lists:foreach(fun(_N) -> spawn_link(Fun) end, lists:seq(1, Count)),
    await_exits(Count).

%% Consume one 'EXIT' message per spawned worker (any exit reason counts).
await_exits(0) ->
    ok;
await_exits(Remaining) ->
    receive
        {'EXIT', _Pid, _Reason} ->
            await_exits(Remaining - 1)
    end.

+ 159
- 0
src/nifSrc/enq/enq.erl 查看文件

@ -0,0 +1,159 @@
%%%-----------------------------------------------------------------------------
%%% @author s@shuvatov.ru
%%% @copyright 2018 Sergei Shuvatov
%%% @doc
%%% Native implemented queue with TTL.
%%% By default queue type is FIFO and TTL is 0 (disabled), size unlimited.
%%% Usage:
%%% {ok, Q} = enq:new([fifo,
%%% {ttl, 10000}, % 10 seconds
%%% {max_size, 1000}]), % maximum 1000 elements
%%% ok = enq:push(Q, test), % push atom 'test' to the queue
%%% [test] = enq:pop(Q), % pop one element from the queue
%%% [] = enq:pop(Q), % pop returns empty list if the queue is empty
%%% % pushed item can be any term
%%% ok = enq:push(Q, fun() -> io:format("some important job~n") end),
%%% 1 = enq:size(Q), % you can take length of the queue as efficiently as O(1)
%%% @end
%%%-----------------------------------------------------------------------------
-module(enq).
-author("Sergei Shuvatov").
%% API
-export([new/0,
new/1,
push/2,
pop/1,
size/1]).
-export_type([queue/0, option/0, error/0]).
-type queue() :: reference().
-type option() :: fifo |
lifo |
{ttl, Microseconds :: non_neg_integer()} |
{max_size, Count :: non_neg_integer()}.
-type error() :: max_size.
%%==============================================================================
%% API
%%==============================================================================
%% Same as enq:new([fifo, {ttl, 0}]).
-spec new() -> {ok, enq:queue()} | {error, enq:error()}.
new() ->
new([]).
%% Returns a new queue or error in case of memory allocation error.
-spec new([option()]) -> {ok, enq:queue()} | {error, enq:error()}.
new(Options) ->
enq_nif:new(Options).
%% Pushes Item on top (LIFO) or tail (FIFO) of Queue.
%% Items are serialized with term_to_binary before crossing into the NIF.
-spec push(Queue :: enq:queue(), Item :: any()) -> ok | {error, enq:error()}.
push(Queue, Item) ->
enq_nif:push(Queue, erlang:term_to_binary(Item)).
%% Returns next item from the Queue.
%% The NIF returns a (possibly empty) list of binaries; each is
%% deserialized back into a term here.
-spec pop(Queue :: enq:queue()) -> [] | [any()].
pop(Queue) ->
[ erlang:binary_to_term(I) || I <- enq_nif:pop(Queue) ].
%% Returns Queue length. Speed does not depend on number of elements.
-spec size(Queue :: enq:queue()) -> non_neg_integer().
size(Queue) ->
enq_nif:size(Queue).
%%==============================================================================
%% Tests
%%==============================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-define(log(F, A), io:format(standard_error, "~p:line ~p: " F "~n", [?FILE, ?LINE | A])).
-define(log(F), ?log(F, [])).
fifo_test() ->
fifo_test(1000000).
%% Push N items, assert FIFO order on the way out, and log rough timings.
fifo_test(N) ->
{ok, Q} = enq:new(),
T1 = erlang:timestamp(),
% fill the queue with N elements
fill(Q, N),
Diff1 = timer:now_diff(erlang:timestamp(), T1),
?log("FIFO fill time: ~p ms", [Diff1 / 1000]),
% ensure that size of queue matches N
N = enq:size(Q),
T2 = erlang:timestamp(),
% pop all elements
fifo_pop_all(Q, N),
Diff2 = timer:now_diff(erlang:timestamp(), T2),
?log("FIFO pop time: ~p ms", [Diff2 / 1000]),
% size of the queue must be 0
0 = enq:size(Q).
%% Push N, N-1, ..., 1 onto the queue.
fill(_Q, 0) ->
ok;
fill(Q, N) ->
ok = enq:push(Q, N),
fill(Q, N - 1).
%% FIFO: items come out in insertion order (N first, counting down).
fifo_pop_all(Q, 0) ->
[] = enq:pop(Q);
fifo_pop_all(Q, N) ->
[N] = enq:pop(Q),
fifo_pop_all(Q, N - 1).
%% NOTE(review): timing-based — sleeps near the 100 ms TTL boundary, so
%% this test can be flaky on a loaded machine.
ttl_test() ->
{ok, Q} = enq:new([{ttl, 100}]),
enq:push(Q, test),
timer:sleep(95),
[test] = enq:pop(Q),
[] = enq:pop(Q),
enq:push(Q, test),
timer:sleep(105),
[] = enq:pop(Q).
lifo_test() ->
lifo_test(1000000).
%% Same as fifo_test/1 but expects reverse (stack) order on pop.
lifo_test(N) ->
{ok, Q} = enq:new([lifo]),
T1 = erlang:timestamp(),
% fill the queue with N elements
fill(Q, N),
Diff1 = timer:now_diff(erlang:timestamp(), T1),
?log("LIFO fill time: ~p ms", [Diff1 / 1000]),
% ensure that size of queue matches N
N = enq:size(Q),
T2 = erlang:timestamp(),
% pop all elements
lifo_pop_all(Q, N),
Diff2 = timer:now_diff(erlang:timestamp(), T2),
?log("LIFO pop time: ~p ms", [Diff2 / 1000]),
% size of the queue must be 0
0 = enq:size(Q).
lifo_pop_all(Q, N) ->
lifo_pop_all(Q, 1, N).
%% LIFO: fill/2 pushed N..1, so pops yield 1..N.
lifo_pop_all(Q, I, N) when I > N ->
[] = enq:pop(Q);
lifo_pop_all(Q, I, N) ->
[I] = enq:pop(Q),
lifo_pop_all(Q, I + 1, N).
%% A full queue rejects pushes until TTL expiry frees a slot.
max_size_test() ->
{ok, Q} = enq:new([{ttl, 100}, {max_size, 1}]),
ok = enq:push(Q, test),
timer:sleep(50),
{error, max_size} = enq:push(Q, 123),
timer:sleep(55),
ok = enq:push(Q, 321),
[321] = enq:pop(Q),
[] = enq:pop(Q).
-endif. % TEST

+ 63
- 0
src/nifSrc/enq/enq_nif.erl 查看文件

@ -0,0 +1,63 @@
%%%-------------------------------------------------------------------
%%% @author s@shuvatov.ru
%%% @copyright 2018 Sergei Shuvatov
%%%-------------------------------------------------------------------
-module(enq_nif).
-author("Sergei Shuvatov").
%% Thin NIF binding for enq; all API stubs below are replaced on load.
%% API
-export([new/1,
push/2,
pop/1,
size/1]).
-on_load(load_nif/0).
-define(app, enq).
-define(log(F, A), io:format(standard_error, "~p:~p: " F, [?MODULE, ?LINE | A])).
-define(not_loaded(), not_loaded(?LINE)).
%%==============================================================================
%% API
%%==============================================================================
%% Create a queue resource; see enq:new/1 for options.
new(_Options) ->
?not_loaded().
%% Push a term_to_binary-encoded item.
push(_Queue, _Item) ->
?not_loaded().
%% Pop: returns [] or a single-element list of encoded binaries.
pop(_Queue) ->
?not_loaded().
%% O(1) element count.
size(_Queue) ->
?not_loaded().
%%==============================================================================
%% Internal functions
%%==============================================================================
%% on_load hook: resolve and load the shared library.
load_nif() ->
SoName = get_priv_path(?MODULE),
% ?log("Loading library: ~p ~n", [SoName]),
ok = erlang:load_nif(SoName, 0).
%% priv/File under the owning app, with an ebin-relative fallback.
get_priv_path(File) ->
case code:priv_dir(get_app()) of
{error, bad_name} ->
Ebin = filename:dirname(code:which(?MODULE)),
filename:join([filename:dirname(Ebin), "priv", File]);
Dir ->
filename:join(Dir, File)
end.
%% Application owning this module; defaults to 'enq' before app start.
get_app() ->
case application:get_application(?MODULE) of
{ok, App} ->
App;
_ ->
?app
end.
%% Raise a descriptive error when a NIF stub is called before load.
not_loaded(Line) ->
erlang:nif_error({not_loaded, [{module, ?MODULE}, {line, Line}]}).

+ 103
- 0
src/nifSrc/etsq/etsq.erl 查看文件

@ -0,0 +1,103 @@
%% @author vinod
%% @doc Named FIFO queues held in a NIF (ets-like interface). Terms are
%% serialized with term_to_binary on the way in and decoded on the way out.
-module(etsq).
-on_load(load_nif/0).
-export([load_nif/0,
new/1,
info/1,
push/2,
pop/1,
front/1]).
%% ====================================================================
%% API functions
%% ====================================================================
-define(LIB_BASE_NAME, "etsq").
-define(LIB_NIF_VSN, 1).
-define(LIB_APP_NAME, etsq).
%% Create a new named queue (NIF).
-spec new(atom()) -> ok | {error, already_exists}.
new(_Name) ->
erlang:nif_error({nif_not_loaded,module,?MODULE,line,?LINE}).
%% Print/collect queue info (NIF).
-spec info(atom()) -> ok.
info(_Name) ->
erlang:nif_error({nif_not_loaded,module,?MODULE,line,?LINE}).
%% Append a term to the queue.
-spec push(atom(), term()) -> ok.
push(Name, Term) ->
push_back(Name, term_to_binary(Term)).
%% Remove and return the front term.
-spec pop(atom()) -> ok | {error, empty}.
pop(Name) ->
get_val(pop_front(Name)).
%% Return the front term without removing it.
-spec front(atom()) -> ok | {error, empty}.
front(Name) ->
get_val(get_front(Name)).
%% Decode a NIF result: binaries are encoded terms, anything else (e.g.
%% {error, empty}) is passed through unchanged.
get_val(Value) when is_binary(Value) ->
binary_to_term(Value);
get_val(Value) ->
Value.
%% NIF stubs, replaced on load.
push_back(_Name, _Bin) ->
erlang:nif_error({nif_not_loaded,module,?MODULE,line,?LINE}).
pop_front(_Name) ->
erlang:nif_error({nif_not_loaded,module,?MODULE,line,?LINE}).
get_front(_Name) ->
erlang:nif_error({nif_not_loaded,module,?MODULE,line,?LINE}).
%% Load the NIF, mirroring OTP's crypto loader: prefer a build-type
%% suffixed library (etsq.debug, etc.) if present under priv/lib, retry
%% under priv/lib/<system_architecture> when the first load fails.
-spec load_nif() -> ok | {error, term()}.
load_nif() ->
LibBaseName = ?LIB_BASE_NAME,
PrivDir = code:priv_dir(etsq),
LibName = case erlang:system_info(build_type) of
opt ->
LibBaseName;
Type ->
LibTypeName = LibBaseName ++ "." ++ atom_to_list(Type),
case (filelib:wildcard(
filename:join(
[PrivDir,
"lib",
LibTypeName ++ "*"])) /= []) orelse
(filelib:wildcard(
filename:join(
[PrivDir,
"lib",
erlang:system_info(system_architecture),
LibTypeName ++ "*"])) /= []) of
true -> LibTypeName;
false -> LibBaseName
end
end,
Lib = filename:join([PrivDir, "lib", LibName]),
Status = case erlang:load_nif(Lib, ?LIB_NIF_VSN) of
ok -> ok;
{error, {load_failed, _}}=Error1 ->
ArchLibDir =
filename:join([PrivDir, "lib",
erlang:system_info(system_architecture)]),
Candidate =
filelib:wildcard(filename:join([ArchLibDir,LibName ++ "*" ])),
case Candidate of
[] -> Error1;
_ ->
ArchLib = filename:join([ArchLibDir, LibName]),
erlang:load_nif(ArchLib, ?LIB_NIF_VSN)
end;
Error1 -> Error1
end,
case Status of
ok -> ok;
{error, {E, Str}} ->
error_logger:error_msg("Unable to load ~p nif library. "
"Failed with error:~n\"~p, ~s\"~n", [?LIB_APP_NAME, E, Str]),
Status
end.

+ 65
- 0
src/nifSrc/etsq/etsq_tests.erl 查看文件

@ -0,0 +1,65 @@
%% @author vinod
%% @doc Benchmark helpers comparing etsq NIF queues against plain ets:
%% wall-clock (time/2,3) and scheduler runtime (stats/2,3) per workload.
-module(etsq_tests).
-compile(export_all).
-export([init/0,
time/3,
stats/3]).
-type microseconds() :: pos_integer().
-type milliseconds() :: pos_integer().
%% ====================================================================
%% API functions
%% ====================================================================
%% Create the NIF queue and the ets table the workloads operate on.
%% Must be called once before any time/stats run.
init() ->
etsq:new(queue),
ets:new(tab, [named_table, public]).
%% Wall-clock time of one workload (Op is run_ets | run_queue).
-spec time(run_ets | run_queue, pos_integer()) -> microseconds().
time(Op, NumOp) ->
{Time, _} = timer:tc(?MODULE, Op, [NumOp]),
Time.
%% Wall-clock time of the workload fanned out over NumProc processes.
-spec time(pos_integer(), run_ets | run_queue, pos_integer()) -> microseconds().
time(NumProc, Op, NumOp) ->
{Time, _} = timer:tc(?MODULE, spawn, [NumProc, Op, NumOp]),
Time.
%% Scheduler runtime (erlang:statistics(runtime)) of one workload.
-spec stats(run_ets | run_queue, pos_integer()) -> milliseconds().
stats(Op, NumOp) ->
erlang:statistics(runtime),
?MODULE:Op(NumOp),
{_, Time} = erlang:statistics(runtime),
Time.
%% Scheduler runtime of the workload fanned out over NumProc processes.
-spec stats(pos_integer(), run_ets | run_queue, pos_integer()) -> milliseconds().
stats(NumProc, Op, NumOp) ->
erlang:statistics(runtime),
?MODULE:spawn(NumProc, Op, NumOp),
{_, Time} = erlang:statistics(runtime),
Time.
%% Insert then take Num keyed entries in the shared ets table.
run_ets(Num) ->
Self = self(),
Data = lists:seq(1, 100),
L = lists:seq(1, Num),
[ets:insert(tab, {{Self, K}, Data}) || K <- L],
[ets:take(tab, {Self, K}) || K <- L].
%% Push then pop Num entries through the etsq NIF queue.
run_queue(Num) ->
Self = self(),
Data = lists:seq(1, 100),
L = lists:seq(1, Num),
[etsq:push(queue, {{Self, K}, Data}) || K <- L],
[etsq:pop(queue) || _ <- L].
%% Run Op(NumOp) in NumProc spawned processes and wait for completion.
%% Relies on export_all so ?MODULE:Op/1 is callable via apply.
spawn(NumProc, Op, NumOp) ->
Pid = self(),
L = lists:seq(1, NumProc),
[spawn_link(fun() -> ?MODULE:Op(NumOp), Pid ! done end) || _ <- L],
[receive done -> ok end || _ <- L].

+ 102
- 0
src/nifSrc/gb_lru/btree_lru.erl 查看文件

@ -0,0 +1,102 @@
%% Erlang API for the btreelru NIF: an LRU cache backed by an ordered
%% B-tree resource. All stubs below are replaced by native code on load.
-module(btree_lru).
-export([create/1,
close/1,
register_pid/2,
unregister_pid/1,
get_registered_pid/1,
set_max_size/2,
get_max_size/1,
get_size/1,
write/2,
write/3,
read/2,
next/2,
prev/2,
remove/2,
seek/2,
iterate_next/2,
oldest/1,
latest/1,
last/1,
first/1]).
-on_load(init/0).
%% on_load hook: locate the priv dir (falling back to ../priv next to the
%% beam when the app is not loaded) and load btreelru_nif.
init() ->
Dir = "../priv",
PrivDir =
case code:priv_dir(?MODULE) of
{error, _} ->
case code:which(?MODULE) of
Filename when is_list(Filename) ->
filename:join([filename:dirname(Filename), Dir]);
_ ->
Dir
end;
Path -> Path
end,
Lib = filename:join(PrivDir, "btreelru_nif"),
erlang:load_nif(Lib, 0).
%% Convenience wrapper: write a {Key, Value} pair.
write(Tab, {Key, Value}) ->
write(Tab, Key, Value).
%% NIF stubs below; each raises if the native library is not loaded.
create(_Maxsize) ->
erlang:nif_error(nif_library_not_loaded).
register_pid(_Tab, _Pid) ->
erlang:nif_error(nif_library_not_loaded).
unregister_pid(_Tab) ->
erlang:nif_error(nif_library_not_loaded).
get_registered_pid(_Tab) ->
erlang:nif_error(nif_library_not_loaded).
set_max_size(_Tab, _MaxSize) ->
erlang:nif_error(nif_library_not_loaded).
get_max_size(_Tab) ->
erlang:nif_error(nif_library_not_loaded).
get_size(_Tab) ->
erlang:nif_error(nif_library_not_loaded).
write(_Tab, _Key, _Value) ->
erlang:nif_error(nif_library_not_loaded).
read(_Tab, _Key) ->
erlang:nif_error(nif_library_not_loaded).
next(_Tab, _Key) ->
erlang:nif_error(nif_library_not_loaded).
prev(_Tab, _Key) ->
erlang:nif_error(nif_library_not_loaded).
remove(_Tab, _Key) ->
erlang:nif_error(nif_library_not_loaded).
seek(_Tab, _Key) ->
erlang:nif_error(nif_library_not_loaded).
iterate_next(_Tab, _It) ->
erlang:nif_error(nif_library_not_loaded).
oldest(_Tab) ->
erlang:nif_error(nif_library_not_loaded).
latest(_Tab) ->
erlang:nif_error(nif_library_not_loaded).
close(_Tab) ->
erlang:nif_error(nif_library_not_loaded).
last(_Tab) ->
erlang:nif_error(nif_library_not_loaded).
first(_Tab) ->
erlang:nif_error(nif_library_not_loaded).

+ 59
- 0
src/nifSrc/gb_lru/btree_lru_test.erl 查看文件

@ -0,0 +1,59 @@
%% Manual benchmark helpers for btree_lru vs ets (10M sequential pairs).
%% Function names are load-bearing: timing_* call them through
%% timer:tc(?MODULE, Name, Args), so they must stay exported (export_all).
-module(btree_lru_test).
-compile(export_all).
-export([create/0,
create/1]).

%% Create an LRU with a ~1 TB byte limit (effectively unbounded).
create() ->
    create(1024*1024*1024*1000).

create(Size) ->
    {ok, _Tab} = btree_lru:create(Size).

%% Write 10M {N, N} pairs into the LRU.
write(Tab) ->
    write(Tab, [{X, X} || X <- lists:seq(1, 10000000)]).

%% Assertive bulk write: every btree_lru:write/2 must return ok.
write(Tab, Entries) ->
    lists:foreach(fun(Entry) -> ok = btree_lru:write(Tab, Entry) end, Entries).

%% Assertive bulk read: every key must come back with its stored value.
read(Tab, Entries) ->
    lists:foreach(fun({Key, Val}) -> {Key, Val} = btree_lru:read(Tab, Key) end, Entries).

timing_write(Tab) ->
    timer:tc(?MODULE, write, [Tab, [{X, X} || X <- lists:seq(1, 10000000)]]).

timing_read(Tab) ->
    timer:tc(?MODULE, read, [Tab, [{X, X} || X <- lists:seq(1, 10000000)]]).

timing_ets_write(Tab) ->
    timer:tc(?MODULE, ets_write, [Tab, [{X, X} || X <- lists:seq(1, 10000000)]]).

timing_ets_read(Tab) ->
    timer:tc(?MODULE, ets_read, [Tab, [{X, X} || X <- lists:seq(1, 10000000)]]).

%% ets equivalents of the assertive bulk write/read above.
ets_write(Tab, Entries) ->
    lists:foreach(fun(Entry) -> true = ets:insert(Tab, Entry) end, Entries).

ets_read(Tab, Entries) ->
    lists:foreach(fun({Key, Val}) -> [{Key, Val}] = ets:lookup(Tab, Key) end, Entries).

+ 6
- 0
src/nifSrc/gb_lru/gb_lru.app.src 查看文件

@ -0,0 +1,6 @@
%% OTP application resource skeleton for gb_lru (NIF library; no
%% application callback module, nothing to start).
%% NOTE(review): 'applications' is usually expected to list at least
%% kernel and stdlib — confirm against the release tooling in use.
{application, gb_lru,
[{description, "gb_lru"},
{vsn, "0.1"},
{registered, []},
{applications, []}
]}.

+ 19
- 0
src/nifSrc/native_array/native_array.erl 查看文件

@ -0,0 +1,19 @@
%% Erlang API for the native_array NIF: fixed-size off-heap byte arrays
%% addressed by an integer index. Stubs are replaced on load.
-module(native_array).
-export([new/2, get/2, put/3, delete/1]).
-on_load(init/0).
%% on_load hook.
%% Fix: the original hard-coded "./native_array_nif", which only works
%% when the emulator's CWD happens to contain the shared object. Prefer
%% the application's priv dir and keep the old relative path as the
%% fallback, so existing CWD-based setups keep working.
init() ->
    Lib = case code:priv_dir(?MODULE) of
        {error, _} -> "./native_array_nif";
        Dir -> filename:join(Dir, "native_array_nif")
    end,
    ok = erlang:load_nif(Lib, 0).
%% NIF stubs. erlang:nif_error/1 (instead of exit/1) is the documented
%% idiom: Dialyzer then knows these bodies are placeholders. The stubs
%% are unreachable in practice because a failed load aborts module
%% loading via the on_load hook.
new(_Idx, _Length) ->
    erlang:nif_error(nif_library_not_loaded).
get(_Idx, _Offset) ->
    erlang:nif_error(nif_library_not_loaded).
put(_Idx, _Offset, _NewVal) ->
    erlang:nif_error(nif_library_not_loaded).
delete(_Idx) ->
    erlang:nif_error(nif_library_not_loaded).

Loading…
取消
儲存