multibit, fatbit: make _size build-time only

This commit makes mmbit_size() and fatbit_size() compile-time only, and
adds a resource limit for very large multibits.
This commit is contained in:
Justin Viiret
2016-12-05 16:20:52 +11:00
committed by Matthew Barr
parent 8b7b06d2a4
commit e271781d95
19 changed files with 201 additions and 129 deletions

View File

@@ -43,17 +43,19 @@
#include "nfa/nfa_api_queue.h"
#include "rose/rose_internal.h"
#include "util/fatbit.h"
#include "util/multibit.h"
/**
* Determine the space required for a correctly aligned array of fatbit
* structure, laid out as:
*
* - an array of num_entries pointers, each to a fatbit.
* - an array of fatbit structures, each of size fatbit_size(num_keys).
* - an array of fatbit structures, each of size fatbit_len.
*
* fatbit_len should have been determined at compile time, via the
* fatbit_size() call.
*/
static
size_t fatbit_array_size(u32 num_entries, u32 num_keys) {
size_t fatbit_array_size(u32 num_entries, u32 fatbit_len) {
size_t len = 0;
// Array of pointers to each fatbit entry.
@@ -61,7 +63,7 @@ size_t fatbit_array_size(u32 num_entries, u32 num_keys) {
// Fatbit entries themselves.
len = ROUNDUP_N(len, alignof(struct fatbit));
len += (size_t)fatbit_size(num_keys) * num_entries;
len += (size_t)fatbit_len * num_entries;
return ROUNDUP_N(len, 8); // Round up for potential padding.
}
@@ -71,17 +73,19 @@ size_t fatbit_array_size(u32 num_entries, u32 num_keys) {
static
hs_error_t alloc_scratch(const hs_scratch_t *proto, hs_scratch_t **scratch) {
u32 queueCount = proto->queueCount;
u32 deduperCount = proto->deduper.log_size;
u32 activeQueueArraySize = proto->activeQueueArraySize;
u32 deduperCount = proto->deduper.dkey_count;
u32 deduperLogSize = proto->deduper.log_size;
u32 bStateSize = proto->bStateSize;
u32 tStateSize = proto->tStateSize;
u32 fullStateSize = proto->fullStateSize;
u32 anchored_literal_region_len = proto->anchored_literal_region_len;
u32 anchored_literal_region_width = proto->anchored_literal_count;
u32 anchored_literal_fatbit_size = proto->anchored_literal_fatbit_size;
u32 som_store_size = proto->som_store_count * sizeof(u64a);
u32 som_attempted_store_size = proto->som_store_count * sizeof(u64a);
u32 som_now_size = fatbit_size(proto->som_store_count);
u32 som_attempted_size = fatbit_size(proto->som_store_count);
u32 som_now_size = proto->som_fatbit_size;
u32 som_attempted_size = proto->som_fatbit_size;
struct hs_scratch *s;
struct hs_scratch *s_tmp;
@@ -91,18 +95,18 @@ hs_error_t alloc_scratch(const hs_scratch_t *proto, hs_scratch_t **scratch) {
assert(anchored_literal_region_len < 8 * sizeof(s->al_log_sum));
size_t anchored_literal_region_size = fatbit_array_size(
anchored_literal_region_len, anchored_literal_region_width);
anchored_literal_region_len, proto->anchored_literal_fatbit_size);
size_t delay_region_size =
fatbit_array_size(DELAY_SLOT_COUNT, proto->delay_count);
fatbit_array_size(DELAY_SLOT_COUNT, proto->delay_fatbit_size);
// the size is all the allocated stuff, not including the struct itself
size_t size = queue_size + 63
+ bStateSize + tStateSize
+ fullStateSize + 63 /* cacheline padding */
+ fatbit_size(proto->handledKeyCount) /* handled roles */
+ fatbit_size(queueCount) /* active queue array */
+ 2 * fatbit_size(deduperCount) /* need odd and even logs */
+ 2 * fatbit_size(deduperCount) /* ditto som logs */
+ proto->handledKeyFatbitSize /* handled roles */
+ activeQueueArraySize /* active queue array */
+ 2 * deduperLogSize /* need odd and even logs */
+ 2 * deduperLogSize /* ditto som logs */
+ 2 * sizeof(u64a) * deduperCount /* start offsets for som */
+ anchored_literal_region_size + qmpq_size
+ delay_region_size
@@ -157,7 +161,7 @@ hs_error_t alloc_scratch(const hs_scratch_t *proto, hs_scratch_t **scratch) {
for (u32 i = 0; i < DELAY_SLOT_COUNT; i++) {
s->delay_slots[i] = (struct fatbit *)current;
assert(ISALIGNED(s->delay_slots[i]));
current += fatbit_size(proto->delay_count);
current += proto->delay_fatbit_size;
}
current = ROUNDUP_PTR(current, alignof(struct fatbit *));
@@ -167,7 +171,7 @@ hs_error_t alloc_scratch(const hs_scratch_t *proto, hs_scratch_t **scratch) {
for (u32 i = 0; i < anchored_literal_region_len; i++) {
s->al_log[i] = (struct fatbit *)current;
assert(ISALIGNED(s->al_log[i]));
current += fatbit_size(anchored_literal_region_width);
current += anchored_literal_fatbit_size;
}
current = ROUNDUP_PTR(current, 8);
@@ -193,22 +197,22 @@ hs_error_t alloc_scratch(const hs_scratch_t *proto, hs_scratch_t **scratch) {
assert(ISALIGNED_N(current, 8));
s->aqa = (struct fatbit *)current;
current += fatbit_size(queueCount);
current += activeQueueArraySize;
s->handled_roles = (struct fatbit *)current;
current += fatbit_size(proto->handledKeyCount);
current += proto->handledKeyFatbitSize;
s->deduper.log[0] = (struct fatbit *)current;
current += fatbit_size(deduperCount);
current += deduperLogSize;
s->deduper.log[1] = (struct fatbit *)current;
current += fatbit_size(deduperCount);
current += deduperLogSize;
s->deduper.som_log[0] = (struct fatbit *)current;
current += fatbit_size(deduperCount);
current += deduperLogSize;
s->deduper.som_log[1] = (struct fatbit *)current;
current += fatbit_size(deduperCount);
current += deduperLogSize;
s->som_set_now = (struct fatbit *)current;
current += som_now_size;
@@ -293,19 +297,19 @@ hs_error_t hs_alloc_scratch(const hs_database_t *db, hs_scratch_t **scratch) {
proto->anchored_literal_region_len = rose->anchoredDistance;
}
if (rose->anchored_count > proto->anchored_literal_count) {
if (rose->anchored_fatbit_size > proto->anchored_literal_fatbit_size) {
resize = 1;
proto->anchored_literal_count = rose->anchored_count;
proto->anchored_literal_fatbit_size = rose->anchored_fatbit_size;
}
if (rose->delay_count > proto->delay_count) {
if (rose->delay_fatbit_size > proto->delay_fatbit_size) {
resize = 1;
proto->delay_count = rose->delay_count;
proto->delay_fatbit_size = rose->delay_fatbit_size;
}
if (rose->handledKeyCount > proto->handledKeyCount) {
if (rose->handledKeyFatbitSize > proto->handledKeyFatbitSize) {
resize = 1;
proto->handledKeyCount = rose->handledKeyCount;
proto->handledKeyFatbitSize = rose->handledKeyFatbitSize;
}
if (rose->tStateSize > proto->tStateSize) {
@@ -319,12 +323,22 @@ hs_error_t hs_alloc_scratch(const hs_database_t *db, hs_scratch_t **scratch) {
proto->som_store_count = som_store_count;
}
if (rose->somLocationFatbitSize > proto->som_fatbit_size) {
resize = 1;
proto->som_fatbit_size = rose->somLocationFatbitSize;
}
u32 queueCount = rose->queueCount;
if (queueCount > proto->queueCount) {
resize = 1;
proto->queueCount = queueCount;
}
if (rose->activeQueueArraySize > proto->activeQueueArraySize) {
resize = 1;
proto->activeQueueArraySize = rose->activeQueueArraySize;
}
u32 bStateSize = 0;
if (rose->mode == HS_MODE_BLOCK) {
bStateSize = rose->stateOffsets.end;
@@ -344,9 +358,10 @@ hs_error_t hs_alloc_scratch(const hs_database_t *db, hs_scratch_t **scratch) {
proto->fullStateSize = fullStateSize;
}
if (rose->dkeyCount > proto->deduper.log_size) {
if (rose->dkeyCount > proto->deduper.dkey_count) {
resize = 1;
proto->deduper.log_size = rose->dkeyCount;
proto->deduper.dkey_count = rose->dkeyCount;
proto->deduper.log_size = rose->dkeyLogSize;
}
if (resize) {