clearing the mempool can now keep more than a single element reserved.

This commit is contained in:
2013-08-25 16:16:38 +00:00
parent 28243b2e5f
commit e2bd3a4644
6 changed files with 120 additions and 50 deletions

View File

@@ -113,7 +113,7 @@ static void free_treehash_group(void *key) {
void *BKE_treehash_rebuild_from_treestore(void *treehash, BLI_mempool *treestore)
{
	/* Clear and refill in one pass: reserve buckets/entries for the number of
	 * elements already in the treestore so the hash does not have to grow
	 * while being refilled.
	 * NOTE: the old plain BLI_ghash_clear() call was superseded by the _ex
	 * variant and must not be issued twice. */
	BLI_ghash_clear_ex(treehash, NULL, free_treehash_group, BLI_mempool_count(treestore));
	fill_treehash(treehash, treestore);
	return treehash;
}

View File

@@ -68,6 +68,8 @@ void *BLI_ghash_lookup(GHash *gh, const void *key);
void **BLI_ghash_lookup_p(GHash *gh, const void *key);
bool BLI_ghash_remove(GHash *gh, void *key, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp);
void BLI_ghash_clear(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp);
void BLI_ghash_clear_ex(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
const unsigned int nentries_reserve);
void *BLI_ghash_pop(GHash *gh, void *key, GHashKeyFreeFP keyfreefp);
bool BLI_ghash_haskey(GHash *gh, const void *key);
int BLI_ghash_size(GHash *gh);

View File

@@ -73,6 +73,12 @@ void BLI_mempool_free(BLI_mempool *pool, void *addr)
__attribute__((nonnull(1, 2)))
#endif
;
void BLI_mempool_clear_ex(BLI_mempool *pool,
const int totelem_reserve)
#ifdef __GNUC__
__attribute__((nonnull(1)))
#endif
;
void BLI_mempool_clear(BLI_mempool *pool)
#ifdef __GNUC__
__attribute__((nonnull(1)))

View File

@@ -95,6 +95,16 @@ BLI_INLINE bool ghash_test_expand_buckets(const unsigned int nentries, const uns
return (nentries > nbuckets * 3);
}
/**
 * Increase the initial bucket count so it can hold a reserved amount of
 * entries without triggering a resize.
 *
 * \param gh The hash whose bucket count is grown (entries are not rehashed
 * here; callers allocate/resize the bucket array themselves).
 * \param nentries_reserve Number of entries the caller expects to insert.
 */
BLI_INLINE void ghash_buckets_reserve(GHash *gh, const unsigned int nentries_reserve)
{
	/* Step up through the prime-ish size table until the load factor test passes. */
	while (ghash_test_expand_buckets(nentries_reserve, gh->nbuckets)) {
		gh->nbuckets = hashsizes[++gh->cursize];
	}
}
/**
* Get the hash for a key.
*/
@@ -197,9 +207,7 @@ GHash *BLI_ghash_new_ex(GHashHashFP hashfp, GHashCmpFP cmpfp, const char *info,
/* if we have reserved the number of elements that this hash will contain */
if (nentries_reserve) {
while (ghash_test_expand_buckets(nentries_reserve, gh->nbuckets)) {
gh->nbuckets = hashsizes[++gh->cursize];
}
ghash_buckets_reserve(gh, nentries_reserve);
}
gh->buckets = MEM_callocN(gh->nbuckets * sizeof(*gh->buckets), "buckets");
@@ -375,8 +383,10 @@ bool BLI_ghash_haskey(GHash *gh, const void *key)
*
* \param keyfreefp Optional callback to free the key.
* \param valfreefp Optional callback to free the value.
* \param nentries_reserve Optionally reserve the number of members that the hash will hold.
*/
void BLI_ghash_clear(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
void BLI_ghash_clear_ex(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp,
const unsigned int nentries_reserve)
{
unsigned int i;
@@ -395,14 +405,26 @@ void BLI_ghash_clear(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfree
}
}
gh->cursize = 0;
gh->nbuckets = hashsizes[0]; /* gh->cursize */
gh->nentries = 0;
gh->nbuckets = hashsizes[gh->cursize];
gh->cursize = 0;
if (nentries_reserve) {
ghash_buckets_reserve(gh, nentries_reserve);
}
MEM_freeN(gh->buckets);
gh->buckets = MEM_callocN(gh->nbuckets * sizeof(*gh->buckets), "buckets");
BLI_mempool_clear(gh->entrypool);
BLI_mempool_clear_ex(gh->entrypool, nentries_reserve ? (int)nentries_reserve : -1);
}
/**
 * Wraps #BLI_ghash_clear_ex with zero entries reserved.
 *
 * \param keyfreefp Optional callback to free the key.
 * \param valfreefp Optional callback to free the value.
 */
void BLI_ghash_clear(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
{
	BLI_ghash_clear_ex(gh, keyfreefp, valfreefp, 0);
}
/**

View File

@@ -87,6 +87,7 @@ struct BLI_mempool {
/* keeps aligned to 16 bits */
BLI_freenode *free; /* free element list. Interleaved into chunk datas. */
int maxchunks; /* use to know how many chunks to keep for BLI_mempool_clear */
int totused; /* number of elements currently in use */
#ifdef USE_TOTALLOC
int totalloc; /* number of elements allocated in total */
@@ -101,6 +102,14 @@ struct BLI_mempool {
# define CHUNK_DATA(chunk) (CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1))
#endif
/**
 * \return the number of chunks to allocate based on how many elements are needed.
 *
 * \note always at least 1 for non-negative \a totelem
 * (assumes pchunk > 0 — TODO confirm callers guarantee this).
 */
BLI_INLINE int mempool_maxchunks(const int totelem, const int pchunk)
{
	const int full_chunks = totelem / pchunk;
	return full_chunks + 1;
}
static BLI_mempool_chunk *mempool_chunk_alloc(BLI_mempool *pool)
{
BLI_mempool_chunk *mpchunk;
@@ -183,35 +192,37 @@ static BLI_freenode *mempool_chunk_add(BLI_mempool *pool, BLI_mempool_chunk *mpc
return curnode;
}
static void mempool_chunk_free_all(BLI_mempool *pool)
/**
 * Free a single chunk, using the system allocator or guarded-alloc
 * depending on the pool's BLI_MEMPOOL_SYSMALLOC flag.
 */
static void mempool_chunk_free(BLI_mempool_chunk *mpchunk, const int flag)
{
	if (flag & BLI_MEMPOOL_SYSMALLOC) {
		/* chunk (and its data, when stored out-of-line) came from malloc */
#ifdef USE_DATA_PTR
		free(CHUNK_DATA(mpchunk));
#endif
		free(mpchunk);
		return;
	}

#ifdef USE_DATA_PTR
	MEM_freeN(CHUNK_DATA(mpchunk));
#endif
	MEM_freeN(mpchunk);
}
/**
 * Free every chunk in \a chunks and reset the list to empty.
 *
 * \param chunks The list of chunks to free (emptied on return).
 * \param flag The pool's flag, forwarded so #mempool_chunk_free picks
 * the matching allocator (BLI_MEMPOOL_SYSMALLOC or guarded-alloc).
 *
 * NOTE(review): this block contained leftover pre-refactor lines (the old
 * pool-based SYSMALLOC/else loops) interleaved with the new list-based
 * version; only the new version is kept here.
 */
static void mempool_chunk_free_all(ListBase *chunks, const int flag)
{
	BLI_mempool_chunk *mpchunk, *mpchunk_next;

	/* grab 'next' before freeing the current chunk */
	for (mpchunk = chunks->first; mpchunk; mpchunk = mpchunk_next) {
		mpchunk_next = mpchunk->next;
		mempool_chunk_free(mpchunk, flag);
	}
	chunks->first = chunks->last = NULL;
}
BLI_mempool *BLI_mempool_create(int esize, int totelem, int pchunk, int flag)
{
BLI_mempool *pool = NULL;
BLI_freenode *lasttail = NULL, *curnode = NULL;
BLI_freenode *lasttail = NULL;
int i, maxchunks;
/* allocate the pool structure */
@@ -234,29 +245,23 @@ BLI_mempool *BLI_mempool_create(int esize, int totelem, int pchunk, int flag)
pool->esize = esize;
}
maxchunks = mempool_maxchunks(totelem, pchunk);
pool->flag = flag;
pool->pchunk = pchunk;
pool->csize = esize * pchunk;
pool->chunks.first = pool->chunks.last = NULL;
pool->free = NULL; /* mempool_chunk_add assigns */
pool->maxchunks = maxchunks;
#ifdef USE_TOTALLOC
pool->totalloc = 0;
#endif
pool->totused = 0;
maxchunks = totelem / pchunk + 1;
if (maxchunks == 0) {
maxchunks = 1;
}
/* allocate the actual chunks */
for (i = 0; i < maxchunks; i++) {
BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
curnode = mempool_chunk_add(pool, mpchunk, lasttail);
/* set the end of this chunks memory to the new tail for next iteration */
lasttail = curnode;
lasttail = mempool_chunk_add(pool, mpchunk, lasttail);
}
return pool;
@@ -338,7 +343,7 @@ void BLI_mempool_free(BLI_mempool *pool, void *addr)
BLI_mempool_chunk *first = pool->chunks.first;
BLI_remlink(&pool->chunks, first);
mempool_chunk_free_all(pool);
mempool_chunk_free_all(&pool->chunks, pool->flag);
BLI_addtail(&pool->chunks, first);
#ifdef USE_TOTALLOC
pool->totalloc = pool->pchunk;
@@ -504,22 +509,49 @@ void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
#endif
/**
 * Empty the pool as if it were freshly created, optionally keeping chunks
 * allocated so they can be reused without hitting the allocator again.
 *
 * \param pool The pool to clear.
 * \param totelem_reserve Number of elements to keep capacity for,
 * or -1 to keep the pool's original chunk count (pool->maxchunks).
 *
 * NOTE(review): this block contained leftover pre-refactor statements
 * ('first'/BLI_remlink/mempool_chunk_free_all on the whole pool) interleaved
 * with the new implementation; only the new version is kept here.
 */
void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
{
	BLI_mempool_chunk *mpchunk;
	BLI_mempool_chunk *mpchunk_next;
	int maxchunks;

	ListBase chunks_temp;
	BLI_freenode *lasttail = NULL;

	if (totelem_reserve == -1) {
		maxchunks = pool->maxchunks;
	}
	else {
		maxchunks = mempool_maxchunks(totelem_reserve, pool->pchunk);
	}

	/* free all chunks after the first 'maxchunks' */
	for (mpchunk = BLI_findlink(&pool->chunks, maxchunks); mpchunk; mpchunk = mpchunk_next) {
		mpchunk_next = mpchunk->next;
		BLI_remlink(&pool->chunks, mpchunk);
		mempool_chunk_free(mpchunk, pool->flag);
	}

	/* re-initialize; mempool_chunk_add() below rebuilds 'free' */
	pool->free = NULL;
	pool->totused = 0;
#ifdef USE_TOTALLOC
	pool->totalloc = 0;
#endif

	/* re-add the kept chunks one by one, chaining their free-lists together */
	chunks_temp = pool->chunks;
	pool->chunks.first = pool->chunks.last = NULL;

	while ((mpchunk = chunks_temp.first)) {
		BLI_remlink(&chunks_temp, mpchunk);
		lasttail = mempool_chunk_add(pool, mpchunk, lasttail);
	}
}
/**
 * Wraps #BLI_mempool_clear_ex, keeping the pool's original chunk count reserved.
 */
void BLI_mempool_clear(BLI_mempool *pool)
{
	/* 'return expr;' in a void function is a C constraint violation
	 * (C11 6.8.6.4) — call the void function plainly instead. */
	BLI_mempool_clear_ex(pool, -1);
}
/**
@@ -527,7 +559,7 @@ void BLI_mempool_clear(BLI_mempool *pool)
*/
void BLI_mempool_destroy(BLI_mempool *pool)
{
mempool_chunk_free_all(pool);
mempool_chunk_free_all(&pool->chunks, pool->flag);
if (pool->flag & BLI_MEMPOOL_SYSMALLOC) {
free(pool);

View File

@@ -90,6 +90,16 @@ BLI_INLINE bool edgehash_test_expand_buckets(const unsigned int nentries, const
return (nentries > nbuckets * 3);
}
/**
 * Increase the initial bucket count so it can hold a reserved amount of
 * entries without triggering a resize.
 *
 * \param eh The edge-hash whose bucket count is grown.
 * \param nentries_reserve Number of entries the caller expects to insert.
 */
BLI_INLINE void edgehash_buckets_reserve(EdgeHash *eh, const unsigned int nentries_reserve)
{
	/* Step up through the size table until the load factor test passes. */
	while (edgehash_test_expand_buckets(nentries_reserve, eh->nbuckets)) {
		eh->nbuckets = _ehash_hashsizes[++eh->cursize];
	}
}
BLI_INLINE unsigned int edgehash_keyhash(EdgeHash *eh, unsigned int v0, unsigned int v1)
{
BLI_assert(v0 < v1);
@@ -176,9 +186,7 @@ EdgeHash *BLI_edgehash_new_ex(const char *info,
/* if we have reserved the number of elements that this hash will contain */
if (nentries_reserve) {
while (edgehash_test_expand_buckets(nentries_reserve, eh->nbuckets)) {
eh->nbuckets = _ehash_hashsizes[++eh->cursize];
}
edgehash_buckets_reserve(eh, nentries_reserve);
}
eh->buckets = MEM_callocN(eh->nbuckets * sizeof(*eh->buckets), "eh buckets 2");