/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2008 by Blender Foundation.
 * All rights reserved.
 *
 * The Original Code is: all of this file.
 *
 * Contributor(s): Geoffery Bantle
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file blender/blenlib/intern/BLI_mempool.c
 *  \ingroup bli
 *
 * Simple, fast memory allocator for allocating many elements of the same size.
 */
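
/* Example usage (an illustrative sketch, not part of the original sources;
 * 'MyElem' is a hypothetical element type and the pool sizes are arbitrary):
 *
 *   BLI_mempool *pool = BLI_mempool_create(sizeof(MyElem), 64, 512, BLI_MEMPOOL_ALLOW_ITER);
 *   MyElem *elem = BLI_mempool_alloc(pool);
 *   ... use elem, allocate more ...
 *   BLI_mempool_free(pool, elem);
 *   BLI_mempool_destroy(pool);
 */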

#include <string.h>
#include <stdlib.h>

#include "BLI_utildefines.h"

#include "BLI_mempool.h"  /* own include */

#include "MEM_guardedalloc.h"

#include "BLI_strict_flags.h"  /* keep last */

#ifdef WITH_MEM_VALGRIND
#  include "valgrind/memcheck.h"
#endif

/* note: copied from BLO_blend_defs.h, don't use here because we're in BLI */
#ifdef __BIG_ENDIAN__
/* Big Endian */
#  define MAKE_ID(a, b, c, d) ( (int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d) )
#else
/* Little Endian */
#  define MAKE_ID(a, b, c, d) ( (int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a) )
#endif

#define FREEWORD MAKE_ID('f', 'r', 'e', 'e')
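
/* Note (added for clarity): with either MAKE_ID definition above, the four
 * bytes of FREEWORD spell out "free" in memory order, so freed elements are
 * easy to recognize in a debugger, and the iterator uses this word to skip
 * over elements that are on the free list. */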

/* currently totalloc isn't used */
// #define USE_TOTALLOC

/* when undefined, merge the allocs for BLI_mempool_chunk and its data */
// #define USE_DATA_PTR

/* optimize pool size */
#define USE_CHUNK_POW2

#ifndef NDEBUG
static bool mempool_debug_memset = false;
#endif

/**
 * A free element from #BLI_mempool_chunk. Data is cast to this type and stored in
 * #BLI_mempool.free as a single linked list, each item #BLI_mempool.esize large.
 *
 * Each element represents a block which #BLI_mempool_alloc may return.
 */
typedef struct BLI_freenode {
	struct BLI_freenode *next;
	int freeword;  /* used to identify this as a freed node */
} BLI_freenode;

/**
 * A chunk of memory in the mempool stored in
 * #BLI_mempool.chunks as a single linked list.
 */
typedef struct BLI_mempool_chunk {
	struct BLI_mempool_chunk *next;
#ifdef USE_DATA_PTR
	void *_data;
#endif
} BLI_mempool_chunk;

/**
 * The mempool: stores and tracks the memory \a chunks,
 * and the free elements within those chunks (\a free).
 */
struct BLI_mempool {
	BLI_mempool_chunk *chunks;  /* single linked list of allocated chunks */
	/* keep a pointer to the last, so we can append new chunks there,
	 * this is needed for iteration so we can loop over chunks in the order added */
	BLI_mempool_chunk *chunk_tail;

	unsigned int esize;   /* element size in bytes */
	unsigned int csize;   /* chunk size in bytes */
	unsigned int pchunk;  /* number of elements per chunk */
	unsigned int flag;
	/* keeps aligned to 16 bytes */

	BLI_freenode *free;      /* free element list. Interleaved into chunk data. */
	unsigned int maxchunks;  /* use to know how many chunks to keep for BLI_mempool_clear */
	unsigned int totused;    /* number of elements currently in use */
#ifdef USE_TOTALLOC
	unsigned int totalloc;   /* number of elements allocated in total */
#endif
};
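
/* Rough layout sketch (added for illustration; this shows the default build
 * where USE_DATA_PTR is undefined, so a chunk header and its data share one
 * allocation, see CHUNK_DATA below):
 *
 *   pool->chunks -> [BLI_mempool_chunk | elem 0 | elem 1 | ... | elem pchunk-1]
 *                        |next
 *                        v
 *                   [BLI_mempool_chunk | elem 0 | elem 1 | ... | elem pchunk-1] -> NULL
 *
 * 'pool->free' threads a singly linked list through the unused elements. */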

#define MEMPOOL_ELEM_SIZE_MIN (sizeof(void *) * 2)
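
/* Note (added for clarity): every element must be able to hold a BLI_freenode
 * (the 'next' pointer and 'freeword') while it sits on the free list, which is
 * why at least two pointers worth of space is reserved per element. */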

#ifdef USE_DATA_PTR
#  define CHUNK_DATA(chunk) (chunk)->_data
#else
#  define CHUNK_DATA(chunk) (CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1))
#endif

/* extra bytes implicitly used for every chunk alloc */
#ifdef USE_CHUNK_POW2
#  define CHUNK_OVERHEAD (unsigned int)(MEM_SIZE_OVERHEAD + sizeof(BLI_mempool_chunk))
#else
#  define CHUNK_OVERHEAD (unsigned int)(MEM_SIZE_OVERHEAD)
#endif

#ifdef USE_CHUNK_POW2
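/**
 * Round \a x up to the next power of two (doc comment added for clarity).
 * This is the standard bit-smearing trick: propagate the highest set bit into
 * every lower bit, then add one, e.g. 1000 -> 1024, 1024 -> 1024.
 */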
static unsigned int power_of_2_max_u(unsigned int x)
{
	x -= 1;
	x = x | (x >> 1);
	x = x | (x >> 2);
	x = x | (x >> 4);
	x = x | (x >> 8);
	x = x | (x >> 16);
	return x + 1;
}
#endif

BLI_INLINE BLI_mempool_chunk *mempool_chunk_find(BLI_mempool_chunk *head, unsigned int index)
{
	while (index-- && head) {
		head = head->next;
	}
	return head;
}

/**
 * \return the number of chunks to allocate based on how many elements are needed
 * (e.g. totelem=100, pchunk=32 gives 4 chunks).
 *
 * \note for small pools 1 is a good default: the elements need to be initialized,
 * adding overhead on creation which is redundant if they aren't used.
 */
BLI_INLINE unsigned int mempool_maxchunks(const unsigned int totelem, const unsigned int pchunk)
{
	return (totelem <= pchunk) ? 1 : ((totelem / pchunk) + 1);
}

static BLI_mempool_chunk *mempool_chunk_alloc(BLI_mempool *pool)
{
	BLI_mempool_chunk *mpchunk;
#ifdef USE_DATA_PTR
	mpchunk = MEM_mallocN(sizeof(BLI_mempool_chunk), "BLI_Mempool Chunk");
	CHUNK_DATA(mpchunk) = MEM_mallocN((size_t)pool->csize, "BLI Mempool Chunk Data");
#else
	mpchunk = MEM_mallocN(sizeof(BLI_mempool_chunk) + (size_t)pool->csize, "BLI_Mempool Chunk");
#endif

	return mpchunk;
}

/**
 * Initialize a chunk and add into \a pool->chunks
 *
 * \param pool The pool to add the chunk into.
 * \param mpchunk The new uninitialized chunk (can be malloc'd)
 * \param lasttail The last element of the previous chunk
 * (used when building free chunks initially)
 * \return The last free element of this chunk (the tail of its free list).
 */
static BLI_freenode *mempool_chunk_add(BLI_mempool *pool, BLI_mempool_chunk *mpchunk,
                                       BLI_freenode *lasttail)
{
	BLI_freenode *curnode = NULL;
	const unsigned int pchunk_last = pool->pchunk - 1;
	char *addr;
	unsigned int j;

	/* append */
	if (pool->chunk_tail) {
		pool->chunk_tail->next = mpchunk;
	}
	else {
		BLI_assert(pool->chunks == NULL);
		pool->chunks = mpchunk;
	}

	mpchunk->next = NULL;
	pool->chunk_tail = mpchunk;

	if (pool->free == NULL) {
		pool->free = CHUNK_DATA(mpchunk);  /* start of the list */
		if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
			pool->free->freeword = FREEWORD;
		}
	}

	/* loop through the allocated data, building the pointer structures */
	for (addr = CHUNK_DATA(mpchunk), j = 0; j <= pchunk_last; j++) {
		curnode = ((BLI_freenode *)addr);
		addr += pool->esize;
		curnode->next = (BLI_freenode *)addr;
		if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
			if (j != pchunk_last)
				curnode->next->freeword = FREEWORD;
			curnode->freeword = FREEWORD;
		}
	}

	/* terminate the list,
	 * will be overwritten if 'curnode' gets passed in again as 'lasttail' */
	curnode->next = NULL;

#ifdef USE_TOTALLOC
	pool->totalloc += pool->pchunk;
#endif

	/* final pointer in the previously allocated chunk is wrong */
	if (lasttail) {
		lasttail->next = CHUNK_DATA(mpchunk);
		if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
			lasttail->freeword = FREEWORD;
		}
	}

	return curnode;
}

static void mempool_chunk_free(BLI_mempool_chunk *mpchunk)
{
#ifdef USE_DATA_PTR
	MEM_freeN(CHUNK_DATA(mpchunk));
#endif
	MEM_freeN(mpchunk);
}

static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk)
{
	BLI_mempool_chunk *mpchunk_next;

	for (; mpchunk; mpchunk = mpchunk_next) {
		mpchunk_next = mpchunk->next;
		mempool_chunk_free(mpchunk);
	}
}

BLI_mempool *BLI_mempool_create(unsigned int esize, unsigned int totelem,
                                unsigned int pchunk, unsigned int flag)
{
	BLI_mempool *pool;
	BLI_freenode *lasttail = NULL;
	unsigned int i, maxchunks;

	/* allocate the pool structure */
	pool = MEM_mallocN(sizeof(BLI_mempool), "memory pool");

	/* set the elem size */
	if (esize < (unsigned int)MEMPOOL_ELEM_SIZE_MIN) {
		esize = (unsigned int)MEMPOOL_ELEM_SIZE_MIN;
	}

	if (flag & BLI_MEMPOOL_ALLOW_ITER) {
		esize = MAX2(esize, (unsigned int)sizeof(BLI_freenode));
	}

	maxchunks = mempool_maxchunks(totelem, pchunk);

	pool->chunks = NULL;
	pool->chunk_tail = NULL;
	pool->esize = esize;
	pool->csize = esize * pchunk;

	/* Optimize chunk size to powers of 2, accounting for slop-space */
#ifdef USE_CHUNK_POW2
	{
		BLI_assert(pool->csize > CHUNK_OVERHEAD);
		pool->csize = power_of_2_max_u(pool->csize) - CHUNK_OVERHEAD;
		pchunk = pool->csize / esize;
	}
#endif
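
	/* Worked example (illustrative numbers; the exact MEM_SIZE_OVERHEAD value
	 * depends on the guarded allocator): esize=16, pchunk=512 gives csize=8192;
	 * rounding up to a power of two and subtracting CHUNK_OVERHEAD keeps the
	 * whole allocation (chunk header + data + allocator overhead) just inside
	 * the 8192 byte size class, and pchunk is recomputed from the new csize. */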

	pool->pchunk = pchunk;
	pool->flag = flag;
	pool->free = NULL;  /* mempool_chunk_add assigns */
	pool->maxchunks = maxchunks;
#ifdef USE_TOTALLOC
	pool->totalloc = 0;
#endif
	pool->totused = 0;

	/* allocate the actual chunks */
	for (i = 0; i < maxchunks; i++) {
		BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
		lasttail = mempool_chunk_add(pool, mpchunk, lasttail);
	}

#ifdef WITH_MEM_VALGRIND
	VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

	return pool;
}

void *BLI_mempool_alloc(BLI_mempool *pool)
{
	void *retval = NULL;

	pool->totused++;

	if (UNLIKELY(pool->free == NULL)) {
		/* need to allocate a new chunk */
		BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
		mempool_chunk_add(pool, mpchunk, NULL);
	}

	BLI_assert(pool->chunk_tail->next == NULL);

	retval = pool->free;

	if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
		pool->free->freeword = 0x7FFFFFFF;  /* any value other than FREEWORD, so iteration sees this element as in use */
	}

	pool->free = pool->free->next;

#ifdef WITH_MEM_VALGRIND
	VALGRIND_MEMPOOL_ALLOC(pool, retval, pool->esize);
#endif

	return retval;
}

void *BLI_mempool_calloc(BLI_mempool *pool)
{
	void *retval = BLI_mempool_alloc(pool);
	memset(retval, 0, (size_t)pool->esize);
	return retval;
}

/**
 * Free an element from the mempool.
 *
 * \note doesn't protect against double frees, don't be stupid!
 */
void BLI_mempool_free(BLI_mempool *pool, void *addr)
{
	BLI_freenode *newhead = addr;

#ifndef NDEBUG
	{
		BLI_mempool_chunk *chunk;
		bool found = false;
		for (chunk = pool->chunks; chunk; chunk = chunk->next) {
			if (ARRAY_HAS_ITEM((char *)addr, (char *)CHUNK_DATA(chunk), pool->csize)) {
				found = true;
				break;
			}
		}
		if (!found) {
			BLI_assert(!"Attempt to free data which is not in pool.\n");
		}
	}

	/* enable for debugging */
	if (UNLIKELY(mempool_debug_memset)) {
		memset(addr, 255, pool->esize);
	}
#endif

	if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
#ifndef NDEBUG
		/* this will detect double frees */
		BLI_assert(newhead->freeword != FREEWORD);
#endif
		newhead->freeword = FREEWORD;
	}

	newhead->next = pool->free;
	pool->free = newhead;

	pool->totused--;

#ifdef WITH_MEM_VALGRIND
	VALGRIND_MEMPOOL_FREE(pool, addr);
#endif

	/* nothing is in use; free all the chunks except the first */
	if (UNLIKELY(pool->totused == 0)) {
		BLI_freenode *curnode = NULL;
		char *tmpaddr = NULL;
		unsigned int i;
		BLI_mempool_chunk *first;

		first = pool->chunks;
		mempool_chunk_free_all(first->next);
		first->next = NULL;
		pool->chunk_tail = first;

#ifdef USE_TOTALLOC
		pool->totalloc = pool->pchunk;
#endif

		/* temp alloc so valgrind doesn't complain when setting free'd blocks 'next' */
#ifdef WITH_MEM_VALGRIND
		VALGRIND_MEMPOOL_ALLOC(pool, CHUNK_DATA(first), pool->csize);
#endif
		pool->free = CHUNK_DATA(first);  /* start of the list */
		for (tmpaddr = CHUNK_DATA(first), i = 0; i < pool->pchunk; i++) {
			curnode = ((BLI_freenode *)tmpaddr);
			tmpaddr += pool->esize;
			curnode->next = (BLI_freenode *)tmpaddr;
		}
		curnode->next = NULL;  /* terminate the list */

#ifdef WITH_MEM_VALGRIND
		VALGRIND_MEMPOOL_FREE(pool, CHUNK_DATA(first));
#endif
	}
}

int BLI_mempool_count(BLI_mempool *pool)
{
	return (int)pool->totused;
}

void *BLI_mempool_findelem(BLI_mempool *pool, unsigned int index)
{
	BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

	if (index < pool->totused) {
		/* we could have some faster mem chunk stepping code inline */
		BLI_mempool_iter iter;
		void *elem;
		BLI_mempool_iternew(pool, &iter);
		for (elem = BLI_mempool_iterstep(&iter); index-- != 0; elem = BLI_mempool_iterstep(&iter)) {
			/* do nothing */
		}
		return elem;
	}

	return NULL;
}

/**
 * Fill in \a data with pointers to each element of the mempool,
 * to create a lookup table.
 *
 * \param pool Pool to create a table from.
 * \param data array of pointers at least the size of 'pool->totused'
 */
void BLI_mempool_as_table(BLI_mempool *pool, void **data)
{
	BLI_mempool_iter iter;
	void *elem;
	void **p = data;
	BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
	BLI_mempool_iternew(pool, &iter);
	while ((elem = BLI_mempool_iterstep(&iter))) {
		*p++ = elem;
	}
	BLI_assert((unsigned int)(p - data) == pool->totused);
}

/**
 * A version of #BLI_mempool_as_table that allocates and returns the data.
 */
void **BLI_mempool_as_tableN(BLI_mempool *pool, const char *allocstr)
{
	void **data = MEM_mallocN((size_t)pool->totused * sizeof(void *), allocstr);
	BLI_mempool_as_table(pool, data);
	return data;
}

/**
 * Fill in \a data with the contents of the mempool.
 */
void BLI_mempool_as_array(BLI_mempool *pool, void *data)
{
	BLI_mempool_iter iter;
	char *elem, *p = data;
	BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
	BLI_mempool_iternew(pool, &iter);
	while ((elem = BLI_mempool_iterstep(&iter))) {
		memcpy(p, elem, (size_t)pool->esize);
		p += pool->esize;
	}
	BLI_assert((unsigned int)(p - (char *)data) == pool->totused * pool->esize);
}

/**
 * A version of #BLI_mempool_as_array that allocates and returns the data.
 */
void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
{
	char *data = MEM_mallocN((size_t)(pool->totused * pool->esize), allocstr);
	BLI_mempool_as_array(pool, data);
	return data;
}

/**
 * Create a new mempool iterator, \a BLI_MEMPOOL_ALLOW_ITER flag must be set.
 */
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
{
	BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

	iter->pool = pool;
	iter->curchunk = pool->chunks;
	iter->curindex = 0;
}
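
/* Typical iteration (an illustrative sketch; it assumes the pool was created
 * with the BLI_MEMPOOL_ALLOW_ITER flag, matching the usage in
 * #BLI_mempool_as_table above):
 *
 *   BLI_mempool_iter iter;
 *   void *elem;
 *
 *   BLI_mempool_iternew(pool, &iter);
 *   while ((elem = BLI_mempool_iterstep(&iter))) {
 *       ... 'elem' is a live (non-freed) element of 'pool' ...
 *   }
 */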

#if 0
/* unoptimized, more readable */

static void *bli_mempool_iternext(BLI_mempool_iter *iter)
{
	void *ret = NULL;

	if (!iter->curchunk || !iter->pool->totused) return NULL;

	ret = ((char *)CHUNK_DATA(iter->curchunk)) + (iter->pool->esize * iter->curindex);

	iter->curindex++;

	if (iter->curindex == iter->pool->pchunk) {
		iter->curchunk = iter->curchunk->next;
		iter->curindex = 0;
	}

	return ret;
}

void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
	BLI_freenode *ret;

	do {
		ret = bli_mempool_iternext(iter);
	} while (ret && ret->freeword == FREEWORD);

	return ret;
}

#else

/* optimized version of code above */

/**
 * Step over the iterator, returning the mempool item or NULL.
 */
void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
	BLI_freenode *ret;

	do {
		if (LIKELY(iter->curchunk)) {
			ret = (BLI_freenode *)(((char *)CHUNK_DATA(iter->curchunk)) + (iter->pool->esize * iter->curindex));
		}
		else {
			return NULL;
		}

		if (UNLIKELY(++iter->curindex == iter->pool->pchunk)) {
			iter->curindex = 0;
			iter->curchunk = iter->curchunk->next;
		}
	} while (ret->freeword == FREEWORD);

	return ret;
}

#endif

/**
 * Empty the pool, as if it were just created.
 *
 * \param pool The pool to clear.
 * \param totelem_reserve Optionally reserve how many items should be kept from clearing
 * (-1 to reserve the pool's initial size).
 */
void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
{
	BLI_mempool_chunk *mpchunk;
	BLI_mempool_chunk *mpchunk_next;
	unsigned int maxchunks;

	BLI_mempool_chunk *chunks_temp;
	BLI_freenode *lasttail = NULL;

#ifdef WITH_MEM_VALGRIND
	VALGRIND_DESTROY_MEMPOOL(pool);
	VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

	if (totelem_reserve == -1) {
		maxchunks = pool->maxchunks;
	}
	else {
		maxchunks = mempool_maxchunks((unsigned int)totelem_reserve, pool->pchunk);
	}

	/* free all chunks after 'maxchunks' */
	mpchunk = mempool_chunk_find(pool->chunks, maxchunks - 1);
	if (mpchunk && mpchunk->next) {
		/* terminate */
		mpchunk_next = mpchunk->next;
		mpchunk->next = NULL;
		mpchunk = mpchunk_next;

		do {
			mpchunk_next = mpchunk->next;
			mempool_chunk_free(mpchunk);
		} while ((mpchunk = mpchunk_next));
	}

	/* re-initialize */
	pool->free = NULL;
	pool->totused = 0;
#ifdef USE_TOTALLOC
	pool->totalloc = 0;
#endif

	chunks_temp = pool->chunks;
	pool->chunks = NULL;
	pool->chunk_tail = NULL;

	while ((mpchunk = chunks_temp)) {
		chunks_temp = mpchunk->next;
		lasttail = mempool_chunk_add(pool, mpchunk, lasttail);
	}
}

/**
 * Wrap #BLI_mempool_clear_ex with no reserve set.
 */
void BLI_mempool_clear(BLI_mempool *pool)
{
	BLI_mempool_clear_ex(pool, -1);
}

/**
 * Free the mempool itself (and all elements).
 */
void BLI_mempool_destroy(BLI_mempool *pool)
{
	mempool_chunk_free_all(pool->chunks);

#ifdef WITH_MEM_VALGRIND
	VALGRIND_DESTROY_MEMPOOL(pool);
#endif

	MEM_freeN(pool);
}

#ifndef NDEBUG
void BLI_mempool_set_memory_debug(void)
{
	mempool_debug_memset = true;
}
#endif