/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2008 by Blender Foundation.
 * All rights reserved.
 *
 * The Original Code is: all of this file.
 *
 * Contributor(s): Geoffrey Bantle
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file blender/blenlib/intern/BLI_mempool.c
 *  \ingroup bli
 *  \author Geoffrey Bantle
 *
 * Simple, fast memory allocator for allocating many elements of the same size.
 *
 * Supports:
 *
 * - Freeing chunks.
 * - Iterating over allocated chunks
 *   (optionally when using the #BLI_MEMPOOL_ALLOW_ITER flag).
 */
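
/* A minimal usage sketch of the pool API implemented below; illustrative only
 * and kept out of the build with '#if 0'. 'MyElem' is a hypothetical element
 * type, not part of this file. */
#if 0
typedef struct MyElem { int id; float co[3]; } MyElem;

static void mempool_example(void)
{
	/* 512 elements per chunk, nothing reserved up front,
	 * iteration enabled so BLI_mempool_iterstep() can be used. */
	BLI_mempool *pool = BLI_mempool_create(sizeof(MyElem), 0, 512, BLI_MEMPOOL_ALLOW_ITER);

	MyElem *elem = BLI_mempool_alloc(pool);
	elem->id = 1;

	BLI_mempool_free(pool, elem);
	BLI_mempool_destroy(pool);
}
#endif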

#include <string.h>
#include <stdlib.h>

#include "atomic_ops.h"

#include "BLI_utildefines.h"

#include "BLI_mempool.h"  /* own include */

#include "MEM_guardedalloc.h"

#include "BLI_strict_flags.h"  /* keep last */

#ifdef WITH_MEM_VALGRIND
#  include "valgrind/memcheck.h"
#endif

/* note: copied from BLO_blend_defs.h, don't use here because we're in BLI */
#ifdef __BIG_ENDIAN__
/* Big Endian */
#  define MAKE_ID(a, b, c, d) ( (int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d) )
#  define MAKE_ID_8(a, b, c, d, e, f, g, h) \
	((int64_t)(a) << 56 | (int64_t)(b) << 48 | (int64_t)(c) << 40 | (int64_t)(d) << 32 | \
	 (int64_t)(e) << 24 | (int64_t)(f) << 16 | (int64_t)(g) << 8 | (h) )
#else
/* Little Endian */
#  define MAKE_ID(a, b, c, d) ( (int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a) )
#  define MAKE_ID_8(a, b, c, d, e, f, g, h) \
	((int64_t)(h) << 56 | (int64_t)(g) << 48 | (int64_t)(f) << 40 | (int64_t)(e) << 32 | \
	 (int64_t)(d) << 24 | (int64_t)(c) << 16 | (int64_t)(b) << 8 | (a) )
#endif

/**
 * Important that this value is _not_ aligned with ``sizeof(void *)``.
 * So having a pointer to 2/4/8... aligned memory is enough to ensure the freeword will never be used.
 * To be safe, use a word that's the same in both directions.
 */
#define FREEWORD ((sizeof(void *) > sizeof(int32_t)) ? \
	MAKE_ID_8('e', 'e', 'r', 'f', 'f', 'r', 'e', 'e') : \
	MAKE_ID('e', 'f', 'f', 'e'))

/**
 * The 'used' word just needs to be set to something besides FREEWORD.
 */
#define USEDWORD MAKE_ID('u', 's', 'e', 'd')
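
/* Illustrative sketch (kept out of the build): a freed slot is reinterpreted
 * as a BLI_freenode, so 'freeword' overlays the element's bytes; the iterator
 * uses it to recognize and skip slots that are on the free list. 'slot' is a
 * hypothetical pointer into a chunk's data. */
#if 0
BLI_freenode *node = (BLI_freenode *)slot;
if (node->freeword == FREEWORD) {
	/* the slot is free, skip it when iterating */
}
#endif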

/* currently totalloc isn't used */
// #define USE_TOTALLOC

/* when undefined, merge the allocs for BLI_mempool_chunk and its data */
// #define USE_DATA_PTR

/* optimize pool size */
#define USE_CHUNK_POW2


#ifndef NDEBUG
static bool mempool_debug_memset = false;
#endif

/**
 * A free element from #BLI_mempool_chunk. Data is cast to this type and stored in
 * #BLI_mempool.free as a single linked list, each item #BLI_mempool.esize large.
 *
 * Each element represents a block which BLI_mempool_alloc may return.
 */
typedef struct BLI_freenode {
	struct BLI_freenode *next;
	intptr_t freeword;  /* used to identify this as a freed node */
} BLI_freenode;

/**
 * A chunk of memory in the mempool stored in
 * #BLI_mempool.chunks as a singly linked list.
 */
typedef struct BLI_mempool_chunk {
	struct BLI_mempool_chunk *next;
#ifdef USE_DATA_PTR
	void *_data;
#endif
} BLI_mempool_chunk;

/**
 * The mempool, stores and tracks memory \a chunks and elements within those chunks \a free.
 */
struct BLI_mempool {
	BLI_mempool_chunk *chunks;  /* single linked list of allocated chunks */
	/* keep a pointer to the last, so we can append new chunks there
	 * this is needed for iteration so we can loop over chunks in the order added */
	BLI_mempool_chunk *chunk_tail;

	uint esize;   /* element size in bytes */
	uint csize;   /* chunk size in bytes */
	uint pchunk;  /* number of elements per chunk */
	uint flag;
	/* keeps aligned to 16 bits */

	BLI_freenode *free;  /* free element list. Interleaved into chunk data. */
	uint maxchunks;      /* used to know how many chunks to keep for BLI_mempool_clear */
	uint totused;        /* number of elements currently in use */
#ifdef USE_TOTALLOC
	uint totalloc;       /* number of elements allocated in total */
#endif
};

#define MEMPOOL_ELEM_SIZE_MIN (sizeof(void *) * 2)

#ifdef USE_DATA_PTR
#  define CHUNK_DATA(chunk) (chunk)->_data
#else
#  define CHUNK_DATA(chunk) (CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1))
#endif

#define NODE_STEP_NEXT(node)  ((void *)((char *)(node) + esize))
#define NODE_STEP_PREV(node)  ((void *)((char *)(node) - esize))

/* extra bytes implicitly used for every chunk alloc */
#ifdef USE_DATA_PTR
#  define CHUNK_OVERHEAD (uint)(MEM_SIZE_OVERHEAD + sizeof(BLI_mempool_chunk))
#else
#  define CHUNK_OVERHEAD (uint)(MEM_SIZE_OVERHEAD)
#endif

#ifdef USE_CHUNK_POW2
static uint power_of_2_max_u(uint x)
{
	x -= 1;
	x = x | (x >> 1);
	x = x | (x >> 2);
	x = x | (x >> 4);
	x = x | (x >> 8);
	x = x | (x >> 16);
	return x + 1;
}
#endif
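
/* e.g. power_of_2_max_u(3000): after the shifts every bit below the highest set
 * bit is set (0x0BB7 -> 0x0FFF), so the result is 0x1000 == 4096; values that
 * are already powers of two map to themselves. */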

BLI_INLINE BLI_mempool_chunk *mempool_chunk_find(BLI_mempool_chunk *head, uint index)
{
	while (index-- && head) {
		head = head->next;
	}
	return head;
}

/**
 * \return the number of chunks to allocate based on how many elements are needed.
 *
 * \note for small pools 1 is a good default, the elements need to be initialized,
 * adding overhead on creation which is redundant if they aren't used.
 */
BLI_INLINE uint mempool_maxchunks(const uint totelem, const uint pchunk)
{
	return (totelem <= pchunk) ? 1 : ((totelem / pchunk) + 1);
}
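
/* e.g. mempool_maxchunks(1000, 512) == (1000 / 512) + 1 == 2, covering 1024
 * elements; any totelem <= pchunk maps to a single chunk. */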

static BLI_mempool_chunk *mempool_chunk_alloc(BLI_mempool *pool)
{
	BLI_mempool_chunk *mpchunk;
#ifdef USE_DATA_PTR
	mpchunk = MEM_mallocN(sizeof(BLI_mempool_chunk), "BLI_Mempool Chunk");
	CHUNK_DATA(mpchunk) = MEM_mallocN((size_t)pool->csize, "BLI Mempool Chunk Data");
#else
	mpchunk = MEM_mallocN(sizeof(BLI_mempool_chunk) + (size_t)pool->csize, "BLI_Mempool Chunk");
#endif

	return mpchunk;
}

/**
 * Initialize a chunk and add into \a pool->chunks
 *
 * \param pool  The pool to add the chunk into.
 * \param mpchunk  The new uninitialized chunk (can be malloc'd)
 * \param lasttail  The last element of the previous chunk
 * (used when building free chunks initially)
 * \return The last free element of the new chunk.
 */
static BLI_freenode *mempool_chunk_add(BLI_mempool *pool, BLI_mempool_chunk *mpchunk,
                                       BLI_freenode *lasttail)
{
	const uint esize = pool->esize;
	BLI_freenode *curnode = CHUNK_DATA(mpchunk);
	uint j;

	/* append */
	if (pool->chunk_tail) {
		pool->chunk_tail->next = mpchunk;
	}
	else {
		BLI_assert(pool->chunks == NULL);
		pool->chunks = mpchunk;
	}

	mpchunk->next = NULL;
	pool->chunk_tail = mpchunk;

	if (UNLIKELY(pool->free == NULL)) {
		pool->free = curnode;
	}

	/* loop through the allocated data, building the pointer structures */
	j = pool->pchunk;
	if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
		while (j--) {
			curnode->next = NODE_STEP_NEXT(curnode);
			curnode->freeword = FREEWORD;
			curnode = curnode->next;
		}
	}
	else {
		while (j--) {
			curnode->next = NODE_STEP_NEXT(curnode);
			curnode = curnode->next;
		}
	}

	/* terminate the list (rewind one)
	 * will be overwritten if 'curnode' gets passed in again as 'lasttail' */
	curnode = NODE_STEP_PREV(curnode);
	curnode->next = NULL;

#ifdef USE_TOTALLOC
	pool->totalloc += pool->pchunk;
#endif

	/* final pointer in the previously allocated chunk is wrong */
	if (lasttail) {
		lasttail->next = CHUNK_DATA(mpchunk);
	}

	return curnode;
}
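
/* After mempool_chunk_add() the chunk's slots form one forward-linked run:
 * slot[0] -> slot[1] -> ... -> slot[pchunk - 1] -> NULL, and the returned tail
 * gets re-linked to the next chunk's first slot when further chunks are added. */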

static void mempool_chunk_free(BLI_mempool_chunk *mpchunk)
{
#ifdef USE_DATA_PTR
	MEM_freeN(CHUNK_DATA(mpchunk));
#endif
	MEM_freeN(mpchunk);
}

static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk)
{
	BLI_mempool_chunk *mpchunk_next;

	for (; mpchunk; mpchunk = mpchunk_next) {
		mpchunk_next = mpchunk->next;
		mempool_chunk_free(mpchunk);
	}
}

BLI_mempool *BLI_mempool_create(uint esize, uint totelem,
                                uint pchunk, uint flag)
{
	BLI_mempool *pool;
	BLI_freenode *lasttail = NULL;
	uint i, maxchunks;

	/* allocate the pool structure */
	pool = MEM_mallocN(sizeof(BLI_mempool), "memory pool");

	/* set the elem size */
	if (esize < (int)MEMPOOL_ELEM_SIZE_MIN) {
		esize = (int)MEMPOOL_ELEM_SIZE_MIN;
	}

	if (flag & BLI_MEMPOOL_ALLOW_ITER) {
		esize = MAX2(esize, (uint)sizeof(BLI_freenode));
	}

	maxchunks = mempool_maxchunks(totelem, pchunk);

	pool->chunks = NULL;
	pool->chunk_tail = NULL;
	pool->esize = esize;
	pool->csize = esize * pchunk;


	/* Optimize chunk size to powers of 2, accounting for slop-space */
#ifdef USE_CHUNK_POW2
	{
		BLI_assert(pool->csize > CHUNK_OVERHEAD);
		pool->csize = power_of_2_max_u(pool->csize) - CHUNK_OVERHEAD;
		pchunk = pool->csize / esize;
	}
#endif


	pool->pchunk = pchunk;
	pool->flag = flag;
	pool->free = NULL;  /* mempool_chunk_add assigns */
	pool->maxchunks = maxchunks;
#ifdef USE_TOTALLOC
	pool->totalloc = 0;
#endif
	pool->totused = 0;

	if (totelem) {
		/* allocate the actual chunks */
		for (i = 0; i < maxchunks; i++) {
			BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
			lasttail = mempool_chunk_add(pool, mpchunk, lasttail);
		}
	}

#ifdef WITH_MEM_VALGRIND
	VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

	return pool;
}
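
/* Worked example of the USE_CHUNK_POW2 sizing above (values illustrative):
 * esize = 16 and pchunk = 512 give csize = 8192; power_of_2_max_u(8192) is
 * already 8192, so csize becomes 8192 - CHUNK_OVERHEAD and pchunk is recomputed
 * as csize / esize, keeping each chunk allocation within a power of two. */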

void *BLI_mempool_alloc(BLI_mempool *pool)
{
	BLI_freenode *free_pop;

	if (UNLIKELY(pool->free == NULL)) {
		/* need to allocate a new chunk */
		BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
		mempool_chunk_add(pool, mpchunk, NULL);
	}

	free_pop = pool->free;

	BLI_assert(pool->chunk_tail->next == NULL);

	if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
		free_pop->freeword = USEDWORD;
	}

	pool->free = free_pop->next;
	pool->totused++;

#ifdef WITH_MEM_VALGRIND
	VALGRIND_MEMPOOL_ALLOC(pool, free_pop, pool->esize);
#endif

	return (void *)free_pop;
}

void *BLI_mempool_calloc(BLI_mempool *pool)
{
	void *retval = BLI_mempool_alloc(pool);
	memset(retval, 0, (size_t)pool->esize);
	return retval;
}
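
/* Sketch (not compiled): calloc is alloc plus zero-fill, for elements that may
 * be read before every field is written. 'pool' and 'MyElem' are hypothetical. */
#if 0
MyElem *elem = BLI_mempool_calloc(pool);
BLI_assert(elem->id == 0);  /* all bytes are zeroed */
#endif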

/**
 * Free an element from the mempool.
 *
 * \note doesn't protect against double frees, don't be stupid!
 */
void BLI_mempool_free(BLI_mempool *pool, void *addr)
{
	BLI_freenode *newhead = addr;

#ifndef NDEBUG
	{
		BLI_mempool_chunk *chunk;
		bool found = false;
		for (chunk = pool->chunks; chunk; chunk = chunk->next) {
			if (ARRAY_HAS_ITEM((char *)addr, (char *)CHUNK_DATA(chunk), pool->csize)) {
				found = true;
				break;
			}
		}
		if (!found) {
			BLI_assert(!"Attempt to free data which is not in pool.\n");
		}
	}

	/* enable for debugging */
	if (UNLIKELY(mempool_debug_memset)) {
		memset(addr, 255, pool->esize);
	}
#endif

	if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
#ifndef NDEBUG
		/* this will detect double frees */
		BLI_assert(newhead->freeword != FREEWORD);
#endif
		newhead->freeword = FREEWORD;
	}

	newhead->next = pool->free;
	pool->free = newhead;

	pool->totused--;

#ifdef WITH_MEM_VALGRIND
	VALGRIND_MEMPOOL_FREE(pool, addr);
#endif

	/* nothing is in use; free all the chunks except the first */
	if (UNLIKELY(pool->totused == 0) &&
	    (pool->chunks->next))
	{
		const uint esize = pool->esize;
		BLI_freenode *curnode;
		uint j;
		BLI_mempool_chunk *first;

		first = pool->chunks;
		mempool_chunk_free_all(first->next);
		first->next = NULL;
		pool->chunk_tail = first;

#ifdef USE_TOTALLOC
		pool->totalloc = pool->pchunk;
#endif

		/* temp alloc so valgrind doesn't complain when setting free'd blocks 'next' */
#ifdef WITH_MEM_VALGRIND
		VALGRIND_MEMPOOL_ALLOC(pool, CHUNK_DATA(first), pool->csize);
#endif

		curnode = CHUNK_DATA(first);
		pool->free = curnode;

		j = pool->pchunk;
		while (j--) {
			curnode->next = NODE_STEP_NEXT(curnode);
			curnode = curnode->next;
		}
		curnode = NODE_STEP_PREV(curnode);
		curnode->next = NULL;  /* terminate the list */

#ifdef WITH_MEM_VALGRIND
		VALGRIND_MEMPOOL_FREE(pool, CHUNK_DATA(first));
#endif
	}
}

int BLI_mempool_len(BLI_mempool *pool)
{
	return (int)pool->totused;
}

void *BLI_mempool_findelem(BLI_mempool *pool, uint index)
{
	BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

	if (index < pool->totused) {
		/* we could have some faster mem chunk stepping code inline */
		BLI_mempool_iter iter;
		void *elem;
		BLI_mempool_iternew(pool, &iter);
		for (elem = BLI_mempool_iterstep(&iter); index-- != 0; elem = BLI_mempool_iterstep(&iter)) {
			/* do nothing */
		}
		return elem;
	}

	return NULL;
}

/**
 * Fill in \a data with pointers to each element of the mempool,
 * to create a lookup table.
 *
 * \param pool  Pool to create a table from.
 * \param data  array of pointers at least the size of 'pool->totused'
 */
void BLI_mempool_as_table(BLI_mempool *pool, void **data)
{
	BLI_mempool_iter iter;
	void *elem;
	void **p = data;
	BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
	BLI_mempool_iternew(pool, &iter);
	while ((elem = BLI_mempool_iterstep(&iter))) {
		*p++ = elem;
	}
	BLI_assert((uint)(p - data) == pool->totused);
}
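
/* Usage sketch (not compiled): build a flat pointer table for random access;
 * 'pool' is hypothetical. */
#if 0
void **table = BLI_mempool_as_tableN(pool, __func__);
/* index elements as table[i] for i < BLI_mempool_len(pool) */
MEM_freeN(table);
#endif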

/**
 * A version of #BLI_mempool_as_table that allocates and returns the data.
 */
void **BLI_mempool_as_tableN(BLI_mempool *pool, const char *allocstr)
{
	void **data = MEM_mallocN((size_t)pool->totused * sizeof(void *), allocstr);
	BLI_mempool_as_table(pool, data);
	return data;
}

/**
 * Fill in \a data with the contents of the mempool.
 */
void BLI_mempool_as_array(BLI_mempool *pool, void *data)
{
	const uint esize = pool->esize;
	BLI_mempool_iter iter;
	char *elem, *p = data;
	BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
	BLI_mempool_iternew(pool, &iter);
	while ((elem = BLI_mempool_iterstep(&iter))) {
		memcpy(p, elem, (size_t)esize);
		p = NODE_STEP_NEXT(p);
	}
	BLI_assert((uint)(p - (char *)data) == pool->totused * esize);
}

/**
 * A version of #BLI_mempool_as_array that allocates and returns the data.
 */
void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
{
	char *data = MEM_mallocN((size_t)(pool->totused * pool->esize), allocstr);
	BLI_mempool_as_array(pool, data);
	return data;
}

/**
 * Initialize a new mempool iterator, \a BLI_MEMPOOL_ALLOW_ITER flag must be set.
 */
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
{
	BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

	iter->pool = pool;
	iter->curchunk = pool->chunks;
	iter->curindex = 0;

	iter->curchunk_threaded_shared = NULL;
}

/**
 * Initialize an array of mempool iterators, \a BLI_MEMPOOL_ALLOW_ITER flag must be set.
 *
 * This is used in threaded code, to generate as many iterators as needed (each task should have its own),
 * such that each iterator goes over its own single chunk, and only getting the next chunk to iterate over has to be
 * protected against concurrency (which can be done in a lockless way).
 *
 * To be used when creating a task for each single item in the pool is totally overkill.
 *
 * See BLI_task_parallel_mempool implementation for detailed usage example.
 */
BLI_mempool_iter *BLI_mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t num_iter)
{
	BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

	BLI_mempool_iter *iter_arr = MEM_mallocN(sizeof(*iter_arr) * num_iter, __func__);
	BLI_mempool_chunk **curchunk_threaded_shared = MEM_mallocN(sizeof(void *), __func__);

	BLI_mempool_iternew(pool, iter_arr);

	*curchunk_threaded_shared = iter_arr->curchunk;
	iter_arr->curchunk_threaded_shared = curchunk_threaded_shared;

	for (size_t i = 1; i < num_iter; i++) {
		iter_arr[i] = iter_arr[0];
		*curchunk_threaded_shared = iter_arr[i].curchunk = (*curchunk_threaded_shared) ? (*curchunk_threaded_shared)->next : NULL;
	}

	return iter_arr;
}

void BLI_mempool_iter_threadsafe_free(BLI_mempool_iter *iter_arr)
{
	BLI_assert(iter_arr->curchunk_threaded_shared != NULL);

	MEM_freeN(iter_arr->curchunk_threaded_shared);
	MEM_freeN(iter_arr);
}
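
/* Usage sketch (not compiled): one iterator per task, all sharing a single
 * lock-free 'next chunk' cursor; 'num_tasks' and 'pool' are hypothetical. */
#if 0
BLI_mempool_iter *iter_arr = BLI_mempool_iter_threadsafe_create(pool, num_tasks);
/* each task 'i' runs: while ((elem = BLI_mempool_iterstep(&iter_arr[i]))) { ... } */
BLI_mempool_iter_threadsafe_free(iter_arr);
#endif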

#if 0
/* unoptimized, more readable */

static void *bli_mempool_iternext(BLI_mempool_iter *iter)
{
	void *ret = NULL;

	if (iter->curchunk == NULL || !iter->pool->totused) {
		return ret;
	}

	ret = ((char *)CHUNK_DATA(iter->curchunk)) + (iter->pool->esize * iter->curindex);

	iter->curindex++;

	if (iter->curindex == iter->pool->pchunk) {
		iter->curindex = 0;
		if (iter->curchunk_threaded_shared) {
			while (1) {
				iter->curchunk = *iter->curchunk_threaded_shared;
				if (iter->curchunk == NULL) {
					return ret;
				}
				if (atomic_cas_ptr((void **)iter->curchunk_threaded_shared, iter->curchunk, iter->curchunk->next) == iter->curchunk) {
					break;
				}
			}
		}
		iter->curchunk = iter->curchunk->next;
	}

	return ret;
}

void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
	BLI_freenode *ret;

	do {
		ret = bli_mempool_iternext(iter);
	} while (ret && ret->freeword == FREEWORD);

	return ret;
}

#else

/* optimized version of code above */

/**
 * Step over the iterator, returning the mempool item or NULL.
 */
void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
	if (UNLIKELY(iter->curchunk == NULL)) {
		return NULL;
	}

	const uint esize = iter->pool->esize;
	BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
	BLI_freenode *ret;
	do {
		ret = curnode;

		if (++iter->curindex != iter->pool->pchunk) {
			curnode = POINTER_OFFSET(curnode, esize);
		}
		else {
			iter->curindex = 0;
			if (iter->curchunk_threaded_shared) {
				for (iter->curchunk = *iter->curchunk_threaded_shared;
				     (iter->curchunk != NULL) &&
				     (atomic_cas_ptr((void **)iter->curchunk_threaded_shared, iter->curchunk, iter->curchunk->next) != iter->curchunk);
				     iter->curchunk = *iter->curchunk_threaded_shared);

				if (UNLIKELY(iter->curchunk == NULL)) {
					return (ret->freeword == FREEWORD) ? NULL : ret;
				}
			}
			iter->curchunk = iter->curchunk->next;
			if (UNLIKELY(iter->curchunk == NULL)) {
				return (ret->freeword == FREEWORD) ? NULL : ret;
			}
			curnode = CHUNK_DATA(iter->curchunk);
		}
	} while (ret->freeword == FREEWORD);

	return ret;
}

#endif

/**
 * Empty the pool, as if it were just created.
 *
 * \param pool  The pool to clear.
 * \param totelem_reserve  Optionally reserve how many items should be kept from clearing.
 */
void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
{
	BLI_mempool_chunk *mpchunk;
	BLI_mempool_chunk *mpchunk_next;
	uint maxchunks;

	BLI_mempool_chunk *chunks_temp;
	BLI_freenode *lasttail = NULL;

#ifdef WITH_MEM_VALGRIND
	VALGRIND_DESTROY_MEMPOOL(pool);
	VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

	if (totelem_reserve == -1) {
		maxchunks = pool->maxchunks;
	}
	else {
		maxchunks = mempool_maxchunks((uint)totelem_reserve, pool->pchunk);
	}

	/* free all after pool->maxchunks */
	mpchunk = mempool_chunk_find(pool->chunks, maxchunks - 1);
	if (mpchunk && mpchunk->next) {
		/* terminate */
		mpchunk_next = mpchunk->next;
		mpchunk->next = NULL;
		mpchunk = mpchunk_next;

		do {
			mpchunk_next = mpchunk->next;
			mempool_chunk_free(mpchunk);
		} while ((mpchunk = mpchunk_next));
	}

	/* re-initialize */
	pool->free = NULL;
	pool->totused = 0;
#ifdef USE_TOTALLOC
	pool->totalloc = 0;
#endif

	chunks_temp = pool->chunks;
	pool->chunks = NULL;
	pool->chunk_tail = NULL;

	while ((mpchunk = chunks_temp)) {
		chunks_temp = mpchunk->next;
		lasttail = mempool_chunk_add(pool, mpchunk, lasttail);
	}
}
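
/* Usage sketch (not compiled): clear the pool but keep enough chunks for about
 * 1000 elements, avoiding chunk re-allocation on refill; 'pool' is hypothetical. */
#if 0
BLI_mempool_clear_ex(pool, 1000);
#endif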

/**
 * Wrap #BLI_mempool_clear_ex with no reserve set.
 */
void BLI_mempool_clear(BLI_mempool *pool)
{
	BLI_mempool_clear_ex(pool, -1);
}

/**
 * Free the mempool itself (and all elements).
 */
void BLI_mempool_destroy(BLI_mempool *pool)
{
	mempool_chunk_free_all(pool->chunks);

#ifdef WITH_MEM_VALGRIND
	VALGRIND_DESTROY_MEMPOOL(pool);
#endif

	MEM_freeN(pool);
}

#ifndef NDEBUG
void BLI_mempool_set_memory_debug(void)
{
	mempool_debug_memset = true;
}
#endif