/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 *
 * GPU geometry batch
 * Contains VAOs + VBOs + Shader representing a drawable entity.
 */

#ifndef __GPU_BATCH_H__
#define __GPU_BATCH_H__

#include "GPU_element.h"
#include "GPU_shader.h"
#include "GPU_shader_interface.h"
#include "GPU_vertex_buffer.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef enum {
  GPU_BATCH_UNUSED,
  GPU_BATCH_READY_TO_FORMAT,
  GPU_BATCH_READY_TO_BUILD,
  GPU_BATCH_BUILDING,
  GPU_BATCH_READY_TO_DRAW,
} GPUBatchPhase;

#define GPU_BATCH_VBO_MAX_LEN 6
#define GPU_BATCH_INST_VBO_MAX_LEN 2
#define GPU_BATCH_VAO_STATIC_LEN 3
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16

typedef struct GPUBatch {
  /* geometry */

  /** verts[0] is required, others can be NULL */
  GPUVertBuf *verts[GPU_BATCH_VBO_MAX_LEN];
  /** Instance attributes. */
  GPUVertBuf *inst[GPU_BATCH_INST_VBO_MAX_LEN];
  /** NULL if element list not needed */
  GPUIndexBuf *elem;
  uint32_t gl_prim_type;

  /* cached values (avoid dereferencing later) */
  uint32_t vao_id;
  uint32_t program;
  const struct GPUShaderInterface *interface;

  /* book-keeping */
  uint owns_flag;
  /** used to free all vaos. this implies all vaos were created under the same context. */
  struct GPUContext *context;
  GPUBatchPhase phase;
  bool program_in_use;

  /* Vao management: remembers all geometry state (vertex attribute bindings & element buffer)
   * for each shader interface. Start with a static number of vaos and fallback to dynamic count
   * if necessary. Once a batch goes dynamic it does not go back. */
  bool is_dynamic_vao_count;
  union {
    /** Static handle count */
    struct {
      const struct GPUShaderInterface *interfaces[GPU_BATCH_VAO_STATIC_LEN];
      uint32_t vao_ids[GPU_BATCH_VAO_STATIC_LEN];
    } static_vaos;
    /** Dynamic handle count */
    struct {
      uint count;
      const struct GPUShaderInterface **interfaces;
      uint32_t *vao_ids;
    } dynamic_vaos;
  };

  /* XXX This is the only solution if we want to have some data structure using
   * batches as key to identify nodes. We must destroy these nodes with this callback. */
  void (*free_callback)(struct GPUBatch *, void *);
  void *callback_data;
} GPUBatch;

enum {
  GPU_BATCH_OWNS_VBO = (1 << 0),
  /* each vbo index gets bit-shifted */
  GPU_BATCH_OWNS_INSTANCES = (1 << 30),
  GPU_BATCH_OWNS_INDEX = (1u << 31u),
};

GPUBatch *GPU_batch_create_ex(GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
void GPU_batch_init_ex(GPUBatch *, GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);

#define GPU_batch_create(prim, verts, elem) GPU_batch_create_ex(prim, verts, elem, 0)
#define GPU_batch_init(batch, prim, verts, elem) GPU_batch_init_ex(batch, prim, verts, elem, 0)
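
/* Usage sketch (illustration only, not part of the API): build a batch from a
 * filled vertex buffer and give it ownership. GPU_vertbuf_create_with_format()
 * and GPU_PRIM_TRIS are assumed to come from GPU_vertex_buffer.h and
 * GPU_primitive.h respectively.
 *
 *   GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
 *   // ... fill vbo with vertex data ...
 *   GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
 *   // batch now owns vbo; it is freed when the batch is discarded.
 */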

/* Same as discard but does not free. (does not call free callback). */
void GPU_batch_clear(GPUBatch *);

void GPU_batch_discard(GPUBatch *); /* verts & elem are not discarded */

void GPU_batch_vao_cache_clear(GPUBatch *);

void GPU_batch_callback_free_set(GPUBatch *, void (*callback)(GPUBatch *, void *), void *);
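
/* Sketch of the free-callback hook declared above, assuming a caller-owned
 * cache where batches act as keys (my_cache_node_free() is hypothetical):
 *
 *   static void my_batch_free_cb(GPUBatch *batch, void *user_data)
 *   {
 *     // Drop the cache node keyed by this batch when the batch goes away.
 *     my_cache_node_free(user_data);
 *   }
 *
 *   GPU_batch_callback_free_set(batch, my_batch_free_cb, node);
 */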

void GPU_batch_instbuf_set(GPUBatch *, GPUVertBuf *, bool own_vbo); /* Instancing */
void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo);

int GPU_batch_instbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);
int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);

#define GPU_batch_vertbuf_add(batch, verts) GPU_batch_vertbuf_add_ex(batch, verts, false)
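
/* Instancing sketch: attach a vertex buffer holding per-instance attributes,
 * or add extra per-vertex streams (inst_vbo / extra_vbo are placeholders):
 *
 *   GPU_batch_instbuf_set(batch, inst_vbo, true);   // batch takes ownership
 *   GPU_batch_vertbuf_add(batch, extra_vbo);        // shared, not owned
 */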

void GPU_batch_program_set_no_use(GPUBatch *, uint32_t program, const GPUShaderInterface *);
void GPU_batch_program_set(GPUBatch *, uint32_t program, const GPUShaderInterface *);
void GPU_batch_program_set_shader(GPUBatch *, GPUShader *shader);
void GPU_batch_program_set_imm_shader(GPUBatch *batch);
void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id);
void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
                                               eGPUBuiltinShader shader_id,
                                               eGPUShaderConfig sh_cfg);
/* Entire batch draws with one shader program, but can be redrawn later with another program. */
/* Vertex shader's inputs must be compatible with the batch's vertex format. */

void GPU_batch_program_use_begin(GPUBatch *); /* call before Batch_Uniform (temp hack?) */
void GPU_batch_program_use_end(GPUBatch *);

void GPU_batch_uniform_1ui(GPUBatch *, const char *name, uint value);
void GPU_batch_uniform_1i(GPUBatch *, const char *name, int value);
void GPU_batch_uniform_1b(GPUBatch *, const char *name, bool value);
void GPU_batch_uniform_1f(GPUBatch *, const char *name, float value);
void GPU_batch_uniform_2f(GPUBatch *, const char *name, float x, float y);
void GPU_batch_uniform_3f(GPUBatch *, const char *name, float x, float y, float z);
void GPU_batch_uniform_4f(GPUBatch *, const char *name, float x, float y, float z, float w);
void GPU_batch_uniform_2fv(GPUBatch *, const char *name, const float data[2]);
void GPU_batch_uniform_3fv(GPUBatch *, const char *name, const float data[3]);
void GPU_batch_uniform_4fv(GPUBatch *, const char *name, const float data[4]);
void GPU_batch_uniform_2fv_array(GPUBatch *, const char *name, const int len, const float *data);
void GPU_batch_uniform_4fv_array(GPUBatch *, const char *name, const int len, const float *data);
void GPU_batch_uniform_mat4(GPUBatch *, const char *name, const float data[4][4]);

void GPU_batch_draw(GPUBatch *);
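
/* Minimal draw sketch using a built-in shader. GPU_SHADER_3D_UNIFORM_COLOR is
 * assumed to be one of the eGPUBuiltinShader ids from GPU_shader.h; the
 * "color" uniform name must match that shader.
 *
 *   GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
 *   GPU_batch_uniform_4f(batch, "color", 1.0f, 0.5f, 0.0f, 1.0f);
 *   GPU_batch_draw(batch);  // draws with the program set above
 */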

/* Needs to be called before GPU_batch_draw_advanced. */
void GPU_batch_bind(GPUBatch *);
/* This does not bind/unbind shader and does not call GPU_matrix_bind() */
void GPU_batch_draw_advanced(GPUBatch *, int v_first, int v_count, int i_first, int i_count);
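
/* Sketch of a partial or instanced draw with the advanced API above; the
 * program must be bound by the caller since GPU_batch_draw_advanced() does
 * not bind it (v_count / i_count are placeholders):
 *
 *   GPU_batch_program_use_begin(batch);
 *   GPU_batch_bind(batch);
 *   GPU_batch_draw_advanced(batch, 0, v_count, 0, i_count);
 *   GPU_batch_program_use_end(batch);
 */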

/* Does not even need batch */
void GPU_draw_primitive(GPUPrimType, int v_count);

#if 0 /* future plans */

/* Can multiple batches share a GPUVertBuf? Use ref count? */

/* We often need a batch with its own data, to be created and discarded together. */
/* WithOwn variants reduce number of system allocations. */

typedef struct BatchWithOwnVertexBuffer {
  GPUBatch batch;
  GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBuffer;

typedef struct BatchWithOwnElementList {
  GPUBatch batch;
  GPUIndexBuf elem; /* link batch.elem to this */
} BatchWithOwnElementList;

typedef struct BatchWithOwnVertexBufferAndElementList {
  GPUBatch batch;
  GPUIndexBuf elem; /* link batch.elem to this */
  GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBufferAndElementList;

GPUBatch *create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat *, uint v_len, GPUIndexBuf *);
GPUBatch *create_BatchWithOwnElementList(GPUPrimType, GPUVertBuf *, uint prim_len);
GPUBatch *create_BatchWithOwnVertexBufferAndElementList(GPUPrimType,
                                                        GPUVertFormat *,
                                                        uint v_len,
                                                        uint prim_len);
/* verts: shared, own */
/* elem: none, shared, own */
GPUBatch *create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff);

#endif /* future plans */

/**
 * #GPUDrawList is an API to do lots of similar draw-calls very fast using multi-draw-indirect.
 * There is a fallback if the feature is not supported.
 */
typedef struct GPUDrawList GPUDrawList;

GPUDrawList *GPU_draw_list_create(int length);
void GPU_draw_list_discard(GPUDrawList *list);
void GPU_draw_list_init(GPUDrawList *list, GPUBatch *batch);
void GPU_draw_list_command_add(
    GPUDrawList *list, int v_first, int v_count, int i_first, int i_count);
void GPU_draw_list_submit(GPUDrawList *list);
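
/* GPUDrawList usage sketch (the create() length is assumed to be the command
 * capacity; the counts below are placeholders):
 *
 *   GPUDrawList *list = GPU_draw_list_create(128);
 *   GPU_draw_list_init(list, batch);
 *   for (int i = 0; i < chunk_count; i++) {
 *     GPU_draw_list_command_add(list, v_first[i], v_count[i], 0, 1);
 *   }
 *   GPU_draw_list_submit(list);
 *   GPU_draw_list_discard(list);
 */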

void gpu_batch_init(void);
void gpu_batch_exit(void);

/* Macros */

#define GPU_BATCH_DISCARD_SAFE(batch) \
  do { \
    if (batch != NULL) { \
      GPU_batch_discard(batch); \
      batch = NULL; \
    } \
  } while (0)

#define GPU_BATCH_CLEAR_SAFE(batch) \
  do { \
    if (batch != NULL) { \
      GPU_batch_clear(batch); \
      memset(batch, 0, sizeof(*(batch))); \
    } \
  } while (0)

#define GPU_BATCH_DISCARD_ARRAY_SAFE(_batch_array, _len) \
  do { \
    if (_batch_array != NULL) { \
      BLI_assert(_len > 0); \
      for (int _i = 0; _i < _len; _i++) { \
        GPU_BATCH_DISCARD_SAFE(_batch_array[_i]); \
      } \
      MEM_freeN(_batch_array); \
    } \
  } while (0)
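
/* Sketch of the *_SAFE macros above: they take the pointer variable itself so
 * the discard variant can reset it (cache is a hypothetical owner struct):
 *
 *   GPU_BATCH_DISCARD_SAFE(cache->batch);   // discards, then sets cache->batch = NULL
 *   GPU_BATCH_CLEAR_SAFE(cache->tmp_batch); // clears and zeroes the struct, keeps the allocation
 *   GPU_BATCH_DISCARD_ARRAY_SAFE(cache->batches, cache->len); // also MEM_freeN()s the array
 */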

#ifdef __cplusplus
}
#endif

#endif /* __GPU_BATCH_H__ */