ClangFormat: apply to source, most of intern

Apply clang format as proposed in T53211.

For details on usage and instructions for migrating branches
without conflicts, see:

https://wiki.blender.org/wiki/Tools/ClangFormat
This commit is contained in:
2019-04-17 06:17:24 +02:00
parent b3dabc200a
commit e12c08e8d1
4481 changed files with 1230080 additions and 1155401 deletions

View File

@@ -16,42 +16,42 @@
# ***** END GPL LICENSE BLOCK *****
# Include search paths, system includes and sources for the `gpu` Python
# module.  (Deduplicated: the diff scrape contained both the pre- and
# post-clang-format copy of every entry.)
set(INC
  .
  ../../blenkernel
  ../../blenlib
  ../../gpu
  ../../makesdna
  ../../../../intern/guardedalloc
  ../../../../intern/glew-mx
)

set(INC_SYS
  ${GLEW_INCLUDE_PATH}
  ${PYTHON_INCLUDE_DIRS}
)

set(SRC
  gpu_py_api.c
  gpu_py_batch.c
  gpu_py_element.c
  gpu_py_matrix.c
  gpu_py_offscreen.c
  gpu_py_select.c
  gpu_py_shader.c
  gpu_py_types.c
  gpu_py_vertex_buffer.c
  gpu_py_vertex_format.c

  gpu_py_api.h
  gpu_py_batch.h
  gpu_py_element.h
  gpu_py_matrix.h
  gpu_py_offscreen.h
  gpu_py_select.h
  gpu_py_shader.h
  gpu_py_types.h
  gpu_py_vertex_buffer.h
  gpu_py_vertex_format.h
)
set(LIB

View File

@@ -39,120 +39,111 @@
#include "gpu_py_api.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name Utils to invalidate functions
* \{ */
/* Return true when the GPU backend is initialized; otherwise raise
 * SystemError (drawing is unavailable in background mode) and return false. */
bool bpygpu_is_initialized_or_error(void)
{
  if (!GPU_is_initialized()) {
    PyErr_SetString(PyExc_SystemError,
                    "GPU functions for drawing are not available in background mode");
    return false;
  }
  return true;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Primitive Type Utils
* \{ */
/* PyArg "O&" converter: parse a primitive-type name string (e.g. "TRIS")
 * into a GPUPrimType stored at *p.  Returns 1 on success, 0 (with a Python
 * exception set) on failure. */
int bpygpu_ParsePrimType(PyObject *o, void *p)
{
  Py_ssize_t mode_id_len;
  const char *mode_id = _PyUnicode_AsStringAndSize(o, &mode_id_len);
  if (mode_id == NULL) {
    PyErr_Format(PyExc_ValueError, "expected a string, got %s", Py_TYPE(o)->tp_name);
    return 0;
  }

  /* Compare length first so STREQ only runs on candidates of equal size. */
#define MATCH_ID(id) \
  if (mode_id_len == strlen(STRINGIFY(id))) { \
    if (STREQ(mode_id, STRINGIFY(id))) { \
      mode = GPU_PRIM_##id; \
      goto success; \
    } \
  } \
  ((void)0)

  GPUPrimType mode;
  MATCH_ID(POINTS);
  MATCH_ID(LINES);
  MATCH_ID(TRIS);
  MATCH_ID(LINE_STRIP);
  MATCH_ID(LINE_LOOP);
  MATCH_ID(TRI_STRIP);
  MATCH_ID(TRI_FAN);
  MATCH_ID(LINE_STRIP_ADJ);

#undef MATCH_ID

  PyErr_Format(PyExc_ValueError, "unknown type literal: '%s'", mode_id);
  return 0;

success:
  (*(GPUPrimType *)p) = mode;
  return 1;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPU Module
* \{ */
PyDoc_STRVAR(GPU_doc,
"This module provides Python wrappers for the GPU implementation in Blender. "
"Some higher level functions can be found in the `gpu_extras` module. "
"\n\n"
"Submodules:\n"
"\n"
".. toctree::\n"
" :maxdepth: 1\n"
"\n"
" gpu.types.rst\n"
" gpu.shader.rst\n"
" gpu.matrix.rst\n"
" gpu.select.rst\n"
"\n"
);
"This module provides Python wrappers for the GPU implementation in Blender. "
"Some higher level functions can be found in the `gpu_extras` module. "
"\n\n"
"Submodules:\n"
"\n"
".. toctree::\n"
" :maxdepth: 1\n"
"\n"
" gpu.types.rst\n"
" gpu.shader.rst\n"
" gpu.matrix.rst\n"
" gpu.select.rst\n"
"\n");
static struct PyModuleDef GPU_module_def = {
PyModuleDef_HEAD_INIT,
.m_name = "gpu",
.m_doc = GPU_doc,
PyModuleDef_HEAD_INIT,
.m_name = "gpu",
.m_doc = GPU_doc,
};
PyObject *BPyInit_gpu(void)
{
PyObject *sys_modules = PyImport_GetModuleDict();
PyObject *submodule;
PyObject *mod;
PyObject *sys_modules = PyImport_GetModuleDict();
PyObject *submodule;
PyObject *mod;
mod = PyModule_Create(&GPU_module_def);
mod = PyModule_Create(&GPU_module_def);
PyModule_AddObject(mod, "types", (submodule = BPyInit_gpu_types()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "types", (submodule = BPyInit_gpu_types()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "matrix", (submodule = BPyInit_gpu_matrix()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "matrix", (submodule = BPyInit_gpu_matrix()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "select", (submodule = BPyInit_gpu_select()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "select", (submodule = BPyInit_gpu_select()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "shader", (submodule = BPyInit_gpu_shader()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "shader", (submodule = BPyInit_gpu_shader()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
return mod;
return mod;
}
/** \} */

View File

@@ -21,13 +21,20 @@
#ifndef __GPU_PY_API_H__
#define __GPU_PY_API_H__

int bpygpu_ParsePrimType(PyObject *o, void *p);

PyObject *BPyInit_gpu(void);

bool bpygpu_is_initialized_or_error(void);

/* Guard macros: bail out of a bound C function early (NULL / -1) when the
 * GPU backend is not initialized; the check also sets a Python exception. */
#define BPYGPU_IS_INIT_OR_ERROR_OBJ \
  if (UNLIKELY(!bpygpu_is_initialized_or_error())) { \
    return NULL; \
  } \
  ((void)0)
#define BPYGPU_IS_INIT_OR_ERROR_INT \
  if (UNLIKELY(!bpygpu_is_initialized_or_error())) { \
    return -1; \
  } \
  ((void)0)

#endif /* __GPU_PY_API_H__ */

View File

@@ -32,7 +32,6 @@
#include "BLI_utildefines.h"
#include "GPU_batch.h"
#include "../mathutils/mathutils.h"
@@ -45,85 +44,81 @@
#include "gpu_py_element.h"
#include "gpu_py_batch.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name Utility Functions
* \{ */
/* Return true when the batch has a GL program assigned; otherwise raise
 * RuntimeError and return false. */
static bool bpygpu_batch_is_program_or_error(BPyGPUBatch *self)
{
  if (!glIsProgram(self->batch->program)) {
    PyErr_SetString(PyExc_RuntimeError, "batch does not have any program assigned to it");
    return false;
  }
  return true;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUBatch Type
* \{ */
/* tp_new for GPUBatch: GPUBatch(type, buf, elem=None).
 * `type` and `buf` are required despite the "|$" optional marker, so they
 * are validated by hand to give positional-style error messages. */
static PyObject *bpygpu_Batch_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
  BPYGPU_IS_INIT_OR_ERROR_OBJ;

  const char *exc_str_missing_arg = "GPUBatch.__new__() missing required argument '%s' (pos %d)";

  struct {
    GPUPrimType type_id;
    BPyGPUVertBuf *py_vertbuf;
    BPyGPUIndexBuf *py_indexbuf;
  } params = {GPU_PRIM_NONE, NULL, NULL};

  static const char *_keywords[] = {"type", "buf", "elem", NULL};
  static _PyArg_Parser _parser = {"|$O&O!O!:GPUBatch.__new__", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(args,
                                        kwds,
                                        &_parser,
                                        bpygpu_ParsePrimType,
                                        &params.type_id,
                                        &BPyGPUVertBuf_Type,
                                        &params.py_vertbuf,
                                        &BPyGPUIndexBuf_Type,
                                        &params.py_indexbuf)) {
    return NULL;
  }

  if (params.type_id == GPU_PRIM_NONE) {
    PyErr_Format(PyExc_TypeError, exc_str_missing_arg, _keywords[0], 1);
    return NULL;
  }

  if (params.py_vertbuf == NULL) {
    PyErr_Format(PyExc_TypeError, exc_str_missing_arg, _keywords[1], 2);
    return NULL;
  }

  GPUBatch *batch = GPU_batch_create(params.type_id,
                                     params.py_vertbuf->buf,
                                     params.py_indexbuf ? params.py_indexbuf->elem : NULL);

  BPyGPUBatch *ret = (BPyGPUBatch *)BPyGPUBatch_CreatePyObject(batch);

#ifdef USE_GPU_PY_REFERENCES
  /* Hold a reference to each buffer so they outlive the batch. */
  ret->references = PyList_New(params.py_indexbuf ? 2 : 1);
  PyList_SET_ITEM(ret->references, 0, (PyObject *)params.py_vertbuf);
  Py_INCREF(params.py_vertbuf);

  if (params.py_indexbuf != NULL) {
    PyList_SET_ITEM(ret->references, 1, (PyObject *)params.py_indexbuf);
    Py_INCREF(params.py_indexbuf);
  }

  PyObject_GC_Track(ret);
#endif

  return (PyObject *)ret;
}
PyDoc_STRVAR(bpygpu_Batch_vertbuf_add_doc,
@@ -140,240 +135,224 @@ PyDoc_STRVAR(bpygpu_Batch_vertbuf_add_doc,
);
/* GPUBatch.vertbuf_add(buf): attach an extra vertex buffer; it must hold the
 * same number of vertices as the batch's first buffer. */
static PyObject *bpygpu_Batch_vertbuf_add(BPyGPUBatch *self, BPyGPUVertBuf *py_buf)
{
  if (!BPyGPUVertBuf_Check(py_buf)) {
    PyErr_Format(PyExc_TypeError, "Expected a GPUVertBuf, got %s", Py_TYPE(py_buf)->tp_name);
    return NULL;
  }

  if (self->batch->verts[0]->vertex_len != py_buf->buf->vertex_len) {
    PyErr_Format(PyExc_TypeError,
                 "Expected %d length, got %d",
                 self->batch->verts[0]->vertex_len,
                 py_buf->buf->vertex_len);
    return NULL;
  }

  /* A full last slot means all GPU_BATCH_VBO_MAX_LEN slots are taken. */
  if (self->batch->verts[GPU_BATCH_VBO_MAX_LEN - 1] != NULL) {
    PyErr_SetString(
        PyExc_RuntimeError,
        "Maximum number of vertex buffers exceeded: " STRINGIFY(GPU_BATCH_VBO_MAX_LEN));
    return NULL;
  }

#ifdef USE_GPU_PY_REFERENCES
  /* Hold user */
  PyList_Append(self->references, (PyObject *)py_buf);
#endif

  GPU_batch_vertbuf_add(self->batch, py_buf->buf);
  Py_RETURN_NONE;
}
PyDoc_STRVAR(
    bpygpu_Batch_program_set_doc,
    ".. method:: program_set(program)\n"
    "\n"
    "   Assign a shader to this batch that will be used for drawing when not overwritten later.\n"
    "   Note: This method has to be called in the draw context that the batch will be drawn in.\n"
    "   This function does not need to be called when you always set the shader when calling "
    "`batch.draw`.\n"
    "\n"
    "   :param program: The program/shader the batch will use in future draw calls.\n"
    "   :type program: :class:`gpu.types.GPUShader`\n");
static PyObject *bpygpu_Batch_program_set(BPyGPUBatch *self, BPyGPUShader *py_shader)
{
  if (!BPyGPUShader_Check(py_shader)) {
    PyErr_Format(PyExc_TypeError, "Expected a GPUShader, got %s", Py_TYPE(py_shader)->tp_name);
    return NULL;
  }

  GPUShader *shader = py_shader->shader;
  GPU_batch_program_set(
      self->batch, GPU_shader_get_program(shader), GPU_shader_get_interface(shader));

#ifdef USE_GPU_PY_REFERENCES
  /* Remove existing user (if any), hold new user. */
  int i = PyList_GET_SIZE(self->references);
  while (--i != -1) {
    PyObject *py_shader_test = PyList_GET_ITEM(self->references, i);
    if (BPyGPUShader_Check(py_shader_test)) {
      PyList_SET_ITEM(self->references, i, (PyObject *)py_shader);
      Py_INCREF(py_shader);
      Py_DECREF(py_shader_test);
      /* Only ever reference one shader. */
      break;
    }
  }
  /* BUGFIX: the scraped source appended when `i != -1` (i.e. when a shader
   * was already found and replaced above), which would hold a duplicate
   * reference and hold none at all on first assignment.  Append only when
   * the loop ran off the front without finding an existing shader. */
  if (i == -1) {
    PyList_Append(self->references, (PyObject *)py_shader);
  }
#endif

  Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_Batch_draw_doc,
             ".. method:: draw(program=None)\n"
             "\n"
             "   Run the drawing program with the parameters assigned to the batch.\n"
             "\n"
             "   :param program: Program that performs the drawing operations.\n"
             "      If ``None`` is passed, the last program setted to this batch will run.\n"
             "   :type program: :class:`gpu.types.GPUShader`\n");
static PyObject *bpygpu_Batch_draw(BPyGPUBatch *self, PyObject *args)
{
  BPyGPUShader *py_program = NULL;

  if (!PyArg_ParseTuple(args, "|O!:GPUBatch.draw", &BPyGPUShader_Type, &py_program)) {
    return NULL;
  }
  else if (py_program == NULL) {
    /* No shader passed: the batch must already have one assigned. */
    if (!bpygpu_batch_is_program_or_error(self)) {
      return NULL;
    }
  }
  else if (self->batch->program != GPU_shader_get_program(py_program->shader)) {
    /* Only rebind when the requested program differs from the current one. */
    GPU_batch_program_set(self->batch,
                          GPU_shader_get_program(py_program->shader),
                          GPU_shader_get_interface(py_program->shader));
  }

  GPU_batch_draw(self->batch);
  Py_RETURN_NONE;
}
/* Internal helper exposed as `_program_use_begin`: bind the batch's program. */
static PyObject *bpygpu_Batch_program_use_begin(BPyGPUBatch *self)
{
  if (!bpygpu_batch_is_program_or_error(self)) {
    return NULL;
  }
  GPU_batch_program_use_begin(self->batch);
  Py_RETURN_NONE;
}
/* Internal helper exposed as `_program_use_end`: unbind the batch's program. */
static PyObject *bpygpu_Batch_program_use_end(BPyGPUBatch *self)
{
  if (!bpygpu_batch_is_program_or_error(self)) {
    return NULL;
  }
  GPU_batch_program_use_end(self->batch);
  Py_RETURN_NONE;
}
/* Method table for GPUBatch (underscore-prefixed entries are internal). */
static struct PyMethodDef bpygpu_Batch_methods[] = {
    {"vertbuf_add", (PyCFunction)bpygpu_Batch_vertbuf_add, METH_O, bpygpu_Batch_vertbuf_add_doc},
    {"program_set", (PyCFunction)bpygpu_Batch_program_set, METH_O, bpygpu_Batch_program_set_doc},
    {"draw", (PyCFunction)bpygpu_Batch_draw, METH_VARARGS, bpygpu_Batch_draw_doc},
    {"_program_use_begin", (PyCFunction)bpygpu_Batch_program_use_begin, METH_NOARGS, ""},
    {"_program_use_end", (PyCFunction)bpygpu_Batch_program_use_end, METH_NOARGS, ""},
    {NULL, NULL, 0, NULL},
};
#ifdef USE_GPU_PY_REFERENCES

/* GC traverse: visit the list that keeps vertex/index buffers alive. */
static int bpygpu_Batch_traverse(BPyGPUBatch *self, visitproc visit, void *arg)
{
  Py_VISIT(self->references);
  return 0;
}

/* GC clear: drop the reference list. */
static int bpygpu_Batch_clear(BPyGPUBatch *self)
{
  Py_CLEAR(self->references);
  return 0;
}

#endif
/* tp_dealloc: discard the owned GPUBatch, release held references, free. */
static void bpygpu_Batch_dealloc(BPyGPUBatch *self)
{
  GPU_batch_discard(self->batch);

#ifdef USE_GPU_PY_REFERENCES
  if (self->references) {
    PyObject_GC_UnTrack(self);
    bpygpu_Batch_clear(self);
    Py_XDECREF(self->references);
  }
#endif

  Py_TYPE(self)->tp_free(self);
}
PyDoc_STRVAR(
    py_gpu_batch_doc,
    ".. class:: GPUBatch(type, buf, elem=None)\n"
    "\n"
    "   Reusable container for drawable geometry.\n"
    "\n"
    "   :arg type: One of these primitive types: {\n"
    "      `POINTS`,\n"
    "      `LINES`,\n"
    "      `TRIS`,\n"
    "      `LINE_STRIP`,\n"
    "      `LINE_LOOP`,\n"
    "      `TRI_STRIP`,\n"
    "      `TRI_FAN`,\n"
    "      `LINES_ADJ`,\n"
    "      `TRIS_ADJ`,\n"
    "      `LINE_STRIP_ADJ` }\n"
    "   :type type: `str`\n"
    "   :arg buf: Vertex buffer containing all or some of the attributes required for drawing.\n"
    "   :type buf: :class:`gpu.types.GPUVertBuf`\n"
    "   :arg elem: An optional index buffer.\n"
    "   :type elem: :class:`gpu.types.GPUIndexBuf`\n");
PyTypeObject BPyGPUBatch_Type = {
    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUBatch",
    .tp_basicsize = sizeof(BPyGPUBatch),
    .tp_dealloc = (destructor)bpygpu_Batch_dealloc,
#ifdef USE_GPU_PY_REFERENCES
    /* GC support is only needed when we hold references to other objects. */
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
    .tp_doc = py_gpu_batch_doc,
    .tp_traverse = (traverseproc)bpygpu_Batch_traverse,
    .tp_clear = (inquiry)bpygpu_Batch_clear,
#else
    .tp_flags = Py_TPFLAGS_DEFAULT,
#endif
    .tp_methods = bpygpu_Batch_methods,
    .tp_new = bpygpu_Batch_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
/* Wrap an existing GPUBatch in a new Python object (ownership transfers to
 * the wrapper; see bpygpu_Batch_dealloc). */
PyObject *BPyGPUBatch_CreatePyObject(GPUBatch *batch)
{
  BPyGPUBatch *self;

#ifdef USE_GPU_PY_REFERENCES
  self = (BPyGPUBatch *)_PyObject_GC_New(&BPyGPUBatch_Type);
  self->references = NULL;
#else
  self = PyObject_New(BPyGPUBatch, &BPyGPUBatch_Type);
#endif

  self->batch = batch;
  return (PyObject *)self;
}
/** \} */

View File

@@ -27,15 +27,15 @@
extern PyTypeObject BPyGPUBatch_Type;

#define BPyGPUBatch_Check(v) (Py_TYPE(v) == &BPyGPUBatch_Type)

typedef struct BPyGPUBatch {
  PyObject_VAR_HEAD
  /* The batch is owned, we may support thin wrapped batches later. */
  struct GPUBatch *batch;
#ifdef USE_GPU_PY_REFERENCES
  /* Just to keep a user to prevent freeing buf's we're using */
  PyObject *references;
#endif
} BPyGPUBatch;

View File

@@ -35,204 +35,190 @@
#include "gpu_py_api.h"
#include "gpu_py_element.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name IndexBuf Type
* \{ */
/* tp_new for GPUIndexBuf: GPUIndexBuf(type, seq).
 * Accepts either a buffer-protocol object of 4-byte integers (fast path,
 * bulk memcpy) or a Python sequence (1D for POINTS, 2D otherwise). */
static PyObject *bpygpu_IndexBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
  BPYGPU_IS_INIT_OR_ERROR_OBJ;

  const char *error_prefix = "IndexBuf.__new__";
  bool ok = true;

  struct {
    GPUPrimType type_id;
    PyObject *seq;
  } params;

  uint verts_per_prim;
  uint index_len;
  GPUIndexBufBuilder builder;

  static const char *_keywords[] = {"type", "seq", NULL};
  static _PyArg_Parser _parser = {"$O&O:IndexBuf.__new__", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(
          args, kwds, &_parser, bpygpu_ParsePrimType, &params.type_id, &params.seq)) {
    return NULL;
  }

  verts_per_prim = GPU_indexbuf_primitive_len(params.type_id);
  if (verts_per_prim == -1) {
    PyErr_Format(PyExc_ValueError,
                 "The argument 'type' must be "
                 "'POINTS', 'LINES', 'TRIS' or 'LINES_ADJ'");
    return NULL;
  }

  if (PyObject_CheckBuffer(params.seq)) {
    /* Fast path: read indices straight out of a buffer-protocol object. */
    Py_buffer pybuffer;

    if (PyObject_GetBuffer(params.seq, &pybuffer, PyBUF_FORMAT | PyBUF_ND) == -1) {
      /* PyObject_GetBuffer already handles error messages. */
      return NULL;
    }

    if (pybuffer.ndim != 1 && pybuffer.shape[1] != verts_per_prim) {
      PyErr_Format(PyExc_ValueError, "Each primitive must exactly %d indices", verts_per_prim);
      return NULL;
    }

    if (pybuffer.itemsize != 4 ||
        PyC_StructFmt_type_is_float_any(PyC_StructFmt_type_from_str(pybuffer.format))) {
      PyErr_Format(PyExc_ValueError, "Each index must be an 4-bytes integer value");
      return NULL;
    }

    index_len = pybuffer.shape[0];
    if (pybuffer.ndim != 1) {
      index_len *= pybuffer.shape[1];
    }

    /* The `vertex_len` parameter is only used for asserts in the Debug build. */
    /* Not very useful in python since scripts are often tested in Release build. */
    /* Use `INT_MAX` instead of the actual number of vertices. */
    GPU_indexbuf_init(&builder, params.type_id, index_len, INT_MAX);

#if 0
    uint *buf = pybuffer.buf;
    for (uint i = index_len; i--; buf++) {
      GPU_indexbuf_add_generic_vert(&builder, *buf);
    }
#else
    memcpy(builder.data, pybuffer.buf, index_len * sizeof(*builder.data));
    builder.index_len = index_len;
#endif
    PyBuffer_Release(&pybuffer);
  }
  else {
    /* Generic path: iterate over a Python sequence (of sequences). */
    PyObject *seq_fast = PySequence_Fast(params.seq, error_prefix);

    if (seq_fast == NULL) {
      /* Was `return false;` in the scraped source — same value (NULL), but
       * spell out the pointer return explicitly. */
      return NULL;
    }

    const uint seq_len = PySequence_Fast_GET_SIZE(seq_fast);

    PyObject **seq_items = PySequence_Fast_ITEMS(seq_fast);

    index_len = seq_len * verts_per_prim;

    /* The `vertex_len` parameter is only used for asserts in the Debug build. */
    /* Not very useful in python since scripts are often tested in Release build. */
    /* Use `INT_MAX` instead of the actual number of vertices. */
    GPU_indexbuf_init(&builder, params.type_id, index_len, INT_MAX);

    if (verts_per_prim == 1) {
      for (uint i = 0; i < seq_len; i++) {
        GPU_indexbuf_add_generic_vert(&builder, PyC_Long_AsU32(seq_items[i]));
      }
    }
    else {
      int values[4];
      for (uint i = 0; i < seq_len; i++) {
        PyObject *seq_fast_item = PySequence_Fast(seq_items[i], error_prefix);
        if (seq_fast_item == NULL) {
          PyErr_Format(PyExc_TypeError,
                       "%s: expected a sequence, got %s",
                       error_prefix,
                       Py_TYPE(seq_items[i])->tp_name);
          ok = false;
          goto finally;
        }

        ok = PyC_AsArray_FAST(
                 values, seq_fast_item, verts_per_prim, &PyLong_Type, false, error_prefix) == 0;

        if (ok) {
          for (uint j = 0; j < verts_per_prim; j++) {
            GPU_indexbuf_add_generic_vert(&builder, values[j]);
          }
        }
        Py_DECREF(seq_fast_item);
      }
    }

    if (PyErr_Occurred()) {
      ok = false;
    }

  finally:

    Py_DECREF(seq_fast);
  }

  if (ok == false) {
    MEM_freeN(builder.data);
    return NULL;
  }

  return BPyGPUIndexBuf_CreatePyObject(GPU_indexbuf_build(&builder));
}
/* tp_dealloc: discard the owned GPUIndexBuf and free the wrapper. */
static void bpygpu_IndexBuf_dealloc(BPyGPUIndexBuf *self)
{
  GPU_indexbuf_discard(self->elem);
  Py_TYPE(self)->tp_free(self);
}
PyDoc_STRVAR(py_gpu_element_doc,
             ".. class:: GPUIndexBuf(type, seq)\n"
             "\n"
             "   Contains an index buffer.\n"
             "\n"
             "   :param type: One of these primitive types: {\n"
             "      `POINTS`,\n"
             "      `LINES`,\n"
             "      `TRIS`,\n"
             "      `LINE_STRIP_ADJ` }\n"
             "   :type type: `str`\n"
             "   :param seq: Indices this index buffer will contain.\n"
             "      Whether a 1D or 2D sequence is required depends on the type.\n"
             "      Optionally the sequence can support the buffer protocol.\n"
             "   :type seq: 1D or 2D sequence\n");
PyTypeObject BPyGPUIndexBuf_Type = {
    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUIndexBuf",
    .tp_basicsize = sizeof(BPyGPUIndexBuf),
    .tp_dealloc = (destructor)bpygpu_IndexBuf_dealloc,
    .tp_flags = Py_TPFLAGS_DEFAULT,
    .tp_doc = py_gpu_element_doc,
    .tp_new = bpygpu_IndexBuf_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
/* Wrap an existing GPUIndexBuf in a new Python object (ownership transfers
 * to the wrapper; see bpygpu_IndexBuf_dealloc). */
PyObject *BPyGPUIndexBuf_CreatePyObject(GPUIndexBuf *elem)
{
  BPyGPUIndexBuf *self;

  self = PyObject_New(BPyGPUIndexBuf, &BPyGPUIndexBuf_Type);
  self->elem = elem;

  return (PyObject *)self;
}
/** \} */

View File

@@ -23,11 +23,10 @@
extern PyTypeObject BPyGPUIndexBuf_Type;

#define BPyGPUIndexBuf_Check(v) (Py_TYPE(v) == &BPyGPUIndexBuf_Type)

typedef struct BPyGPUIndexBuf {
  PyObject_VAR_HEAD
  /* The index buffer is owned by this wrapper. */
  struct GPUIndexBuf *elem;
} BPyGPUIndexBuf;
PyObject *BPyGPUIndexBuf_CreatePyObject(struct GPUIndexBuf *elem);

View File

@@ -28,7 +28,6 @@
#include <Python.h>
#include "BLI_utildefines.h"
#include "../mathutils/mathutils.h"
@@ -47,42 +46,42 @@
/* Guard: refuse to push past the model-view stack limit (RuntimeError). */
static bool bpygpu_stack_is_push_model_view_ok_or_error(void)
{
  if (GPU_matrix_stack_level_get_model_view() >= GPU_PY_MATRIX_STACK_LEN) {
    PyErr_SetString(
        PyExc_RuntimeError,
        "Maximum model-view stack depth " STRINGIFY(GPU_PY_MATRIX_STACK_DEPTH) " reached");
    return false;
  }
  return true;
}
static bool bpygpu_stack_is_push_projection_ok_or_error(void)
{
if (GPU_matrix_stack_level_get_projection() >= GPU_PY_MATRIX_STACK_LEN) {
PyErr_SetString(PyExc_RuntimeError,
"Maximum projection stack depth " STRINGIFY(GPU_PY_MATRIX_STACK_DEPTH) " reached");
return false;
}
return true;
if (GPU_matrix_stack_level_get_projection() >= GPU_PY_MATRIX_STACK_LEN) {
PyErr_SetString(
PyExc_RuntimeError,
"Maximum projection stack depth " STRINGIFY(GPU_PY_MATRIX_STACK_DEPTH) " reached");
return false;
}
return true;
}
static bool bpygpu_stack_is_pop_model_view_ok_or_error(void)
{
if (GPU_matrix_stack_level_get_model_view() == 0) {
PyErr_SetString(PyExc_RuntimeError,
"Minimum model-view stack depth reached");
return false;
}
return true;
if (GPU_matrix_stack_level_get_model_view() == 0) {
PyErr_SetString(PyExc_RuntimeError, "Minimum model-view stack depth reached");
return false;
}
return true;
}
static bool bpygpu_stack_is_pop_projection_ok_or_error(void)
{
if (GPU_matrix_stack_level_get_projection() == 0) {
PyErr_SetString(PyExc_RuntimeError,
"Minimum projection stack depth reached");
return false;
}
return true;
if (GPU_matrix_stack_level_get_projection() == 0) {
PyErr_SetString(PyExc_RuntimeError, "Minimum projection stack depth reached");
return false;
}
return true;
}
/** \} */
@@ -92,59 +91,55 @@ static bool bpygpu_stack_is_pop_projection_ok_or_error(void)
* \{ */
PyDoc_STRVAR(bpygpu_matrix_push_doc,
".. function:: push()\n"
"\n"
" Add to the model-view matrix stack.\n"
);
".. function:: push()\n"
"\n"
" Add to the model-view matrix stack.\n");
static PyObject *bpygpu_matrix_push(PyObject *UNUSED(self))
{
if (!bpygpu_stack_is_push_model_view_ok_or_error()) {
return NULL;
}
GPU_matrix_push();
Py_RETURN_NONE;
if (!bpygpu_stack_is_push_model_view_ok_or_error()) {
return NULL;
}
GPU_matrix_push();
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_matrix_pop_doc,
".. function:: pop()\n"
"\n"
" Remove the last model-view matrix from the stack.\n"
);
".. function:: pop()\n"
"\n"
" Remove the last model-view matrix from the stack.\n");
static PyObject *bpygpu_matrix_pop(PyObject *UNUSED(self))
{
if (!bpygpu_stack_is_pop_model_view_ok_or_error()) {
return NULL;
}
GPU_matrix_pop();
Py_RETURN_NONE;
if (!bpygpu_stack_is_pop_model_view_ok_or_error()) {
return NULL;
}
GPU_matrix_pop();
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_matrix_push_projection_doc,
".. function:: push_projection()\n"
"\n"
" Add to the projection matrix stack.\n"
);
".. function:: push_projection()\n"
"\n"
" Add to the projection matrix stack.\n");
static PyObject *bpygpu_matrix_push_projection(PyObject *UNUSED(self))
{
if (!bpygpu_stack_is_push_projection_ok_or_error()) {
return NULL;
}
GPU_matrix_push_projection();
Py_RETURN_NONE;
if (!bpygpu_stack_is_push_projection_ok_or_error()) {
return NULL;
}
GPU_matrix_push_projection();
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_matrix_pop_projection_doc,
".. function:: pop_projection()\n"
"\n"
" Remove the last projection matrix from the stack.\n"
);
".. function:: pop_projection()\n"
"\n"
" Remove the last projection matrix from the stack.\n");
static PyObject *bpygpu_matrix_pop_projection(PyObject *UNUSED(self))
{
if (!bpygpu_stack_is_pop_projection_ok_or_error()) {
return NULL;
}
GPU_matrix_pop_projection();
Py_RETURN_NONE;
if (!bpygpu_stack_is_pop_projection_ok_or_error()) {
return NULL;
}
GPU_matrix_pop_projection();
Py_RETURN_NONE;
}
/** \} */
@@ -157,120 +152,121 @@ static PyObject *bpygpu_matrix_pop_projection(PyObject *UNUSED(self))
* \{ */
typedef struct {
PyObject_HEAD /* required python macro */
int type;
int level;
PyObject_HEAD /* required python macro */
int type;
int level;
} BPyGPU_MatrixStackContext;
enum {
PYGPU_MATRIX_TYPE_MODEL_VIEW = 1,
PYGPU_MATRIX_TYPE_PROJECTION = 2,
PYGPU_MATRIX_TYPE_MODEL_VIEW = 1,
PYGPU_MATRIX_TYPE_PROJECTION = 2,
};
static PyObject *bpygpu_matrix_stack_context_enter(BPyGPU_MatrixStackContext *self);
static PyObject *bpygpu_matrix_stack_context_exit(BPyGPU_MatrixStackContext *self, PyObject *args);
static PyMethodDef bpygpu_matrix_stack_context_methods[] = {
{"__enter__", (PyCFunction)bpygpu_matrix_stack_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)bpygpu_matrix_stack_context_exit, METH_VARARGS},
{NULL},
{"__enter__", (PyCFunction)bpygpu_matrix_stack_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)bpygpu_matrix_stack_context_exit, METH_VARARGS},
{NULL},
};
static PyTypeObject BPyGPU_matrix_stack_context_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "GPUMatrixStackContext",
.tp_basicsize = sizeof(BPyGPU_MatrixStackContext),
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_methods = bpygpu_matrix_stack_context_methods,
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUMatrixStackContext",
.tp_basicsize = sizeof(BPyGPU_MatrixStackContext),
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_methods = bpygpu_matrix_stack_context_methods,
};
static PyObject *bpygpu_matrix_stack_context_enter(BPyGPU_MatrixStackContext *self)
{
/* sanity - should never happen */
if (self->level != -1) {
PyErr_SetString(PyExc_RuntimeError, "Already in use");
return NULL;
}
/* sanity - should never happen */
if (self->level != -1) {
PyErr_SetString(PyExc_RuntimeError, "Already in use");
return NULL;
}
if (self->type == PYGPU_MATRIX_TYPE_MODEL_VIEW) {
if (!bpygpu_stack_is_push_model_view_ok_or_error()) {
return NULL;
}
GPU_matrix_push();
self->level = GPU_matrix_stack_level_get_model_view();
}
else if (self->type == PYGPU_MATRIX_TYPE_PROJECTION) {
if (!bpygpu_stack_is_push_projection_ok_or_error()) {
return NULL;
}
GPU_matrix_push_projection();
self->level = GPU_matrix_stack_level_get_projection();
}
else {
BLI_assert(0);
}
Py_RETURN_NONE;
if (self->type == PYGPU_MATRIX_TYPE_MODEL_VIEW) {
if (!bpygpu_stack_is_push_model_view_ok_or_error()) {
return NULL;
}
GPU_matrix_push();
self->level = GPU_matrix_stack_level_get_model_view();
}
else if (self->type == PYGPU_MATRIX_TYPE_PROJECTION) {
if (!bpygpu_stack_is_push_projection_ok_or_error()) {
return NULL;
}
GPU_matrix_push_projection();
self->level = GPU_matrix_stack_level_get_projection();
}
else {
BLI_assert(0);
}
Py_RETURN_NONE;
}
static PyObject *bpygpu_matrix_stack_context_exit(BPyGPU_MatrixStackContext *self, PyObject *UNUSED(args))
static PyObject *bpygpu_matrix_stack_context_exit(BPyGPU_MatrixStackContext *self,
PyObject *UNUSED(args))
{
/* sanity - should never happen */
if (self->level == -1) {
fprintf(stderr, "Not yet in use\n");
goto finally;
}
/* sanity - should never happen */
if (self->level == -1) {
fprintf(stderr, "Not yet in use\n");
goto finally;
}
if (self->type == PYGPU_MATRIX_TYPE_MODEL_VIEW) {
const int level = GPU_matrix_stack_level_get_model_view();
if (level != self->level) {
fprintf(stderr, "Level push/pop mismatch, expected %d, got %d\n", self->level, level);
}
if (level != 0) {
GPU_matrix_pop();
}
}
else if (self->type == PYGPU_MATRIX_TYPE_PROJECTION) {
const int level = GPU_matrix_stack_level_get_projection();
if (level != self->level) {
fprintf(stderr, "Level push/pop mismatch, expected %d, got %d", self->level, level);
}
if (level != 0) {
GPU_matrix_pop_projection();
}
}
else {
BLI_assert(0);
}
if (self->type == PYGPU_MATRIX_TYPE_MODEL_VIEW) {
const int level = GPU_matrix_stack_level_get_model_view();
if (level != self->level) {
fprintf(stderr, "Level push/pop mismatch, expected %d, got %d\n", self->level, level);
}
if (level != 0) {
GPU_matrix_pop();
}
}
else if (self->type == PYGPU_MATRIX_TYPE_PROJECTION) {
const int level = GPU_matrix_stack_level_get_projection();
if (level != self->level) {
fprintf(stderr, "Level push/pop mismatch, expected %d, got %d", self->level, level);
}
if (level != 0) {
GPU_matrix_pop_projection();
}
}
else {
BLI_assert(0);
}
finally:
Py_RETURN_NONE;
Py_RETURN_NONE;
}
static PyObject *bpygpu_matrix_push_pop_impl(int type)
{
BPyGPU_MatrixStackContext *ret = PyObject_New(BPyGPU_MatrixStackContext, &BPyGPU_matrix_stack_context_Type);
ret->type = type;
ret->level = -1;
return (PyObject *)ret;
BPyGPU_MatrixStackContext *ret = PyObject_New(BPyGPU_MatrixStackContext,
&BPyGPU_matrix_stack_context_Type);
ret->type = type;
ret->level = -1;
return (PyObject *)ret;
}
PyDoc_STRVAR(bpygpu_matrix_push_pop_doc,
".. function:: push_pop()\n"
"\n"
" Context manager to ensure balanced push/pop calls, even in the case of an error.\n"
);
PyDoc_STRVAR(
bpygpu_matrix_push_pop_doc,
".. function:: push_pop()\n"
"\n"
" Context manager to ensure balanced push/pop calls, even in the case of an error.\n");
static PyObject *bpygpu_matrix_push_pop(PyObject *UNUSED(self))
{
return bpygpu_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_MODEL_VIEW);
return bpygpu_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_MODEL_VIEW);
}
PyDoc_STRVAR(bpygpu_matrix_push_pop_projection_doc,
".. function:: push_pop_projection()\n"
"\n"
" Context manager to ensure balanced push/pop calls, even in the case of an error.\n"
);
PyDoc_STRVAR(
bpygpu_matrix_push_pop_projection_doc,
".. function:: push_pop_projection()\n"
"\n"
" Context manager to ensure balanced push/pop calls, even in the case of an error.\n");
static PyObject *bpygpu_matrix_push_pop_projection(PyObject *UNUSED(self))
{
return bpygpu_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_PROJECTION);
return bpygpu_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_PROJECTION);
}
/** \} */
@@ -280,88 +276,84 @@ static PyObject *bpygpu_matrix_push_pop_projection(PyObject *UNUSED(self))
* \{ */
PyDoc_STRVAR(bpygpu_matrix_multiply_matrix_doc,
".. function:: multiply_matrix(matrix)\n"
"\n"
" Multiply the current stack matrix.\n"
"\n"
" :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n"
);
".. function:: multiply_matrix(matrix)\n"
"\n"
" Multiply the current stack matrix.\n"
"\n"
" :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n");
static PyObject *bpygpu_matrix_multiply_matrix(PyObject *UNUSED(self), PyObject *value)
{
MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) {
return NULL;
}
GPU_matrix_mul(pymat->matrix);
Py_RETURN_NONE;
MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) {
return NULL;
}
GPU_matrix_mul(pymat->matrix);
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_matrix_scale_doc,
".. function:: scale(scale)\n"
"\n"
" Scale the current stack matrix.\n"
"\n"
" :param scale: Scale the current stack matrix.\n"
" :type scale: sequence of 2 or 3 floats\n"
);
".. function:: scale(scale)\n"
"\n"
" Scale the current stack matrix.\n"
"\n"
" :param scale: Scale the current stack matrix.\n"
" :type scale: sequence of 2 or 3 floats\n");
static PyObject *bpygpu_matrix_scale(PyObject *UNUSED(self), PyObject *value)
{
float scale[3];
int len;
if ((len = mathutils_array_parse(scale, 2, 3, value, "gpu.matrix.scale(): invalid vector arg")) == -1) {
return NULL;
}
if (len == 2) {
GPU_matrix_scale_2fv(scale);
}
else {
GPU_matrix_scale_3fv(scale);
}
Py_RETURN_NONE;
float scale[3];
int len;
if ((len = mathutils_array_parse(
scale, 2, 3, value, "gpu.matrix.scale(): invalid vector arg")) == -1) {
return NULL;
}
if (len == 2) {
GPU_matrix_scale_2fv(scale);
}
else {
GPU_matrix_scale_3fv(scale);
}
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_matrix_scale_uniform_doc,
".. function:: scale_uniform(scale)\n"
"\n"
" :param scale: Scale the current stack matrix.\n"
" :type scale: float\n"
);
".. function:: scale_uniform(scale)\n"
"\n"
" :param scale: Scale the current stack matrix.\n"
" :type scale: float\n");
static PyObject *bpygpu_matrix_scale_uniform(PyObject *UNUSED(self), PyObject *value)
{
float scalar;
if ((scalar = PyFloat_AsDouble(value)) == -1.0f && PyErr_Occurred()) {
PyErr_Format(PyExc_TypeError,
"expected a number, not %.200s",
Py_TYPE(value)->tp_name);
return NULL;
}
GPU_matrix_scale_1f(scalar);
Py_RETURN_NONE;
float scalar;
if ((scalar = PyFloat_AsDouble(value)) == -1.0f && PyErr_Occurred()) {
PyErr_Format(PyExc_TypeError, "expected a number, not %.200s", Py_TYPE(value)->tp_name);
return NULL;
}
GPU_matrix_scale_1f(scalar);
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_matrix_translate_doc,
".. function:: translate(offset)\n"
"\n"
" Scale the current stack matrix.\n"
"\n"
" :param offset: Translate the current stack matrix.\n"
" :type offset: sequence of 2 or 3 floats\n"
);
".. function:: translate(offset)\n"
"\n"
" Scale the current stack matrix.\n"
"\n"
" :param offset: Translate the current stack matrix.\n"
" :type offset: sequence of 2 or 3 floats\n");
static PyObject *bpygpu_matrix_translate(PyObject *UNUSED(self), PyObject *value)
{
float offset[3];
int len;
if ((len = mathutils_array_parse(offset, 2, 3, value, "gpu.matrix.translate(): invalid vector arg")) == -1) {
return NULL;
}
if (len == 2) {
GPU_matrix_translate_2fv(offset);
}
else {
GPU_matrix_translate_3fv(offset);
}
Py_RETURN_NONE;
float offset[3];
int len;
if ((len = mathutils_array_parse(
offset, 2, 3, value, "gpu.matrix.translate(): invalid vector arg")) == -1) {
return NULL;
}
if (len == 2) {
GPU_matrix_translate_2fv(offset);
}
else {
GPU_matrix_translate_3fv(offset);
}
Py_RETURN_NONE;
}
/** \} */
@@ -371,61 +363,57 @@ static PyObject *bpygpu_matrix_translate(PyObject *UNUSED(self), PyObject *value
* \{ */
PyDoc_STRVAR(bpygpu_matrix_reset_doc,
".. function:: reset()\n"
"\n"
" Empty stack and set to identity.\n"
);
".. function:: reset()\n"
"\n"
" Empty stack and set to identity.\n");
static PyObject *bpygpu_matrix_reset(PyObject *UNUSED(self))
{
GPU_matrix_reset();
Py_RETURN_NONE;
GPU_matrix_reset();
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_matrix_load_identity_doc,
".. function:: load_identity()\n"
"\n"
" Empty stack and set to identity.\n"
);
".. function:: load_identity()\n"
"\n"
" Empty stack and set to identity.\n");
static PyObject *bpygpu_matrix_load_identity(PyObject *UNUSED(self))
{
GPU_matrix_identity_set();
Py_RETURN_NONE;
GPU_matrix_identity_set();
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_matrix_load_matrix_doc,
".. function:: load_matrix(matrix)\n"
"\n"
" Load a matrix into the stack.\n"
"\n"
" :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n"
);
".. function:: load_matrix(matrix)\n"
"\n"
" Load a matrix into the stack.\n"
"\n"
" :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n");
static PyObject *bpygpu_matrix_load_matrix(PyObject *UNUSED(self), PyObject *value)
{
MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) {
return NULL;
}
GPU_matrix_set(pymat->matrix);
Py_RETURN_NONE;
MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) {
return NULL;
}
GPU_matrix_set(pymat->matrix);
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_matrix_load_projection_matrix_doc,
".. function:: load_projection_matrix(matrix)\n"
"\n"
" Load a projection matrix into the stack.\n"
"\n"
" :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n"
);
".. function:: load_projection_matrix(matrix)\n"
"\n"
" Load a projection matrix into the stack.\n"
"\n"
" :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n");
static PyObject *bpygpu_matrix_load_projection_matrix(PyObject *UNUSED(self), PyObject *value)
{
MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) {
return NULL;
}
GPU_matrix_projection_set(pymat->matrix);
Py_RETURN_NONE;
MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) {
return NULL;
}
GPU_matrix_projection_set(pymat->matrix);
Py_RETURN_NONE;
}
/** \} */
@@ -435,49 +423,45 @@ static PyObject *bpygpu_matrix_load_projection_matrix(PyObject *UNUSED(self), Py
* \{ */
PyDoc_STRVAR(bpygpu_matrix_get_projection_matrix_doc,
".. function:: get_projection_matrix()\n"
"\n"
" Return a copy of the projection matrix.\n"
"\n"
" :return: A 4x4 projection matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n"
);
".. function:: get_projection_matrix()\n"
"\n"
" Return a copy of the projection matrix.\n"
"\n"
" :return: A 4x4 projection matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n");
static PyObject *bpygpu_matrix_get_projection_matrix(PyObject *UNUSED(self))
{
float matrix[4][4];
GPU_matrix_projection_get(matrix);
return Matrix_CreatePyObject(&matrix[0][0], 4, 4, NULL);
float matrix[4][4];
GPU_matrix_projection_get(matrix);
return Matrix_CreatePyObject(&matrix[0][0], 4, 4, NULL);
}
PyDoc_STRVAR(bpygpu_matrix_get_model_view_matrix_doc,
".. function:: get_model_view_matrix()\n"
"\n"
" Return a copy of the model-view matrix.\n"
"\n"
" :return: A 4x4 view matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n"
);
".. function:: get_model_view_matrix()\n"
"\n"
" Return a copy of the model-view matrix.\n"
"\n"
" :return: A 4x4 view matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n");
static PyObject *bpygpu_matrix_get_model_view_matrix(PyObject *UNUSED(self))
{
float matrix[4][4];
GPU_matrix_model_view_get(matrix);
return Matrix_CreatePyObject(&matrix[0][0], 4, 4, NULL);
float matrix[4][4];
GPU_matrix_model_view_get(matrix);
return Matrix_CreatePyObject(&matrix[0][0], 4, 4, NULL);
}
PyDoc_STRVAR(bpygpu_matrix_get_normal_matrix_doc,
".. function:: get_normal_matrix()\n"
"\n"
" Return a copy of the normal matrix.\n"
"\n"
" :return: A 3x3 normal matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n"
);
".. function:: get_normal_matrix()\n"
"\n"
" Return a copy of the normal matrix.\n"
"\n"
" :return: A 3x3 normal matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n");
static PyObject *bpygpu_matrix_get_normal_matrix(PyObject *UNUSED(self))
{
float matrix[3][3];
GPU_matrix_normal_get(matrix);
return Matrix_CreatePyObject(&matrix[0][0], 3, 3, NULL);
float matrix[3][3];
GPU_matrix_normal_get(matrix);
return Matrix_CreatePyObject(&matrix[0][0], 3, 3, NULL);
}
/** \} */
@@ -487,85 +471,96 @@ static PyObject *bpygpu_matrix_get_normal_matrix(PyObject *UNUSED(self))
* \{ */
static struct PyMethodDef bpygpu_matrix_methods[] = {
/* Manage Stack */
{"push", (PyCFunction)bpygpu_matrix_push,
METH_NOARGS, bpygpu_matrix_push_doc},
{"pop", (PyCFunction)bpygpu_matrix_pop,
METH_NOARGS, bpygpu_matrix_pop_doc},
/* Manage Stack */
{"push", (PyCFunction)bpygpu_matrix_push, METH_NOARGS, bpygpu_matrix_push_doc},
{"pop", (PyCFunction)bpygpu_matrix_pop, METH_NOARGS, bpygpu_matrix_pop_doc},
{"push_projection", (PyCFunction)bpygpu_matrix_push_projection,
METH_NOARGS, bpygpu_matrix_push_projection_doc},
{"pop_projection", (PyCFunction)bpygpu_matrix_pop_projection,
METH_NOARGS, bpygpu_matrix_pop_projection_doc},
{"push_projection",
(PyCFunction)bpygpu_matrix_push_projection,
METH_NOARGS,
bpygpu_matrix_push_projection_doc},
{"pop_projection",
(PyCFunction)bpygpu_matrix_pop_projection,
METH_NOARGS,
bpygpu_matrix_pop_projection_doc},
/* Stack (Context Manager) */
{"push_pop", (PyCFunction)bpygpu_matrix_push_pop,
METH_NOARGS, bpygpu_matrix_push_pop_doc},
{"push_pop_projection", (PyCFunction)bpygpu_matrix_push_pop_projection,
METH_NOARGS, bpygpu_matrix_push_pop_projection_doc},
/* Stack (Context Manager) */
{"push_pop", (PyCFunction)bpygpu_matrix_push_pop, METH_NOARGS, bpygpu_matrix_push_pop_doc},
{"push_pop_projection",
(PyCFunction)bpygpu_matrix_push_pop_projection,
METH_NOARGS,
bpygpu_matrix_push_pop_projection_doc},
/* Manipulate State */
{"multiply_matrix", (PyCFunction)bpygpu_matrix_multiply_matrix,
METH_O, bpygpu_matrix_multiply_matrix_doc},
{"scale", (PyCFunction)bpygpu_matrix_scale,
METH_O, bpygpu_matrix_scale_doc},
{"scale_uniform", (PyCFunction)bpygpu_matrix_scale_uniform,
METH_O, bpygpu_matrix_scale_uniform_doc},
{"translate", (PyCFunction)bpygpu_matrix_translate,
METH_O, bpygpu_matrix_translate_doc},
/* Manipulate State */
{"multiply_matrix",
(PyCFunction)bpygpu_matrix_multiply_matrix,
METH_O,
bpygpu_matrix_multiply_matrix_doc},
{"scale", (PyCFunction)bpygpu_matrix_scale, METH_O, bpygpu_matrix_scale_doc},
{"scale_uniform",
(PyCFunction)bpygpu_matrix_scale_uniform,
METH_O,
bpygpu_matrix_scale_uniform_doc},
{"translate", (PyCFunction)bpygpu_matrix_translate, METH_O, bpygpu_matrix_translate_doc},
/* TODO */
/* TODO */
#if 0
{"rotate", (PyCFunction)bpygpu_matrix_rotate,
METH_O, bpygpu_matrix_rotate_doc},
{"rotate_axis", (PyCFunction)bpygpu_matrix_rotate_axis,
METH_O, bpygpu_matrix_rotate_axis_doc},
{"look_at", (PyCFunction)bpygpu_matrix_look_at,
METH_O, bpygpu_matrix_look_at_doc},
{"rotate", (PyCFunction)bpygpu_matrix_rotate,
METH_O, bpygpu_matrix_rotate_doc},
{"rotate_axis", (PyCFunction)bpygpu_matrix_rotate_axis,
METH_O, bpygpu_matrix_rotate_axis_doc},
{"look_at", (PyCFunction)bpygpu_matrix_look_at,
METH_O, bpygpu_matrix_look_at_doc},
#endif
/* Write State */
{"reset", (PyCFunction)bpygpu_matrix_reset,
METH_NOARGS, bpygpu_matrix_reset_doc},
{"load_identity", (PyCFunction)bpygpu_matrix_load_identity,
METH_NOARGS, bpygpu_matrix_load_identity_doc},
{"load_matrix", (PyCFunction)bpygpu_matrix_load_matrix,
METH_O, bpygpu_matrix_load_matrix_doc},
{"load_projection_matrix", (PyCFunction)bpygpu_matrix_load_projection_matrix,
METH_O, bpygpu_matrix_load_projection_matrix_doc},
/* Write State */
{"reset", (PyCFunction)bpygpu_matrix_reset, METH_NOARGS, bpygpu_matrix_reset_doc},
{"load_identity",
(PyCFunction)bpygpu_matrix_load_identity,
METH_NOARGS,
bpygpu_matrix_load_identity_doc},
{"load_matrix", (PyCFunction)bpygpu_matrix_load_matrix, METH_O, bpygpu_matrix_load_matrix_doc},
{"load_projection_matrix",
(PyCFunction)bpygpu_matrix_load_projection_matrix,
METH_O,
bpygpu_matrix_load_projection_matrix_doc},
/* Read State */
{"get_projection_matrix", (PyCFunction)bpygpu_matrix_get_projection_matrix,
METH_NOARGS, bpygpu_matrix_get_projection_matrix_doc},
{"get_model_view_matrix", (PyCFunction)bpygpu_matrix_get_model_view_matrix,
METH_NOARGS, bpygpu_matrix_get_model_view_matrix_doc},
{"get_normal_matrix", (PyCFunction)bpygpu_matrix_get_normal_matrix,
METH_NOARGS, bpygpu_matrix_get_normal_matrix_doc},
/* Read State */
{"get_projection_matrix",
(PyCFunction)bpygpu_matrix_get_projection_matrix,
METH_NOARGS,
bpygpu_matrix_get_projection_matrix_doc},
{"get_model_view_matrix",
(PyCFunction)bpygpu_matrix_get_model_view_matrix,
METH_NOARGS,
bpygpu_matrix_get_model_view_matrix_doc},
{"get_normal_matrix",
(PyCFunction)bpygpu_matrix_get_normal_matrix,
METH_NOARGS,
bpygpu_matrix_get_normal_matrix_doc},
{NULL, NULL, 0, NULL},
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(bpygpu_matrix_doc,
"This module provides access to the matrix stack."
);
PyDoc_STRVAR(bpygpu_matrix_doc, "This module provides access to the matrix stack.");
static PyModuleDef BPyGPU_matrix_module_def = {
PyModuleDef_HEAD_INIT,
.m_name = "gpu.matrix",
.m_doc = bpygpu_matrix_doc,
.m_methods = bpygpu_matrix_methods,
PyModuleDef_HEAD_INIT,
.m_name = "gpu.matrix",
.m_doc = bpygpu_matrix_doc,
.m_methods = bpygpu_matrix_methods,
};
PyObject *BPyInit_gpu_matrix(void)
{
PyObject *submodule;
PyObject *submodule;
submodule = PyModule_Create(&BPyGPU_matrix_module_def);
submodule = PyModule_Create(&BPyGPU_matrix_module_def);
if (PyType_Ready(&BPyGPU_matrix_stack_context_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPU_matrix_stack_context_Type) < 0) {
return NULL;
}
return submodule;
return submodule;
}
/** \} */

View File

@@ -23,4 +23,4 @@
PyObject *BPyInit_gpu_matrix(void);
#endif /* __GPU_PY_MATRIX_H__ */
#endif /* __GPU_PY_MATRIX_H__ */

View File

@@ -51,316 +51,331 @@
#include "gpu_py_api.h"
#include "gpu_py_offscreen.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name GPUOffScreen Common Utilities
* \{ */
static int bpygpu_offscreen_valid_check(BPyGPUOffScreen *bpygpu_ofs)
{
if (UNLIKELY(bpygpu_ofs->ofs == NULL)) {
PyErr_SetString(PyExc_ReferenceError, "GPU offscreen was freed, no further access is valid");
return -1;
}
return 0;
if (UNLIKELY(bpygpu_ofs->ofs == NULL)) {
PyErr_SetString(PyExc_ReferenceError, "GPU offscreen was freed, no further access is valid");
return -1;
}
return 0;
}
#define BPY_GPU_OFFSCREEN_CHECK_OBJ(bpygpu) { \
if (UNLIKELY(bpygpu_offscreen_valid_check(bpygpu) == -1)) { \
return NULL; \
} \
} ((void)0)
#define BPY_GPU_OFFSCREEN_CHECK_OBJ(bpygpu) \
{ \
if (UNLIKELY(bpygpu_offscreen_valid_check(bpygpu) == -1)) { \
return NULL; \
} \
} \
((void)0)
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUOffscreen Type
* \{ */
static PyObject *bpygpu_offscreen_new(PyTypeObject *UNUSED(self), PyObject *args, PyObject *kwds)
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
BPYGPU_IS_INIT_OR_ERROR_OBJ;
GPUOffScreen *ofs;
int width, height, samples = 0;
char err_out[256];
GPUOffScreen *ofs;
int width, height, samples = 0;
char err_out[256];
static const char *_keywords[] = {"width", "height", "samples", NULL};
static _PyArg_Parser _parser = {"ii|i:GPUOffScreen.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser,
&width, &height, &samples))
{
return NULL;
}
static const char *_keywords[] = {"width", "height", "samples", NULL};
static _PyArg_Parser _parser = {"ii|i:GPUOffScreen.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &width, &height, &samples)) {
return NULL;
}
ofs = GPU_offscreen_create(width, height, samples, true, false, err_out);
ofs = GPU_offscreen_create(width, height, samples, true, false, err_out);
if (ofs == NULL) {
PyErr_Format(PyExc_RuntimeError,
"gpu.offscreen.new(...) failed with '%s'",
err_out[0] ? err_out : "unknown error");
return NULL;
}
if (ofs == NULL) {
PyErr_Format(PyExc_RuntimeError,
"gpu.offscreen.new(...) failed with '%s'",
err_out[0] ? err_out : "unknown error");
return NULL;
}
return BPyGPUOffScreen_CreatePyObject(ofs);
return BPyGPUOffScreen_CreatePyObject(ofs);
}
PyDoc_STRVAR(bpygpu_offscreen_width_doc, "Width of the texture.\n\n:type: `int`");
static PyObject *bpygpu_offscreen_width_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
return PyLong_FromLong(GPU_offscreen_width(self->ofs));
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
return PyLong_FromLong(GPU_offscreen_width(self->ofs));
}
PyDoc_STRVAR(bpygpu_offscreen_height_doc, "Height of the texture.\n\n:type: `int`");
static PyObject *bpygpu_offscreen_height_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
return PyLong_FromLong(GPU_offscreen_height(self->ofs));
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
return PyLong_FromLong(GPU_offscreen_height(self->ofs));
}
PyDoc_STRVAR(bpygpu_offscreen_color_texture_doc, "OpenGL bindcode for the color texture.\n\n:type: `int`");
PyDoc_STRVAR(bpygpu_offscreen_color_texture_doc,
"OpenGL bindcode for the color texture.\n\n:type: `int`");
static PyObject *bpygpu_offscreen_color_texture_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
GPUTexture *texture = GPU_offscreen_color_texture(self->ofs);
return PyLong_FromLong(GPU_texture_opengl_bindcode(texture));
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
GPUTexture *texture = GPU_offscreen_color_texture(self->ofs);
return PyLong_FromLong(GPU_texture_opengl_bindcode(texture));
}
PyDoc_STRVAR(bpygpu_offscreen_bind_doc,
".. method:: bind(save=True)\n"
"\n"
" Bind the offscreen object.\n"
" To make sure that the offscreen gets unbind whether an exception occurs or not, pack it into a `with` statement.\n"
"\n"
" :arg save: Save the current OpenGL state, so that it can be restored when unbinding.\n"
" :type save: `bool`\n"
);
PyDoc_STRVAR(
bpygpu_offscreen_bind_doc,
".. method:: bind(save=True)\n"
"\n"
" Bind the offscreen object.\n"
" To make sure that the offscreen gets unbind whether an exception occurs or not, pack it "
"into a `with` statement.\n"
"\n"
" :arg save: Save the current OpenGL state, so that it can be restored when unbinding.\n"
" :type save: `bool`\n");
static PyObject *bpygpu_offscreen_bind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
bool save = true;
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
bool save = true;
static const char *_keywords[] = {"save", NULL};
static _PyArg_Parser _parser = {"|O&:bind", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser,
PyC_ParseBool, &save))
{
return NULL;
}
static const char *_keywords[] = {"save", NULL};
static _PyArg_Parser _parser = {"|O&:bind", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, PyC_ParseBool, &save)) {
return NULL;
}
GPU_offscreen_bind(self->ofs, save);
GPU_offscreen_bind(self->ofs, save);
self->is_saved = save;
Py_INCREF(self);
self->is_saved = save;
Py_INCREF(self);
return (PyObject *)self;
return (PyObject *)self;
}
PyDoc_STRVAR(bpygpu_offscreen_unbind_doc,
".. method:: unbind(restore=True)\n"
"\n"
" Unbind the offscreen object.\n"
"\n"
" :arg restore: Restore the OpenGL state, can only be used when the state has been saved before.\n"
" :type restore: `bool`\n"
);
".. method:: unbind(restore=True)\n"
"\n"
" Unbind the offscreen object.\n"
"\n"
" :arg restore: Restore the OpenGL state, can only be used when the state has been "
"saved before.\n"
" :type restore: `bool`\n");
static PyObject *bpygpu_offscreen_unbind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
bool restore = true;
bool restore = true;
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
static const char *_keywords[] = {"restore", NULL};
static _PyArg_Parser _parser = {"|O&:unbind", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser,
PyC_ParseBool, &restore))
{
return NULL;
}
static const char *_keywords[] = {"restore", NULL};
static _PyArg_Parser _parser = {"|O&:unbind", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, PyC_ParseBool, &restore)) {
return NULL;
}
GPU_offscreen_unbind(self->ofs, restore);
Py_RETURN_NONE;
GPU_offscreen_unbind(self->ofs, restore);
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_offscreen_draw_view3d_doc,
".. method:: draw_view3d(scene, view3d, region, view_matrix, projection_matrix)\n"
"\n"
" Draw the 3d viewport in the offscreen object.\n"
"\n"
" :arg scene: Scene to draw.\n"
" :type scene: :class:`bpy.types.Scene`\n"
" :arg view_layer: View layer to draw.\n"
" :type view_layer: :class:`bpy.types.ViewLayer`\n"
" :arg view3d: 3D View to get the drawing settings from.\n"
" :type view3d: :class:`bpy.types.SpaceView3D`\n"
" :arg region: Region of the 3D View (required as temporary draw target).\n"
" :type region: :class:`bpy.types.Region`\n"
" :arg view_matrix: View Matrix (e.g. ``camera.matrix_world.inverted()``).\n"
" :type view_matrix: :class:`mathutils.Matrix`\n"
" :arg projection_matrix: Projection Matrix (e.g. ``camera.calc_matrix_camera(...)``).\n"
" :type projection_matrix: :class:`mathutils.Matrix`\n"
);
static PyObject *bpygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
PyDoc_STRVAR(
bpygpu_offscreen_draw_view3d_doc,
".. method:: draw_view3d(scene, view3d, region, view_matrix, projection_matrix)\n"
"\n"
" Draw the 3d viewport in the offscreen object.\n"
"\n"
" :arg scene: Scene to draw.\n"
" :type scene: :class:`bpy.types.Scene`\n"
" :arg view_layer: View layer to draw.\n"
" :type view_layer: :class:`bpy.types.ViewLayer`\n"
" :arg view3d: 3D View to get the drawing settings from.\n"
" :type view3d: :class:`bpy.types.SpaceView3D`\n"
" :arg region: Region of the 3D View (required as temporary draw target).\n"
" :type region: :class:`bpy.types.Region`\n"
" :arg view_matrix: View Matrix (e.g. ``camera.matrix_world.inverted()``).\n"
" :type view_matrix: :class:`mathutils.Matrix`\n"
" :arg projection_matrix: Projection Matrix (e.g. ``camera.calc_matrix_camera(...)``).\n"
" :type projection_matrix: :class:`mathutils.Matrix`\n");
static PyObject *bpygpu_offscreen_draw_view3d(BPyGPUOffScreen *self,
PyObject *args,
PyObject *kwds)
{
MatrixObject *py_mat_view, *py_mat_projection;
PyObject *py_scene, *py_view_layer, *py_region, *py_view3d;
MatrixObject *py_mat_view, *py_mat_projection;
PyObject *py_scene, *py_view_layer, *py_region, *py_view3d;
struct Depsgraph *depsgraph;
struct Scene *scene;
struct ViewLayer *view_layer;
View3D *v3d;
ARegion *ar;
struct RV3DMatrixStore *rv3d_mats;
struct Depsgraph *depsgraph;
struct Scene *scene;
struct ViewLayer *view_layer;
View3D *v3d;
ARegion *ar;
struct RV3DMatrixStore *rv3d_mats;
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
static const char *_keywords[] = {
"scene", "view_layer", "view3d", "region",
"view_matrix", "projection_matrix", NULL};
static const char *_keywords[] = {
"scene", "view_layer", "view3d", "region", "view_matrix", "projection_matrix", NULL};
static _PyArg_Parser _parser = {"OOOOO&O&:draw_view3d", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser,
&py_scene, &py_view_layer, &py_view3d, &py_region,
Matrix_Parse4x4, &py_mat_view,
Matrix_Parse4x4, &py_mat_projection) ||
(!(scene = PyC_RNA_AsPointer(py_scene, "Scene")) ||
!(view_layer = PyC_RNA_AsPointer(py_view_layer, "ViewLayer")) ||
!(v3d = PyC_RNA_AsPointer(py_view3d, "SpaceView3D")) ||
!(ar = PyC_RNA_AsPointer(py_region, "Region"))))
{
return NULL;
}
static _PyArg_Parser _parser = {"OOOOO&O&:draw_view3d", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args,
kwds,
&_parser,
&py_scene,
&py_view_layer,
&py_view3d,
&py_region,
Matrix_Parse4x4,
&py_mat_view,
Matrix_Parse4x4,
&py_mat_projection) ||
(!(scene = PyC_RNA_AsPointer(py_scene, "Scene")) ||
!(view_layer = PyC_RNA_AsPointer(py_view_layer, "ViewLayer")) ||
!(v3d = PyC_RNA_AsPointer(py_view3d, "SpaceView3D")) ||
!(ar = PyC_RNA_AsPointer(py_region, "Region")))) {
return NULL;
}
BLI_assert(BKE_id_is_in_global_main(&scene->id));
BLI_assert(BKE_id_is_in_global_main(&scene->id));
depsgraph = BKE_scene_get_depsgraph(scene, view_layer, true);
depsgraph = BKE_scene_get_depsgraph(scene, view_layer, true);
rv3d_mats = ED_view3d_mats_rv3d_backup(ar->regiondata);
rv3d_mats = ED_view3d_mats_rv3d_backup(ar->regiondata);
GPU_offscreen_bind(self->ofs, true);
GPU_offscreen_bind(self->ofs, true);
ED_view3d_draw_offscreen(depsgraph,
scene,
v3d->shading.type,
v3d,
ar,
GPU_offscreen_width(self->ofs),
GPU_offscreen_height(self->ofs),
(float(*)[4])py_mat_view->matrix,
(float(*)[4])py_mat_projection->matrix,
false,
true,
"",
NULL,
true,
self->ofs,
NULL);
ED_view3d_draw_offscreen(depsgraph,
scene,
v3d->shading.type,
v3d,
ar,
GPU_offscreen_width(self->ofs),
GPU_offscreen_height(self->ofs),
(float(*)[4])py_mat_view->matrix,
(float(*)[4])py_mat_projection->matrix,
false,
true,
"",
NULL,
true,
self->ofs,
NULL);
GPU_offscreen_unbind(self->ofs, true);
GPU_offscreen_unbind(self->ofs, true);
ED_view3d_mats_rv3d_restore(ar->regiondata, rv3d_mats);
MEM_freeN(rv3d_mats);
ED_view3d_mats_rv3d_restore(ar->regiondata, rv3d_mats);
MEM_freeN(rv3d_mats);
Py_RETURN_NONE;
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_offscreen_free_doc,
".. method:: free()\n"
"\n"
" Free the offscreen object.\n"
" The framebuffer, texture and render objects will no longer be accessible.\n"
);
".. method:: free()\n"
"\n"
" Free the offscreen object.\n"
" The framebuffer, texture and render objects will no longer be accessible.\n");
static PyObject *bpygpu_offscreen_free(BPyGPUOffScreen *self)
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
GPU_offscreen_free(self->ofs);
self->ofs = NULL;
Py_RETURN_NONE;
GPU_offscreen_free(self->ofs);
self->ofs = NULL;
Py_RETURN_NONE;
}
static PyObject *bpygpu_offscreen_bind_context_enter(BPyGPUOffScreen *UNUSED(self))
{
Py_RETURN_NONE;
Py_RETURN_NONE;
}
static PyObject *bpygpu_offscreen_bind_context_exit(BPyGPUOffScreen *self, PyObject *UNUSED(args))
{
GPU_offscreen_unbind(self->ofs, self->is_saved);
Py_RETURN_NONE;
GPU_offscreen_unbind(self->ofs, self->is_saved);
Py_RETURN_NONE;
}
static void BPyGPUOffScreen__tp_dealloc(BPyGPUOffScreen *self)
{
if (self->ofs) {
GPU_offscreen_free(self->ofs);
}
Py_TYPE(self)->tp_free((PyObject *)self);
if (self->ofs) {
GPU_offscreen_free(self->ofs);
}
Py_TYPE(self)->tp_free((PyObject *)self);
}
static PyGetSetDef bpygpu_offscreen_getseters[] = {
{(char *)"color_texture", (getter)bpygpu_offscreen_color_texture_get, (setter)NULL, bpygpu_offscreen_color_texture_doc, NULL},
{(char *)"width", (getter)bpygpu_offscreen_width_get, (setter)NULL, bpygpu_offscreen_width_doc, NULL},
{(char *)"height", (getter)bpygpu_offscreen_height_get, (setter)NULL, bpygpu_offscreen_height_doc, NULL},
{NULL, NULL, NULL, NULL, NULL} /* Sentinel */
{(char *)"color_texture",
(getter)bpygpu_offscreen_color_texture_get,
(setter)NULL,
bpygpu_offscreen_color_texture_doc,
NULL},
{(char *)"width",
(getter)bpygpu_offscreen_width_get,
(setter)NULL,
bpygpu_offscreen_width_doc,
NULL},
{(char *)"height",
(getter)bpygpu_offscreen_height_get,
(setter)NULL,
bpygpu_offscreen_height_doc,
NULL},
{NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};
static struct PyMethodDef bpygpu_offscreen_methods[] = {
{"bind", (PyCFunction)bpygpu_offscreen_bind, METH_VARARGS | METH_KEYWORDS, bpygpu_offscreen_bind_doc},
{"unbind", (PyCFunction)bpygpu_offscreen_unbind, METH_VARARGS | METH_KEYWORDS, bpygpu_offscreen_unbind_doc},
{"draw_view3d", (PyCFunction)bpygpu_offscreen_draw_view3d, METH_VARARGS | METH_KEYWORDS, bpygpu_offscreen_draw_view3d_doc},
{"free", (PyCFunction)bpygpu_offscreen_free, METH_NOARGS, bpygpu_offscreen_free_doc},
{"__enter__", (PyCFunction)bpygpu_offscreen_bind_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)bpygpu_offscreen_bind_context_exit, METH_VARARGS},
{NULL, NULL, 0, NULL},
{"bind",
(PyCFunction)bpygpu_offscreen_bind,
METH_VARARGS | METH_KEYWORDS,
bpygpu_offscreen_bind_doc},
{"unbind",
(PyCFunction)bpygpu_offscreen_unbind,
METH_VARARGS | METH_KEYWORDS,
bpygpu_offscreen_unbind_doc},
{"draw_view3d",
(PyCFunction)bpygpu_offscreen_draw_view3d,
METH_VARARGS | METH_KEYWORDS,
bpygpu_offscreen_draw_view3d_doc},
{"free", (PyCFunction)bpygpu_offscreen_free, METH_NOARGS, bpygpu_offscreen_free_doc},
{"__enter__", (PyCFunction)bpygpu_offscreen_bind_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)bpygpu_offscreen_bind_context_exit, METH_VARARGS},
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(bpygpu_offscreen_doc,
".. class:: GPUOffScreen(width, height, samples=0)\n"
"\n"
" This object gives access to off screen buffers.\n"
"\n"
" :arg width: Horizontal dimension of the buffer.\n"
" :type width: `int`\n"
" :arg height: Vertical dimension of the buffer.\n"
" :type height: `int`\n"
" :arg samples: OpenGL samples to use for MSAA or zero to disable.\n"
" :type samples: `int`\n"
);
".. class:: GPUOffScreen(width, height, samples=0)\n"
"\n"
" This object gives access to off screen buffers.\n"
"\n"
" :arg width: Horizontal dimension of the buffer.\n"
" :type width: `int`\n"
" :arg height: Vertical dimension of the buffer.\n"
" :type height: `int`\n"
" :arg samples: OpenGL samples to use for MSAA or zero to disable.\n"
" :type samples: `int`\n");
PyTypeObject BPyGPUOffScreen_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "GPUOffScreen",
.tp_basicsize = sizeof(BPyGPUOffScreen),
.tp_dealloc = (destructor)BPyGPUOffScreen__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = bpygpu_offscreen_doc,
.tp_methods = bpygpu_offscreen_methods,
.tp_getset = bpygpu_offscreen_getseters,
.tp_new = bpygpu_offscreen_new,
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUOffScreen",
.tp_basicsize = sizeof(BPyGPUOffScreen),
.tp_dealloc = (destructor)BPyGPUOffScreen__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = bpygpu_offscreen_doc,
.tp_methods = bpygpu_offscreen_methods,
.tp_getset = bpygpu_offscreen_getseters,
.tp_new = bpygpu_offscreen_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
PyObject *BPyGPUOffScreen_CreatePyObject(GPUOffScreen *ofs)
{
BPyGPUOffScreen *self;
BPyGPUOffScreen *self;
self = PyObject_New(BPyGPUOffScreen, &BPyGPUOffScreen_Type);
self->ofs = ofs;
self = PyObject_New(BPyGPUOffScreen, &BPyGPUOffScreen_Type);
self->ofs = ofs;
return (PyObject *)self;
return (PyObject *)self;
}
/** \} */

View File

@@ -25,12 +25,11 @@
extern PyTypeObject BPyGPUOffScreen_Type;
#define BPyGPUOffScreen_Check(v) (Py_TYPE(v) == &BPyGPUOffScreen_Type)
#define BPyGPUOffScreen_Check(v) (Py_TYPE(v) == &BPyGPUOffScreen_Type)
typedef struct BPyGPUOffScreen {
PyObject_HEAD
struct GPUOffScreen *ofs;
bool is_saved;
PyObject_HEAD struct GPUOffScreen *ofs;
bool is_saved;
} BPyGPUOffScreen;
PyObject *BPyGPUOffScreen_CreatePyObject(struct GPUOffScreen *ofs) ATTR_NONNULL(1);

View File

@@ -41,21 +41,20 @@
* \{ */
PyDoc_STRVAR(bpygpu_select_load_id_doc,
"load_id(id)\n"
"\n"
" Set the selection ID.\n"
"\n"
" :param id: Number (32-bit unsigned int).\n"
" :type select: int\n"
);
"load_id(id)\n"
"\n"
" Set the selection ID.\n"
"\n"
" :param id: Number (32-bit unsigned int).\n"
" :type select: int\n");
static PyObject *bpygpu_select_load_id(PyObject *UNUSED(self), PyObject *value)
{
uint id;
if ((id = PyC_Long_AsU32(value)) == (uint)-1) {
return NULL;
}
GPU_select_load_id(id);
Py_RETURN_NONE;
uint id;
if ((id = PyC_Long_AsU32(value)) == (uint)-1) {
return NULL;
}
GPU_select_load_id(id);
Py_RETURN_NONE;
}
/** \} */
@@ -64,28 +63,26 @@ static PyObject *bpygpu_select_load_id(PyObject *UNUSED(self), PyObject *value)
* \{ */
static struct PyMethodDef bpygpu_select_methods[] = {
/* Manage Stack */
{"load_id", (PyCFunction)bpygpu_select_load_id, METH_O, bpygpu_select_load_id_doc},
{NULL, NULL, 0, NULL},
/* Manage Stack */
{"load_id", (PyCFunction)bpygpu_select_load_id, METH_O, bpygpu_select_load_id_doc},
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(bpygpu_select_doc,
"This module provides access to selection."
);
PyDoc_STRVAR(bpygpu_select_doc, "This module provides access to selection.");
static PyModuleDef BPyGPU_select_module_def = {
PyModuleDef_HEAD_INIT,
.m_name = "gpu.select",
.m_doc = bpygpu_select_doc,
.m_methods = bpygpu_select_methods,
PyModuleDef_HEAD_INIT,
.m_name = "gpu.select",
.m_doc = bpygpu_select_doc,
.m_methods = bpygpu_select_methods,
};
PyObject *BPyInit_gpu_select(void)
{
PyObject *submodule;
PyObject *submodule;
submodule = PyModule_Create(&BPyGPU_select_module_def);
submodule = PyModule_Create(&BPyGPU_select_module_def);
return submodule;
return submodule;
}
/** \} */

View File

@@ -23,4 +23,4 @@
PyObject *BPyInit_gpu_select(void);
#endif /* __GPU_PY_SELECT_H__ */
#endif /* __GPU_PY_SELECT_H__ */

File diff suppressed because it is too large Load Diff

View File

@@ -23,12 +23,11 @@
extern PyTypeObject BPyGPUShader_Type;
#define BPyGPUShader_Check(v) (Py_TYPE(v) == &BPyGPUShader_Type)
#define BPyGPUShader_Check(v) (Py_TYPE(v) == &BPyGPUShader_Type)
typedef struct BPyGPUShader {
PyObject_VAR_HEAD
struct GPUShader *shader;
bool is_builtin;
PyObject_VAR_HEAD struct GPUShader *shader;
bool is_builtin;
} BPyGPUShader;
PyObject *BPyGPUShader_CreatePyObject(struct GPUShader *shader, bool is_builtin);

View File

@@ -30,53 +30,53 @@
/* -------------------------------------------------------------------- */
/** \name GPU Types Module
* \{ */
static struct PyModuleDef BPyGPU_types_module_def = {
PyModuleDef_HEAD_INIT,
.m_name = "gpu.types",
PyModuleDef_HEAD_INIT,
.m_name = "gpu.types",
};
PyObject *BPyInit_gpu_types(void)
{
PyObject *submodule;
PyObject *submodule;
submodule = PyModule_Create(&BPyGPU_types_module_def);
submodule = PyModule_Create(&BPyGPU_types_module_def);
if (PyType_Ready(&BPyGPUVertFormat_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUVertBuf_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUIndexBuf_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUBatch_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUOffScreen_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUShader_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUVertFormat_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUVertBuf_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUIndexBuf_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUBatch_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUOffScreen_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUShader_Type) < 0) {
return NULL;
}
#define MODULE_TYPE_ADD(s, t) \
PyModule_AddObject(s, t.tp_name, (PyObject *)&t); Py_INCREF((PyObject *)&t)
PyModule_AddObject(s, t.tp_name, (PyObject *)&t); \
Py_INCREF((PyObject *)&t)
MODULE_TYPE_ADD(submodule, BPyGPUVertFormat_Type);
MODULE_TYPE_ADD(submodule, BPyGPUVertBuf_Type);
MODULE_TYPE_ADD(submodule, BPyGPUIndexBuf_Type);
MODULE_TYPE_ADD(submodule, BPyGPUBatch_Type);
MODULE_TYPE_ADD(submodule, BPyGPUOffScreen_Type);
MODULE_TYPE_ADD(submodule, BPyGPUShader_Type);
MODULE_TYPE_ADD(submodule, BPyGPUVertFormat_Type);
MODULE_TYPE_ADD(submodule, BPyGPUVertBuf_Type);
MODULE_TYPE_ADD(submodule, BPyGPUIndexBuf_Type);
MODULE_TYPE_ADD(submodule, BPyGPUBatch_Type);
MODULE_TYPE_ADD(submodule, BPyGPUOffScreen_Type);
MODULE_TYPE_ADD(submodule, BPyGPUShader_Type);
#undef MODULE_TYPE_ADD
return submodule;
return submodule;
}
/** \} */

View File

@@ -40,48 +40,74 @@
* \{ */
#define PY_AS_NATIVE_SWITCH(attr) \
switch (attr->comp_type) { \
case GPU_COMP_I8: { PY_AS_NATIVE(int8_t, PyC_Long_AsI8); break; } \
case GPU_COMP_U8: { PY_AS_NATIVE(uint8_t, PyC_Long_AsU8); break; } \
case GPU_COMP_I16: { PY_AS_NATIVE(int16_t, PyC_Long_AsI16); break; } \
case GPU_COMP_U16: { PY_AS_NATIVE(uint16_t, PyC_Long_AsU16); break; } \
case GPU_COMP_I32: { PY_AS_NATIVE(int32_t, PyC_Long_AsI32); break; } \
case GPU_COMP_U32: { PY_AS_NATIVE(uint32_t, PyC_Long_AsU32); break; } \
case GPU_COMP_F32: { PY_AS_NATIVE(float, PyFloat_AsDouble); break; } \
default: \
BLI_assert(0); \
} ((void)0)
switch (attr->comp_type) { \
case GPU_COMP_I8: { \
PY_AS_NATIVE(int8_t, PyC_Long_AsI8); \
break; \
} \
case GPU_COMP_U8: { \
PY_AS_NATIVE(uint8_t, PyC_Long_AsU8); \
break; \
} \
case GPU_COMP_I16: { \
PY_AS_NATIVE(int16_t, PyC_Long_AsI16); \
break; \
} \
case GPU_COMP_U16: { \
PY_AS_NATIVE(uint16_t, PyC_Long_AsU16); \
break; \
} \
case GPU_COMP_I32: { \
PY_AS_NATIVE(int32_t, PyC_Long_AsI32); \
break; \
} \
case GPU_COMP_U32: { \
PY_AS_NATIVE(uint32_t, PyC_Long_AsU32); \
break; \
} \
case GPU_COMP_F32: { \
PY_AS_NATIVE(float, PyFloat_AsDouble); \
break; \
} \
default: \
BLI_assert(0); \
} \
((void)0)
/* No error checking, callers must run PyErr_Occurred */
static void fill_format_elem(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
{
#define PY_AS_NATIVE(ty_dst, py_as_native) \
{ \
ty_dst *data_dst = data_dst_void; \
*data_dst = py_as_native(py_src); \
} ((void)0)
{ \
ty_dst *data_dst = data_dst_void; \
*data_dst = py_as_native(py_src); \
} \
((void)0)
PY_AS_NATIVE_SWITCH(attr);
PY_AS_NATIVE_SWITCH(attr);
#undef PY_AS_NATIVE
}
/* No error checking, callers must run PyErr_Occurred */
static void fill_format_sequence(void *data_dst_void, PyObject *py_seq_fast, const GPUVertAttr *attr)
static void fill_format_sequence(void *data_dst_void,
PyObject *py_seq_fast,
const GPUVertAttr *attr)
{
const uint len = attr->comp_len;
PyObject **value_fast_items = PySequence_Fast_ITEMS(py_seq_fast);
const uint len = attr->comp_len;
PyObject **value_fast_items = PySequence_Fast_ITEMS(py_seq_fast);
/**
* Args are constants, so range checks will be optimized out if they're nop's.
*/
#define PY_AS_NATIVE(ty_dst, py_as_native) \
ty_dst *data_dst = data_dst_void; \
for (uint i = 0; i < len; i++) { \
data_dst[i] = py_as_native(value_fast_items[i]); \
} ((void)0)
ty_dst *data_dst = data_dst_void; \
for (uint i = 0; i < len; i++) { \
data_dst[i] = py_as_native(value_fast_items[i]); \
} \
((void)0)
PY_AS_NATIVE_SWITCH(attr);
PY_AS_NATIVE_SWITCH(attr);
#undef PY_AS_NATIVE
}
@@ -90,256 +116,244 @@ static void fill_format_sequence(void *data_dst_void, PyObject *py_seq_fast, con
#undef WARN_TYPE_LIMIT_PUSH
#undef WARN_TYPE_LIMIT_POP
static bool bpygpu_vertbuf_fill_impl(
GPUVertBuf *vbo,
uint data_id, PyObject *seq, const char *error_prefix)
static bool bpygpu_vertbuf_fill_impl(GPUVertBuf *vbo,
uint data_id,
PyObject *seq,
const char *error_prefix)
{
const char *exc_str_size_mismatch = "Expected a %s of size %d, got %u";
const char *exc_str_size_mismatch = "Expected a %s of size %d, got %u";
bool ok = true;
const GPUVertAttr *attr = &vbo->format.attrs[data_id];
bool ok = true;
const GPUVertAttr *attr = &vbo->format.attrs[data_id];
if (PyObject_CheckBuffer(seq)) {
Py_buffer pybuffer;
if (PyObject_CheckBuffer(seq)) {
Py_buffer pybuffer;
if (PyObject_GetBuffer(seq, &pybuffer, PyBUF_STRIDES | PyBUF_ND) == -1) {
/* PyObject_GetBuffer raise a PyExc_BufferError */
return false;
}
if (PyObject_GetBuffer(seq, &pybuffer, PyBUF_STRIDES | PyBUF_ND) == -1) {
/* PyObject_GetBuffer raise a PyExc_BufferError */
return false;
}
uint comp_len = pybuffer.ndim == 1 ? 1 : (uint)pybuffer.shape[1];
uint comp_len = pybuffer.ndim == 1 ? 1 : (uint)pybuffer.shape[1];
if (pybuffer.shape[0] != vbo->vertex_len) {
PyErr_Format(PyExc_ValueError, exc_str_size_mismatch,
"sequence", vbo->vertex_len, pybuffer.shape[0]);
ok = false;
}
else if (comp_len != attr->comp_len) {
PyErr_Format(PyExc_ValueError, exc_str_size_mismatch,
"component", attr->comp_len, comp_len);
ok = false;
}
else {
GPU_vertbuf_attr_fill_stride(vbo, data_id, pybuffer.strides[0], pybuffer.buf);
}
if (pybuffer.shape[0] != vbo->vertex_len) {
PyErr_Format(
PyExc_ValueError, exc_str_size_mismatch, "sequence", vbo->vertex_len, pybuffer.shape[0]);
ok = false;
}
else if (comp_len != attr->comp_len) {
PyErr_Format(PyExc_ValueError, exc_str_size_mismatch, "component", attr->comp_len, comp_len);
ok = false;
}
else {
GPU_vertbuf_attr_fill_stride(vbo, data_id, pybuffer.strides[0], pybuffer.buf);
}
PyBuffer_Release(&pybuffer);
}
else {
GPUVertBufRaw data_step;
GPU_vertbuf_attr_get_raw_data(vbo, data_id, &data_step);
PyBuffer_Release(&pybuffer);
}
else {
GPUVertBufRaw data_step;
GPU_vertbuf_attr_get_raw_data(vbo, data_id, &data_step);
PyObject *seq_fast = PySequence_Fast(seq, "Vertex buffer fill");
if (seq_fast == NULL) {
return false;
}
PyObject *seq_fast = PySequence_Fast(seq, "Vertex buffer fill");
if (seq_fast == NULL) {
return false;
}
const uint seq_len = PySequence_Fast_GET_SIZE(seq_fast);
const uint seq_len = PySequence_Fast_GET_SIZE(seq_fast);
if (seq_len != vbo->vertex_len) {
PyErr_Format(PyExc_ValueError, exc_str_size_mismatch,
"sequence", vbo->vertex_len, seq_len);
}
if (seq_len != vbo->vertex_len) {
PyErr_Format(PyExc_ValueError, exc_str_size_mismatch, "sequence", vbo->vertex_len, seq_len);
}
PyObject **seq_items = PySequence_Fast_ITEMS(seq_fast);
PyObject **seq_items = PySequence_Fast_ITEMS(seq_fast);
if (attr->comp_len == 1) {
for (uint i = 0; i < seq_len; i++) {
uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
PyObject *item = seq_items[i];
fill_format_elem(data, item, attr);
}
}
else {
for (uint i = 0; i < seq_len; i++) {
uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
PyObject *seq_fast_item = PySequence_Fast(seq_items[i], error_prefix);
if (attr->comp_len == 1) {
for (uint i = 0; i < seq_len; i++) {
uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
PyObject *item = seq_items[i];
fill_format_elem(data, item, attr);
}
}
else {
for (uint i = 0; i < seq_len; i++) {
uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
PyObject *seq_fast_item = PySequence_Fast(seq_items[i], error_prefix);
if (seq_fast_item == NULL) {
ok = false;
goto finally;
}
if (PySequence_Fast_GET_SIZE(seq_fast_item) != attr->comp_len) {
PyErr_Format(PyExc_ValueError, exc_str_size_mismatch,
"sequence", attr->comp_len, PySequence_Fast_GET_SIZE(seq_fast_item));
ok = false;
Py_DECREF(seq_fast_item);
goto finally;
}
if (seq_fast_item == NULL) {
ok = false;
goto finally;
}
if (PySequence_Fast_GET_SIZE(seq_fast_item) != attr->comp_len) {
PyErr_Format(PyExc_ValueError,
exc_str_size_mismatch,
"sequence",
attr->comp_len,
PySequence_Fast_GET_SIZE(seq_fast_item));
ok = false;
Py_DECREF(seq_fast_item);
goto finally;
}
/* May trigger error, check below */
fill_format_sequence(data, seq_fast_item, attr);
Py_DECREF(seq_fast_item);
}
}
/* May trigger error, check below */
fill_format_sequence(data, seq_fast_item, attr);
Py_DECREF(seq_fast_item);
}
}
if (PyErr_Occurred()) {
ok = false;
}
if (PyErr_Occurred()) {
ok = false;
}
finally:
finally:
Py_DECREF(seq_fast);
}
return ok;
Py_DECREF(seq_fast);
}
return ok;
}
static int bpygpu_attr_fill(GPUVertBuf *buf, int id, PyObject *py_seq_data, const char *error_prefix)
static int bpygpu_attr_fill(GPUVertBuf *buf,
int id,
PyObject *py_seq_data,
const char *error_prefix)
{
if (id < 0 || id >= buf->format.attr_len) {
PyErr_Format(PyExc_ValueError,
"Format id %d out of range",
id);
return 0;
}
if (id < 0 || id >= buf->format.attr_len) {
PyErr_Format(PyExc_ValueError, "Format id %d out of range", id);
return 0;
}
if (buf->data == NULL) {
PyErr_SetString(PyExc_ValueError,
"Can't fill, static buffer already in use");
return 0;
}
if (buf->data == NULL) {
PyErr_SetString(PyExc_ValueError, "Can't fill, static buffer already in use");
return 0;
}
if (!bpygpu_vertbuf_fill_impl(buf, (uint)id, py_seq_data, error_prefix)) {
return 0;
}
if (!bpygpu_vertbuf_fill_impl(buf, (uint)id, py_seq_data, error_prefix)) {
return 0;
}
return 1;
return 1;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name VertBuf Type
* \{ */
static PyObject *bpygpu_VertBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
struct {
PyObject *py_fmt;
uint len;
} params;
struct {
PyObject *py_fmt;
uint len;
} params;
static const char *_keywords[] = {"format", "len", NULL};
static _PyArg_Parser _parser = {"O!I:GPUVertBuf.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser,
&BPyGPUVertFormat_Type, &params.py_fmt,
&params.len))
{
return NULL;
}
static const char *_keywords[] = {"format", "len", NULL};
static _PyArg_Parser _parser = {"O!I:GPUVertBuf.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser, &BPyGPUVertFormat_Type, &params.py_fmt, &params.len)) {
return NULL;
}
const GPUVertFormat *fmt = &((BPyGPUVertFormat *)params.py_fmt)->fmt;
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(fmt);
const GPUVertFormat *fmt = &((BPyGPUVertFormat *)params.py_fmt)->fmt;
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(fmt);
GPU_vertbuf_data_alloc(vbo, params.len);
GPU_vertbuf_data_alloc(vbo, params.len);
return BPyGPUVertBuf_CreatePyObject(vbo);
return BPyGPUVertBuf_CreatePyObject(vbo);
}
PyDoc_STRVAR(bpygpu_VertBuf_attr_fill_doc,
".. method:: attr_fill(id, data)\n"
"\n"
" Insert data into the buffer for a single attribute.\n"
"\n"
" :param id: Either the name or the id of the attribute.\n"
" :type id: int or str\n"
" :param data: Sequence of data that should be stored in the buffer\n"
" :type data: sequence of values or tuples\n"
);
".. method:: attr_fill(id, data)\n"
"\n"
" Insert data into the buffer for a single attribute.\n"
"\n"
" :param id: Either the name or the id of the attribute.\n"
" :type id: int or str\n"
" :param data: Sequence of data that should be stored in the buffer\n"
" :type data: sequence of values or tuples\n");
static PyObject *bpygpu_VertBuf_attr_fill(BPyGPUVertBuf *self, PyObject *args, PyObject *kwds)
{
PyObject *data;
PyObject *identifier;
PyObject *data;
PyObject *identifier;
static const char *_keywords[] = {"id", "data", NULL};
static _PyArg_Parser _parser = {"OO:attr_fill", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser,
&identifier, &data))
{
return NULL;
}
static const char *_keywords[] = {"id", "data", NULL};
static _PyArg_Parser _parser = {"OO:attr_fill", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &identifier, &data)) {
return NULL;
}
int id;
int id;
if (PyLong_Check(identifier)) {
id = PyLong_AsLong(identifier);
}
else if (PyUnicode_Check(identifier)) {
const char *name = PyUnicode_AsUTF8(identifier);
id = GPU_vertformat_attr_id_get(&self->buf->format, name);
if (id == -1) {
PyErr_SetString(PyExc_ValueError,
"Unknown attribute name");
return NULL;
}
}
else {
PyErr_SetString(PyExc_TypeError,
"expected int or str type as identifier");
return NULL;
}
if (PyLong_Check(identifier)) {
id = PyLong_AsLong(identifier);
}
else if (PyUnicode_Check(identifier)) {
const char *name = PyUnicode_AsUTF8(identifier);
id = GPU_vertformat_attr_id_get(&self->buf->format, name);
if (id == -1) {
PyErr_SetString(PyExc_ValueError, "Unknown attribute name");
return NULL;
}
}
else {
PyErr_SetString(PyExc_TypeError, "expected int or str type as identifier");
return NULL;
}
if (!bpygpu_attr_fill(self->buf, id, data, "GPUVertBuf.attr_fill")) {
return NULL;
}
if (!bpygpu_attr_fill(self->buf, id, data, "GPUVertBuf.attr_fill")) {
return NULL;
}
Py_RETURN_NONE;
Py_RETURN_NONE;
}
static struct PyMethodDef bpygpu_VertBuf_methods[] = {
{"attr_fill", (PyCFunction) bpygpu_VertBuf_attr_fill,
METH_VARARGS | METH_KEYWORDS, bpygpu_VertBuf_attr_fill_doc},
{NULL, NULL, 0, NULL},
{"attr_fill",
(PyCFunction)bpygpu_VertBuf_attr_fill,
METH_VARARGS | METH_KEYWORDS,
bpygpu_VertBuf_attr_fill_doc},
{NULL, NULL, 0, NULL},
};
static void bpygpu_VertBuf_dealloc(BPyGPUVertBuf *self)
{
GPU_vertbuf_discard(self->buf);
Py_TYPE(self)->tp_free(self);
GPU_vertbuf_discard(self->buf);
Py_TYPE(self)->tp_free(self);
}
PyDoc_STRVAR(py_gpu_vertex_buffer_doc,
".. class:: GPUVertBuf(len, format)\n"
"\n"
" Contains a VBO.\n"
"\n"
" :param len: Amount of vertices that will fit into this buffer.\n"
" :type type: `int`\n"
" :param format: Vertex format.\n"
" :type buf: :class:`gpu.types.GPUVertFormat`\n"
);
".. class:: GPUVertBuf(len, format)\n"
"\n"
" Contains a VBO.\n"
"\n"
" :param len: Amount of vertices that will fit into this buffer.\n"
" :type type: `int`\n"
" :param format: Vertex format.\n"
" :type buf: :class:`gpu.types.GPUVertFormat`\n");
PyTypeObject BPyGPUVertBuf_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "GPUVertBuf",
.tp_basicsize = sizeof(BPyGPUVertBuf),
.tp_dealloc = (destructor)bpygpu_VertBuf_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = py_gpu_vertex_buffer_doc,
.tp_methods = bpygpu_VertBuf_methods,
.tp_new = bpygpu_VertBuf_new,
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUVertBuf",
.tp_basicsize = sizeof(BPyGPUVertBuf),
.tp_dealloc = (destructor)bpygpu_VertBuf_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = py_gpu_vertex_buffer_doc,
.tp_methods = bpygpu_VertBuf_methods,
.tp_new = bpygpu_VertBuf_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
PyObject *BPyGPUVertBuf_CreatePyObject(GPUVertBuf *buf)
{
BPyGPUVertBuf *self;
BPyGPUVertBuf *self;
self = PyObject_New(BPyGPUVertBuf, &BPyGPUVertBuf_Type);
self->buf = buf;
self = PyObject_New(BPyGPUVertBuf, &BPyGPUVertBuf_Type);
self->buf = buf;
return (PyObject *)self;
return (PyObject *)self;
}
/** \} */

View File

@@ -25,12 +25,12 @@
extern PyTypeObject BPyGPUVertBuf_Type;
#define BPyGPUVertBuf_Check(v) (Py_TYPE(v) == &BPyGPUVertBuf_Type)
#define BPyGPUVertBuf_Check(v) (Py_TYPE(v) == &BPyGPUVertBuf_Type)
typedef struct BPyGPUVertBuf {
PyObject_VAR_HEAD
/* The buf is owned, we may support thin wrapped batches later. */
struct GPUVertBuf *buf;
PyObject_VAR_HEAD
/* The buf is owned, we may support thin wrapped batches later. */
struct GPUVertBuf *buf;
} BPyGPUVertBuf;
PyObject *BPyGPUVertBuf_CreatePyObject(struct GPUVertBuf *vbo) ATTR_NONNULL(1);

View File

@@ -33,15 +33,15 @@
#include "gpu_py_vertex_format.h" /* own include */
#ifdef __BIG_ENDIAN__
/* big endian */
# define MAKE_ID2(c, d) ((c) << 8 | (d))
# define MAKE_ID3(a, b, c) ( (int)(a) << 24 | (int)(b) << 16 | (c) << 8 )
# define MAKE_ID4(a, b, c, d) ( (int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d) )
/* big endian */
# define MAKE_ID2(c, d) ((c) << 8 | (d))
# define MAKE_ID3(a, b, c) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8)
# define MAKE_ID4(a, b, c, d) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d))
#else
/* little endian */
# define MAKE_ID2(c, d) ((d) << 8 | (c))
# define MAKE_ID3(a, b, c) ( (int)(c) << 16 | (b) << 8 | (a) )
# define MAKE_ID4(a, b, c, d) ( (int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a) )
/* little endian */
# define MAKE_ID2(c, d) ((d) << 8 | (c))
# define MAKE_ID3(a, b, c) ((int)(c) << 16 | (b) << 8 | (a))
# define MAKE_ID4(a, b, c, d) ((int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a))
#endif
/* -------------------------------------------------------------------- */
@@ -52,205 +52,211 @@
static int bpygpu_parse_component_type(const char *str, int length)
{
if (length == 2) {
switch (*((ushort *)str)) {
case MAKE_ID2('I', '8'): return GPU_COMP_I8;
case MAKE_ID2('U', '8'): return GPU_COMP_U8;
default: break;
}
}
else if (length == 3) {
switch (*((uint *)str)) {
case MAKE_ID3('I', '1', '6'): return GPU_COMP_I16;
case MAKE_ID3('U', '1', '6'): return GPU_COMP_U16;
case MAKE_ID3('I', '3', '2'): return GPU_COMP_I32;
case MAKE_ID3('U', '3', '2'): return GPU_COMP_U32;
case MAKE_ID3('F', '3', '2'): return GPU_COMP_F32;
case MAKE_ID3('I', '1', '0'): return GPU_COMP_I10;
default: break;
}
}
return -1;
if (length == 2) {
switch (*((ushort *)str)) {
case MAKE_ID2('I', '8'):
return GPU_COMP_I8;
case MAKE_ID2('U', '8'):
return GPU_COMP_U8;
default:
break;
}
}
else if (length == 3) {
switch (*((uint *)str)) {
case MAKE_ID3('I', '1', '6'):
return GPU_COMP_I16;
case MAKE_ID3('U', '1', '6'):
return GPU_COMP_U16;
case MAKE_ID3('I', '3', '2'):
return GPU_COMP_I32;
case MAKE_ID3('U', '3', '2'):
return GPU_COMP_U32;
case MAKE_ID3('F', '3', '2'):
return GPU_COMP_F32;
case MAKE_ID3('I', '1', '0'):
return GPU_COMP_I10;
default:
break;
}
}
return -1;
}
static int bpygpu_parse_fetch_mode(const char *str, int length)
{
#define MATCH_ID(id) \
if (length == strlen(STRINGIFY(id))) { \
if (STREQ(str, STRINGIFY(id))) { \
return GPU_FETCH_##id; \
} \
} ((void)0)
if (length == strlen(STRINGIFY(id))) { \
if (STREQ(str, STRINGIFY(id))) { \
return GPU_FETCH_##id; \
} \
} \
((void)0)
MATCH_ID(FLOAT);
MATCH_ID(INT);
MATCH_ID(INT_TO_FLOAT_UNIT);
MATCH_ID(INT_TO_FLOAT);
MATCH_ID(FLOAT);
MATCH_ID(INT);
MATCH_ID(INT_TO_FLOAT_UNIT);
MATCH_ID(INT_TO_FLOAT);
#undef MATCH_ID
return -1;
return -1;
}
static int bpygpu_ParseVertCompType(PyObject *o, void *p)
{
Py_ssize_t length;
const char *str = _PyUnicode_AsStringAndSize(o, &length);
Py_ssize_t length;
const char *str = _PyUnicode_AsStringAndSize(o, &length);
if (str == NULL) {
PyErr_Format(PyExc_ValueError,
"expected a string, got %s",
Py_TYPE(o)->tp_name);
return 0;
}
if (str == NULL) {
PyErr_Format(PyExc_ValueError, "expected a string, got %s", Py_TYPE(o)->tp_name);
return 0;
}
int comp_type = bpygpu_parse_component_type(str, length);
if (comp_type == -1) {
PyErr_Format(PyExc_ValueError,
"unkown component type: '%s",
str);
return 0;
}
int comp_type = bpygpu_parse_component_type(str, length);
if (comp_type == -1) {
PyErr_Format(PyExc_ValueError, "unkown component type: '%s", str);
return 0;
}
*((GPUVertCompType *)p) = comp_type;
return 1;
*((GPUVertCompType *)p) = comp_type;
return 1;
}
static int bpygpu_ParseVertFetchMode(PyObject *o, void *p)
{
Py_ssize_t length;
const char *str = _PyUnicode_AsStringAndSize(o, &length);
Py_ssize_t length;
const char *str = _PyUnicode_AsStringAndSize(o, &length);
if (str == NULL) {
PyErr_Format(PyExc_ValueError,
"expected a string, got %s",
Py_TYPE(o)->tp_name);
return 0;
}
if (str == NULL) {
PyErr_Format(PyExc_ValueError, "expected a string, got %s", Py_TYPE(o)->tp_name);
return 0;
}
int fetch_mode = bpygpu_parse_fetch_mode(str, length);
if (fetch_mode == -1) {
PyErr_Format(PyExc_ValueError,
"unknown type literal: '%s'",
str);
return 0;
}
int fetch_mode = bpygpu_parse_fetch_mode(str, length);
if (fetch_mode == -1) {
PyErr_Format(PyExc_ValueError, "unknown type literal: '%s'", str);
return 0;
}
(*(GPUVertFetchMode *)p) = fetch_mode;
return 1;
(*(GPUVertFetchMode *)p) = fetch_mode;
return 1;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name VertFormat Type
* \{ */
/**
 * tp_new for GPUVertFormat: accepts no positional or keyword arguments
 * and returns a zero-initialized vertex format wrapper.
 */
static PyObject *bpygpu_VertFormat_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
  if (PyTuple_GET_SIZE(args) || (kwds && PyDict_Size(kwds))) {
    PyErr_SetString(PyExc_ValueError, "This function takes no arguments");
    return NULL;
  }
  /* NULL requests a zeroed format, see #BPyGPUVertFormat_CreatePyObject. */
  return BPyGPUVertFormat_CreatePyObject(NULL);
}
/* Python-level docstring for GPUVertFormat.attr_add(). */
PyDoc_STRVAR(
    bpygpu_VertFormat_attr_add_doc,
    ".. method:: attr_add(id, comp_type, len, fetch_mode)\n"
    "\n"
    "   Add a new attribute to the format.\n"
    "\n"
    "   :param id: Name the attribute. Often `position`, `normal`, ...\n"
    "   :type id: str\n"
    "   :param comp_type: The data type that will be used store the value in memory.\n"
    "      Possible values are `I8`, `U8`, `I16`, `U16`, `I32`, `U32`, `F32` and `I10`.\n"
    "   :type comp_type: `str`\n"
    "   :param len: How many individual values the attribute consists of (e.g. 2 for uv "
    "coordinates).\n"
    "   :type len: int\n"
    "   :param fetch_mode: How values from memory will be converted when used in the shader.\n"
    "      This is mainly useful for memory optimizations when you want to store values with "
    "reduced precision.\n"
    "      E.g. you can store a float in only 1 byte but it will be converted to a normal 4 byte "
    "float when used.\n"
    "      Possible values are `FLOAT`, `INT`, `INT_TO_FLOAT_UNIT` and `INT_TO_FLOAT`.\n"
    "   :type fetch_mode: `str`\n");
/**
 * Implementation of GPUVertFormat.attr_add(): parses keyword-only
 * arguments, registers the attribute on the wrapped #GPUVertFormat and
 * returns the new attribute index as a Python int (NULL on error).
 */
static PyObject *bpygpu_VertFormat_attr_add(BPyGPUVertFormat *self, PyObject *args, PyObject *kwds)
{
  struct {
    const char *id;
    GPUVertCompType comp_type;
    uint len;
    GPUVertFetchMode fetch_mode;
  } params;

  /* The format has a fixed-size attribute table; refuse to overflow it. */
  if (self->fmt.attr_len == GPU_VERT_ATTR_MAX_LEN) {
    PyErr_SetString(PyExc_ValueError, "Maxumum attr reached " STRINGIFY(GPU_VERT_ATTR_MAX_LEN));
    return NULL;
  }

  /* "$" makes every argument keyword-only; "O&" routes through the
   * converters defined above. */
  static const char *_keywords[] = {"id", "comp_type", "len", "fetch_mode", NULL};
  static _PyArg_Parser _parser = {"$sO&IO&:attr_add", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(args,
                                        kwds,
                                        &_parser,
                                        &params.id,
                                        bpygpu_ParseVertCompType,
                                        &params.comp_type,
                                        &params.len,
                                        bpygpu_ParseVertFetchMode,
                                        &params.fetch_mode)) {
    return NULL;
  }

  uint attr_id = GPU_vertformat_attr_add(
      &self->fmt, params.id, params.comp_type, params.len, params.fetch_mode);
  return PyLong_FromLong(attr_id);
}
/* Method table for the GPUVertFormat type; NULL-terminated. */
static struct PyMethodDef bpygpu_VertFormat_methods[] = {
    {"attr_add",
     (PyCFunction)bpygpu_VertFormat_attr_add,
     METH_VARARGS | METH_KEYWORDS,
     bpygpu_VertFormat_attr_add_doc},
    {NULL, NULL, 0, NULL},
};
/**
 * tp_dealloc: the wrapped #GPUVertFormat is stored inline (by value),
 * so releasing the Python object is all the cleanup needed.
 * Note: tp_free must be called exactly once.
 */
static void bpygpu_VertFormat_dealloc(BPyGPUVertFormat *self)
{
  Py_TYPE(self)->tp_free(self);
}
/* Python-level docstring for the GPUVertFormat class. */
PyDoc_STRVAR(bpygpu_VertFormat_doc,
             ".. class:: GPUVertFormat()\n"
             "\n"
             "   This object contains information about the structure of a vertex buffer.\n");
/* Python type object backing gpu.types.GPUVertFormat. */
PyTypeObject BPyGPUVertFormat_Type = {
    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUVertFormat",
    .tp_basicsize = sizeof(BPyGPUVertFormat),
    .tp_dealloc = (destructor)bpygpu_VertFormat_dealloc,
    .tp_flags = Py_TPFLAGS_DEFAULT,
    .tp_doc = bpygpu_VertFormat_doc,
    .tp_methods = bpygpu_VertFormat_methods,
    .tp_new = bpygpu_VertFormat_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
/**
 * Create a new GPUVertFormat Python object.
 *
 * \param fmt: Format to copy into the wrapper, or NULL to start with a
 * zero-initialized (empty) format.
 * \return New reference (never checked for allocation failure here;
 * PyObject_New sets the Python error on failure).
 */
PyObject *BPyGPUVertFormat_CreatePyObject(GPUVertFormat *fmt)
{
  BPyGPUVertFormat *self;

  self = PyObject_New(BPyGPUVertFormat, &BPyGPUVertFormat_Type);
  if (fmt) {
    self->fmt = *fmt;
  }
  else {
    memset(&self->fmt, 0, sizeof(self->fmt));
  }

  return (PyObject *)self;
}
/** \} */

View File

@@ -25,11 +25,10 @@
extern PyTypeObject BPyGPUVertFormat_Type;

/* Fast type check for GPUVertFormat wrappers (exact type, no subclassing). */
#define BPyGPUVertFormat_Check(v) (Py_TYPE(v) == &BPyGPUVertFormat_Type)

/* Python wrapper holding a #GPUVertFormat by value. */
typedef struct BPyGPUVertFormat {
  PyObject_VAR_HEAD
  struct GPUVertFormat fmt;
} BPyGPUVertFormat;

PyObject *BPyGPUVertFormat_CreatePyObject(struct GPUVertFormat *fmt);