Join the python modules gpu and _gpu into one.

Maybe it's still early to set the new drawing api for python.
But joining these two modules is an initial step.

```
>>> gpu.
        matrix
        select
        types
```
```
>>> gpu.types.GPU
                 Batch(
                 OffScreen(
                 VertBuf(
                 VertFormat(
```
The creation of a new offscreen object is now done by the `GPUOffScreen.__new__` method.

Reviewers: campbellbarton, dfelinto

Reviewed By: campbellbarton, dfelinto

Tags: #bf_blender_2.8

Differential Revision: https://developer.blender.org/D3667
This commit is contained in:
2018-09-05 21:10:42 -03:00
parent b060248324
commit 6d04e48539
22 changed files with 1726 additions and 1000 deletions

View File

@@ -42,7 +42,7 @@ class OffScreenDraw(bpy.types.Operator):
aspect_ratio = scene.render.resolution_x / scene.render.resolution_y aspect_ratio = scene.render.resolution_x / scene.render.resolution_y
try: try:
offscreen = gpu.offscreen.new(512, int(512 / aspect_ratio)) offscreen = gpu.types.GPUOffScreen(512, int(512 / aspect_ratio))
except Exception as e: except Exception as e:
print(e) print(e)
offscreen = None offscreen = None
@@ -52,7 +52,7 @@ class OffScreenDraw(bpy.types.Operator):
@staticmethod @staticmethod
def _update_offscreen(context, offscreen): def _update_offscreen(context, offscreen):
scene = context.scene scene = context.scene
render_layer = context.render_layer view_layer = context.view_layer
render = scene.render render = scene.render
camera = scene.camera camera = scene.camera

View File

@@ -1,20 +0,0 @@
*******************
GPU functions (gpu)
*******************
.. module:: gpu
Functions for GPU offscreen rendering, matrix stacks and selection.
Submodules:
.. toctree::
:maxdepth: 1
gpu.offscreen.rst
Intro
=====
Module to provide functions concerning the GPU implementation in Blender.

View File

@@ -235,7 +235,9 @@ else:
"bpy.utils.previews", "bpy.utils.previews",
"bpy_extras", "bpy_extras",
"gpu", "gpu",
"gpu.offscreen", "gpu.types",
"gpu.matrix",
"gpu.select",
"idprop.types", "idprop.types",
"mathutils", "mathutils",
"mathutils.bvhtree", "mathutils.bvhtree",
@@ -1822,7 +1824,10 @@ def write_rst_importable_modules(basepath):
# C_modules # C_modules
"aud": "Audio System", "aud": "Audio System",
"blf": "Font Drawing", "blf": "Font Drawing",
"gpu.offscreen": "GPU Off-Screen Buffer", "gpu": "GPU Shader Module",
"gpu.types": "GPU Types",
"gpu.matrix": "GPU Matrix",
"gpu.select": "GPU Select",
"bmesh": "BMesh Module", "bmesh": "BMesh Module",
"bmesh.types": "BMesh Types", "bmesh.types": "BMesh Types",
"bmesh.utils": "BMesh Utilities", "bmesh.utils": "BMesh Utilities",
@@ -1865,7 +1870,6 @@ def copy_handwritten_rsts(basepath):
# TODO put this docs in Blender's code and use import as per modules above # TODO put this docs in Blender's code and use import as per modules above
handwritten_modules = [ handwritten_modules = [
"bgl", # "Blender OpenGl wrapper" "bgl", # "Blender OpenGl wrapper"
"gpu", # "GPU Shader Module"
"bmesh.ops", # generated by rst_from_bmesh_opdefines.py "bmesh.ops", # generated by rst_from_bmesh_opdefines.py
# includes... # includes...

View File

@@ -35,10 +35,22 @@ set(INC_SYS
set(SRC set(SRC
gpu_py_api.c gpu_py_api.c
gpu_py_batch.c
gpu_py_matrix.c
gpu_py_offscreen.c
gpu_py_select.c
gpu_py_types.c gpu_py_types.c
gpu_py_vertex_buffer.c
gpu_py_vertex_format.c
gpu_py_api.h gpu_py_api.h
gpu_py_batch.h
gpu_py_matrix.h
gpu_py_offscreen.h
gpu_py_select.h
gpu_py_types.h gpu_py_types.h
gpu_py_vertex_buffer.h
gpu_py_vertex_format.h
) )
add_definitions(${GL_DEFINITIONS}) add_definitions(${GL_DEFINITIONS})

View File

@@ -19,31 +19,44 @@
*/ */
/** \file blender/python/gpu/gpu_py_api.c /** \file blender/python/gpu/gpu_py_api.c
* \ingroup pygpu * \ingroup bpygpu
* *
* Experimental Python API, not considered public yet (called '_gpu'), * Experimental Python API, not considered public yet (called '_gpu'),
* we may re-expose as public later. * we may re-expose as public later.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/ */
#include <Python.h> #include <Python.h>
#include "GPU_batch.h"
#include "GPU_vertex_format.h"
#include "gpu_py_api.h"
#include "gpu_py_types.h"
#include "BLI_utildefines.h" #include "BLI_utildefines.h"
#include "../generic/python_utildefines.h" #include "../generic/python_utildefines.h"
#include "gpu_py_matrix.h"
#include "gpu_py_select.h"
#include "gpu_py_types.h"
#include "gpu_py_api.h" /* own include */
PyDoc_STRVAR(GPU_doc, PyDoc_STRVAR(GPU_doc,
"This module provides access to gpu drawing functions." "This module to provide functions concerning the GPU implementation in Blender."
"\n\n"
"Submodules:\n"
"\n"
".. toctree::\n"
" :maxdepth: 1\n"
"\n"
" gpu.types.rst\n"
" gpu.matrix.rst\n"
" gpu.select.rst\n"
"\n"
); );
static struct PyModuleDef GPU_module_def = { static struct PyModuleDef GPU_module_def = {
PyModuleDef_HEAD_INIT, PyModuleDef_HEAD_INIT,
.m_name = "_gpu", /* m_name */ .m_name = "gpu",
.m_doc = GPU_doc, /* m_doc */ .m_doc = GPU_doc,
}; };
PyObject *BPyInit_gpu(void) PyObject *BPyInit_gpu(void)
@@ -54,10 +67,17 @@ PyObject *BPyInit_gpu(void)
mod = PyModule_Create(&GPU_module_def); mod = PyModule_Create(&GPU_module_def);
/* _gpu.types */
PyModule_AddObject(mod, "types", (submodule = BPyInit_gpu_types())); PyModule_AddObject(mod, "types", (submodule = BPyInit_gpu_types()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule); PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
Py_INCREF(submodule); Py_INCREF(submodule);
PyModule_AddObject(mod, "matrix", (submodule = BPyInit_gpu_matrix()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
Py_INCREF(submodule);
PyModule_AddObject(mod, "select", (submodule = BPyInit_gpu_select()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
Py_INCREF(submodule);
return mod; return mod;
} }

View File

@@ -18,13 +18,13 @@
* ***** END GPL LICENSE BLOCK ***** * ***** END GPL LICENSE BLOCK *****
*/ */
/** \file blender/python/gpu/gpu_py_api.h
* \ingroup bpygpu
*/
#ifndef __GPU_PY_API_H__ #ifndef __GPU_PY_API_H__
#define __GPU_PY_API_H__ #define __GPU_PY_API_H__
/** \file blender/python/gpu/gpu_py_api.h
* \ingroup pygpu
*/
PyObject *BPyInit_gpu(void); PyObject *BPyInit_gpu(void);
#endif /* __GPU_PY_API_H__ */ #endif /* __GPU_PY_API_H__ */

View File

@@ -0,0 +1,414 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2015, Blender Foundation.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/python/gpu/gpu_py_batch.c
* \ingroup bpygpu
*
* This file defines the offscreen functionalities of the 'gpu' module
* used for off-screen OpenGL rendering.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BKE_global.h"
#include "BKE_library.h"
#include "GPU_batch.h"
#include "../mathutils/mathutils.h"
#include "../generic/py_capi_utils.h"
#include "gpu_py_vertex_buffer.h"
#include "gpu_py_batch.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name VertBatch Type
* \{ */
/* Convert a primitive-type name ("POINTS", "LINES", ...) to a GPUPrimType.
 * Used as a PyArg_ParseTupleAndKeywords 'O&' converter:
 * returns 1 on success (result stored in *p), 0 on failure with a
 * Python exception set. */
static int bpygpu_ParsePrimType(PyObject *o, void *p)
{
	Py_ssize_t mode_id_len;
	const char *mode_id = _PyUnicode_AsStringAndSize(o, &mode_id_len);
	if (mode_id == NULL) {
		PyErr_Format(PyExc_ValueError,
		             "expected a string, got %s",
		             Py_TYPE(o)->tp_name);
		return 0;
	}
	/* Compare the length first so most mismatches skip the string compare. */
#define MATCH_ID(id) \
	if (mode_id_len == strlen(STRINGIFY(id))) { \
		if (STREQ(mode_id, STRINGIFY(id))) { \
			mode = GPU_PRIM_##id; \
			goto success; \
		} \
	} ((void)0)

	GPUPrimType mode;
	MATCH_ID(POINTS);
	MATCH_ID(LINES);
	MATCH_ID(TRIS);
	MATCH_ID(LINE_STRIP);
	MATCH_ID(LINE_LOOP);
	MATCH_ID(TRI_STRIP);
	MATCH_ID(TRI_FAN);
	MATCH_ID(LINE_STRIP_ADJ);

#undef MATCH_ID

	PyErr_Format(PyExc_ValueError,
	             "unknown type literal: '%s'",
	             mode_id);
	return 0;

success:
	(*(GPUPrimType *)p) = mode;
	return 1;
}
/* GPUBatch.__new__: create a batch from a primitive-type name and a
 * GPUVertBuf. Both arguments are keyword-only ('$' in the format string). */
static PyObject *bpygpu_Batch_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
	const char * const keywords[] = {"type", "buf", NULL};

	struct {
		GPUPrimType type_id;
		BPyGPUVertBuf *py_buf;
	} params;

	if (!PyArg_ParseTupleAndKeywords(
	        args, kwds,
	        "$O&O!:GPUBatch.__new__", (char **)keywords,
	        bpygpu_ParsePrimType, &params.type_id,
	        &BPyGPUVertBuf_Type, &params.py_buf))
	{
		return NULL;
	}

	GPUBatch *batch = GPU_batch_create(params.type_id, params.py_buf->buf, NULL);
	BPyGPUBatch *ret = (BPyGPUBatch *)BPyGPUBatch_CreatePyObject(batch);

#ifdef USE_GPU_PY_REFERENCES
	/* Hold a reference to the vertex buffer so it can't be freed
	 * while this batch still uses it. */
	ret->references = PyList_New(1);
	PyList_SET_ITEM(ret->references, 0, (PyObject *)params.py_buf);
	Py_INCREF(params.py_buf);
	PyObject_GC_Track(ret);
#endif

	return (PyObject *)ret;
}
/* Docstring was a "TODO" placeholder; document the actual contract. */
PyDoc_STRVAR(bpygpu_VertBatch_vertbuf_add_doc,
"vertbuf_add(buf)\n"
"\n"
"   Add another vertex buffer to this batch.\n"
"   It must have the same vertex count as the batch's first buffer.\n"
"\n"
"   :param buf: The vertex buffer to add.\n"
"   :type buf: :class:`gpu.types.GPUVertBuf`\n"
);
static PyObject *bpygpu_VertBatch_vertbuf_add(BPyGPUBatch *self, BPyGPUVertBuf *py_buf)
{
	/* METH_O gives us the raw argument; validate its type ourselves. */
	if (!BPyGPUVertBuf_Check(py_buf)) {
		PyErr_Format(PyExc_TypeError,
		             "Expected a GPUVertBuf, got %s",
		             Py_TYPE(py_buf)->tp_name);
		return NULL;
	}

	/* All buffers in a batch must agree on the vertex count. */
	if (self->batch->verts[0]->vertex_len != py_buf->buf->vertex_len) {
		PyErr_Format(PyExc_TypeError,
		             "Expected %d length, got %d",
		             self->batch->verts[0]->vertex_len, py_buf->buf->vertex_len);
		return NULL;
	}

#ifdef USE_GPU_PY_REFERENCES
	/* Hold user */
	PyList_Append(self->references, (PyObject *)py_buf);
#endif

	GPU_batch_vertbuf_add(self->batch, py_buf->buf);
	Py_RETURN_NONE;
}
/* Currently magic number from Py perspective. */
/* Docstring was a "TODO" placeholder; list the accepted shader names,
 * matching the MATCH_ID table below. */
PyDoc_STRVAR(bpygpu_VertBatch_program_set_builtin_doc,
"program_set_builtin(id)\n"
"\n"
"   Assign a built-in shader to this batch.\n"
"\n"
"   :param id: One of: '2D_FLAT_COLOR', '2D_SMOOTH_COLOR', '2D_UNIFORM_COLOR',\n"
"      '3D_FLAT_COLOR', '3D_SMOOTH_COLOR', '3D_UNIFORM_COLOR'.\n"
"   :type id: str\n"
);
static PyObject *bpygpu_VertBatch_program_set_builtin(BPyGPUBatch *self, PyObject *args, PyObject *kwds)
{
	static const char *kwlist[] = {"id", NULL};

	struct {
		const char *shader;
	} params;

	if (!PyArg_ParseTupleAndKeywords(
	        args, kwds, "s:program_set_builtin", (char **)kwlist,
	        &params.shader))
	{
		return NULL;
	}

	GPUBuiltinShader shader;

#define MATCH_ID(id) \
	if (STREQ(params.shader, STRINGIFY(id))) { \
		shader = GPU_SHADER_##id; \
		goto success; \
	} ((void)0)

	MATCH_ID(2D_FLAT_COLOR);
	MATCH_ID(2D_SMOOTH_COLOR);
	MATCH_ID(2D_UNIFORM_COLOR);
	MATCH_ID(3D_FLAT_COLOR);
	MATCH_ID(3D_SMOOTH_COLOR);
	MATCH_ID(3D_UNIFORM_COLOR);

#undef MATCH_ID

	PyErr_SetString(PyExc_ValueError,
	                "shader name not known");
	return NULL;

success:
	GPU_batch_program_set_builtin(self->batch, shader);
	Py_RETURN_NONE;
}
/* GPUBatch.uniform_bool(id, value): set a boolean uniform on the batch program. */
static PyObject *bpygpu_VertBatch_uniform_bool(BPyGPUBatch *self, PyObject *args)
{
	struct {
		const char *id;
		bool values[1];
	} params;

	if (!PyArg_ParseTuple(
	        args, "sO&:uniform_bool",
	        &params.id,
	        PyC_ParseBool, &params.values[0]))
	{
		return NULL;
	}

	GPU_batch_uniform_1b(self->batch, params.id, params.values[0]);
	Py_RETURN_NONE;
}
/* GPUBatch.uniform_i32(id, value): set an integer uniform on the batch program. */
static PyObject *bpygpu_VertBatch_uniform_i32(BPyGPUBatch *self, PyObject *args)
{
	struct {
		const char *id;
		int values[1];
	} params;

	if (!PyArg_ParseTuple(
	        args, "si:uniform_i32",
	        &params.id,
	        &params.values[0]))
	{
		return NULL;
	}

	GPU_batch_uniform_1i(self->batch, params.id, params.values[0]);
	Py_RETURN_NONE;
}
/* GPUBatch.uniform_f32(id, f0[, f1[, f2[, f3]]]): set a float/vec2/vec3/vec4
 * uniform, dispatched on how many floats were passed. */
static PyObject *bpygpu_VertBatch_uniform_f32(BPyGPUBatch *self, PyObject *args)
{
	struct {
		const char *id;
		float values[4];
	} params;

	if (!PyArg_ParseTuple(
	        args, "sf|fff:uniform_f32",
	        &params.id,
	        &params.values[0], &params.values[1], &params.values[2], &params.values[3]))
	{
		return NULL;
	}

	/* args holds the name plus 1-4 floats, so tuple sizes 2-5 are possible. */
	switch (PyTuple_GET_SIZE(args)) {
		case 2: GPU_batch_uniform_1f(self->batch, params.id, params.values[0]); break;
		case 3: GPU_batch_uniform_2f(self->batch, params.id, UNPACK2(params.values)); break;
		case 4: GPU_batch_uniform_3f(self->batch, params.id, UNPACK3(params.values)); break;
		case 5: GPU_batch_uniform_4f(self->batch, params.id, UNPACK4(params.values)); break;
		default:
			BLI_assert(0);
	}
	Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_VertBatch_draw_doc,
"draw()\n"
"\n"
"   Run the drawing program with the parameters assigned to the batch.\n"
"   The batch's program must have been set first.\n"
);
static PyObject *bpygpu_VertBatch_draw(BPyGPUBatch *self)
{
	if (!glIsProgram(self->batch->program)) {
		PyErr_SetString(PyExc_ValueError,
		                "batch program has not been set");
		/* Was missing: without this return the exception was set but None
		 * was still returned (a SystemError from CPython's perspective). */
		return NULL;
	}
	GPU_batch_draw(self->batch);
	Py_RETURN_NONE;
}
/* Bind the batch's program for use outside of GPUBatch.draw(). */
static PyObject *bpygpu_VertBatch_program_use_begin(BPyGPUBatch *self)
{
	if (!glIsProgram(self->batch->program)) {
		PyErr_SetString(PyExc_ValueError,
		                "batch program has not been set");
		/* Was missing: exception was set but execution continued. */
		return NULL;
	}
	GPU_batch_program_use_begin(self->batch);
	Py_RETURN_NONE;
}
/* Unbind the batch's program, pairing with program_use_begin(). */
static PyObject *bpygpu_VertBatch_program_use_end(BPyGPUBatch *self)
{
	if (!glIsProgram(self->batch->program)) {
		PyErr_SetString(PyExc_ValueError,
		                "batch program has not been set");
		/* Was missing: exception was set but execution continued. */
		return NULL;
	}
	GPU_batch_program_use_end(self->batch);
	Py_RETURN_NONE;
}
/* Method table for the GPUBatch type. */
static struct PyMethodDef bpygpu_VertBatch_methods[] = {
	{"vertbuf_add", (PyCFunction)bpygpu_VertBatch_vertbuf_add,
	 METH_O, bpygpu_VertBatch_vertbuf_add_doc},
	{"program_set_builtin", (PyCFunction)bpygpu_VertBatch_program_set_builtin,
	 METH_VARARGS | METH_KEYWORDS, bpygpu_VertBatch_program_set_builtin_doc},
	{"uniform_bool", (PyCFunction)bpygpu_VertBatch_uniform_bool,
	 METH_VARARGS, NULL},
	{"uniform_i32", (PyCFunction)bpygpu_VertBatch_uniform_i32,
	 METH_VARARGS, NULL},
	{"uniform_f32", (PyCFunction)bpygpu_VertBatch_uniform_f32,
	 METH_VARARGS, NULL},
	{"draw", (PyCFunction) bpygpu_VertBatch_draw,
	 METH_NOARGS, bpygpu_VertBatch_draw_doc},
	{"program_use_begin", (PyCFunction)bpygpu_VertBatch_program_use_begin,
	 METH_NOARGS, ""},
	{"program_use_end", (PyCFunction)bpygpu_VertBatch_program_use_end,
	 METH_NOARGS, ""},
	{NULL, NULL, 0, NULL}
};
#ifdef USE_GPU_PY_REFERENCES

/* Cyclic-GC visit: the only PyObject this type owns is the 'references' list. */
static int bpygpu_Batch_traverse(BPyGPUBatch *self, visitproc visit, void *arg)
{
	Py_VISIT(self->references);
	return 0;
}

/* Cyclic-GC clear: drop the held vertex-buffer references. */
static int bpygpu_Batch_clear(BPyGPUBatch *self)
{
	Py_CLEAR(self->references);
	return 0;
}

#endif
/* Destructor: free the owned GPUBatch, then (when GC support is enabled)
 * untrack and release the held references before freeing the object itself. */
static void bpygpu_Batch_dealloc(BPyGPUBatch *self)
{
	GPU_batch_discard(self->batch);

#ifdef USE_GPU_PY_REFERENCES
	if (self->references) {
		/* Untrack before tearing down, so the collector never sees us
		 * in a half-cleared state. */
		PyObject_GC_UnTrack(self);
		bpygpu_Batch_clear(self);
		Py_XDECREF(self->references);
	}
#endif

	Py_TYPE(self)->tp_free(self);
}
/* Docstring fixes: the previous text listed 'GPU_PRIM_*' names that
 * bpygpu_ParsePrimType rejects (it matches the bare names), included
 * literals the parser does not support (TRIS_ADJ, NONE, ...), and had
 * stray backticks in the ':type:' fields. */
PyDoc_STRVAR(py_gpu_batch_doc,
"GPUBatch(type, buf)\n"
"\n"
"Contains VAOs + VBOs + Shader representing a drawable entity."
"\n"
"   :param type: One of these primitive types: {\n"
"      'POINTS',\n"
"      'LINES',\n"
"      'TRIS',\n"
"      'LINE_STRIP',\n"
"      'LINE_LOOP',\n"
"      'TRI_STRIP',\n"
"      'TRI_FAN',\n"
"      'LINE_STRIP_ADJ'\n"
"   }.\n"
"   :type type: str\n"
"   :param buf: Vertex buffer.\n"
"   :type buf: :class:`gpu.types.GPUVertBuf`\n"
);
PyTypeObject BPyGPUBatch_Type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "GPUBatch",
	.tp_basicsize = sizeof(BPyGPUBatch),
	.tp_dealloc = (destructor)bpygpu_Batch_dealloc,
	/* Previously only set in the USE_GPU_PY_REFERENCES branch, so the
	 * non-GC build had no docstring. */
	.tp_doc = py_gpu_batch_doc,
#ifdef USE_GPU_PY_REFERENCES
	.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
	.tp_traverse = (traverseproc)bpygpu_Batch_traverse,
	.tp_clear = (inquiry)bpygpu_Batch_clear,
#else
	.tp_flags = Py_TPFLAGS_DEFAULT,
#endif
	.tp_methods = bpygpu_VertBatch_methods,
	.tp_new = bpygpu_Batch_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
/* Wrap an existing GPUBatch in a new BPyGPUBatch Python object.
 * Ownership of 'batch' transfers to the returned object
 * (it is freed by bpygpu_Batch_dealloc). */
PyObject *BPyGPUBatch_CreatePyObject(GPUBatch *batch)
{
	BPyGPUBatch *self;

#ifdef USE_GPU_PY_REFERENCES
	self = (BPyGPUBatch *)_PyObject_GC_New(&BPyGPUBatch_Type);
	/* Callers fill this in (see bpygpu_Batch_new); NULL until then. */
	self->references = NULL;
#else
	self = PyObject_New(BPyGPUBatch, &BPyGPUBatch_Type);
#endif

	self->batch = batch;
	return (PyObject *)self;
}
/** \} */
#undef BPY_GPU_BATCH_CHECK_OBJ

View File

@@ -0,0 +1,48 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/python/gpu/gpu_py_batch.h
* \ingroup bpygpu
*/
#ifndef __GPU_PY_BATCH_H__
#define __GPU_PY_BATCH_H__
#include "BLI_compiler_attrs.h"
/* When defined, the Python wrapper keeps references to the vertex buffers
 * added to a batch, preventing them from being freed while still in use. */
#define USE_GPU_PY_REFERENCES

extern PyTypeObject BPyGPUBatch_Type;

#define BPyGPUBatch_Check(v) (Py_TYPE(v) == &BPyGPUBatch_Type)

/* Python wrapper around an owned GPUBatch. */
typedef struct BPyGPUBatch {
	PyObject_VAR_HEAD
	/* The batch is owned, we may support thin wrapped batches later. */
	struct GPUBatch *batch;
#ifdef USE_GPU_PY_REFERENCES
	/* Just to keep a user to prevent freeing buf's we're using */
	PyObject *references;
#endif
} BPyGPUBatch;

PyObject *BPyGPUBatch_CreatePyObject(struct GPUBatch *batch) ATTR_NONNULL(1);
#endif /* __GPU_PY_BATCH_H__ */

View File

@@ -18,13 +18,16 @@
* ***** END GPL LICENSE BLOCK ***** * ***** END GPL LICENSE BLOCK *****
*/ */
/** \file blender/python/intern/gpu_py_matrix.c /** \file blender/python/gpu/gpu_py_matrix.c
* \ingroup pythonintern * \ingroup bpygpu
* *
* This file defines the gpu.matrix stack API. * This file defines the gpu.matrix stack API.
* *
* \warning While these functions attempt to ensure correct stack usage. * \warning While these functions attempt to ensure correct stack usage.
* Mixing Python and C functions may still crash on invalid use. * Mixing Python and C functions may still crash on invalid use.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/ */
#include <Python.h> #include <Python.h>
@@ -36,17 +39,17 @@
#include "../generic/py_capi_utils.h" #include "../generic/py_capi_utils.h"
#include "gpu.h"
#define USE_GPU_PY_MATRIX_API #define USE_GPU_PY_MATRIX_API
#include "GPU_matrix.h" #include "GPU_matrix.h"
#undef USE_GPU_PY_MATRIX_API #undef USE_GPU_PY_MATRIX_API
#include "gpu_py_matrix.h" /* own include */
/* -------------------------------------------------------------------- */ /* -------------------------------------------------------------------- */
/** \name Helper Functions /** \name Helper Functions
* \{ */ * \{ */
static bool pygpu_stack_is_push_model_view_ok_or_error(void) static bool bpygpu_stack_is_push_model_view_ok_or_error(void)
{ {
if (GPU_matrix_stack_level_get_model_view() >= GPU_PY_MATRIX_STACK_LEN) { if (GPU_matrix_stack_level_get_model_view() >= GPU_PY_MATRIX_STACK_LEN) {
PyErr_SetString(PyExc_RuntimeError, PyErr_SetString(PyExc_RuntimeError,
@@ -56,7 +59,7 @@ static bool pygpu_stack_is_push_model_view_ok_or_error(void)
return true; return true;
} }
static bool pygpu_stack_is_push_projection_ok_or_error(void) static bool bpygpu_stack_is_push_projection_ok_or_error(void)
{ {
if (GPU_matrix_stack_level_get_projection() >= GPU_PY_MATRIX_STACK_LEN) { if (GPU_matrix_stack_level_get_projection() >= GPU_PY_MATRIX_STACK_LEN) {
PyErr_SetString(PyExc_RuntimeError, PyErr_SetString(PyExc_RuntimeError,
@@ -66,7 +69,7 @@ static bool pygpu_stack_is_push_projection_ok_or_error(void)
return true; return true;
} }
static bool pygpu_stack_is_pop_model_view_ok_or_error(void) static bool bpygpu_stack_is_pop_model_view_ok_or_error(void)
{ {
if (GPU_matrix_stack_level_get_model_view() == 0) { if (GPU_matrix_stack_level_get_model_view() == 0) {
PyErr_SetString(PyExc_RuntimeError, PyErr_SetString(PyExc_RuntimeError,
@@ -76,7 +79,7 @@ static bool pygpu_stack_is_pop_model_view_ok_or_error(void)
return true; return true;
} }
static bool pygpu_stack_is_pop_projection_ok_or_error(void) static bool bpygpu_stack_is_pop_projection_ok_or_error(void)
{ {
if (GPU_matrix_stack_level_get_projection() == 0) { if (GPU_matrix_stack_level_get_projection() == 0) {
PyErr_SetString(PyExc_RuntimeError, PyErr_SetString(PyExc_RuntimeError,
@@ -92,56 +95,56 @@ static bool pygpu_stack_is_pop_projection_ok_or_error(void)
/** \name Manage Stack /** \name Manage Stack
* \{ */ * \{ */
PyDoc_STRVAR(pygpu_matrix_push_doc, PyDoc_STRVAR(bpygpu_matrix_push_doc,
"push()\n" "push()\n"
"\n" "\n"
" Add to the model-view matrix stack.\n" " Add to the model-view matrix stack.\n"
); );
static PyObject *pygpu_matrix_push(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_push(PyObject *UNUSED(self))
{ {
if (!pygpu_stack_is_push_model_view_ok_or_error()) { if (!bpygpu_stack_is_push_model_view_ok_or_error()) {
return NULL; return NULL;
} }
GPU_matrix_push(); GPU_matrix_push();
Py_RETURN_NONE; Py_RETURN_NONE;
} }
PyDoc_STRVAR(pygpu_matrix_pop_doc, PyDoc_STRVAR(bpygpu_matrix_pop_doc,
"pop()\n" "pop()\n"
"\n" "\n"
" Remove the last model-view matrix from the stack.\n" " Remove the last model-view matrix from the stack.\n"
); );
static PyObject *pygpu_matrix_pop(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_pop(PyObject *UNUSED(self))
{ {
if (!pygpu_stack_is_pop_model_view_ok_or_error()) { if (!bpygpu_stack_is_pop_model_view_ok_or_error()) {
return NULL; return NULL;
} }
GPU_matrix_pop(); GPU_matrix_pop();
Py_RETURN_NONE; Py_RETURN_NONE;
} }
PyDoc_STRVAR(pygpu_matrix_push_projection_doc, PyDoc_STRVAR(bpygpu_matrix_push_projection_doc,
"push_projection()\n" "push_projection()\n"
"\n" "\n"
" Add to the projection matrix stack.\n" " Add to the projection matrix stack.\n"
); );
static PyObject *pygpu_matrix_push_projection(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_push_projection(PyObject *UNUSED(self))
{ {
if (!pygpu_stack_is_push_projection_ok_or_error()) { if (!bpygpu_stack_is_push_projection_ok_or_error()) {
return NULL; return NULL;
} }
GPU_matrix_push_projection(); GPU_matrix_push_projection();
Py_RETURN_NONE; Py_RETURN_NONE;
} }
PyDoc_STRVAR(pygpu_matrix_pop_projection_doc, PyDoc_STRVAR(bpygpu_matrix_pop_projection_doc,
"pop_projection()\n" "pop_projection()\n"
"\n" "\n"
" Remove the last projection matrix from the stack.\n" " Remove the last projection matrix from the stack.\n"
); );
static PyObject *pygpu_matrix_pop_projection(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_pop_projection(PyObject *UNUSED(self))
{ {
if (!pygpu_stack_is_pop_projection_ok_or_error()) { if (!bpygpu_stack_is_pop_projection_ok_or_error()) {
return NULL; return NULL;
} }
GPU_matrix_pop_projection(); GPU_matrix_pop_projection();
@@ -161,31 +164,31 @@ typedef struct {
PyObject_HEAD /* required python macro */ PyObject_HEAD /* required python macro */
int type; int type;
int level; int level;
} BPy_GPU_MatrixStackContext; } BPyGPU_MatrixStackContext;
enum { enum {
PYGPU_MATRIX_TYPE_MODEL_VIEW = 1, PYGPU_MATRIX_TYPE_MODEL_VIEW = 1,
PYGPU_MATRIX_TYPE_PROJECTION = 2, PYGPU_MATRIX_TYPE_PROJECTION = 2,
}; };
static PyObject *pygpu_matrix_stack_context_enter(BPy_GPU_MatrixStackContext *self); static PyObject *bpygpu_matrix_stack_context_enter(BPyGPU_MatrixStackContext *self);
static PyObject *pygpu_matrix_stack_context_exit(BPy_GPU_MatrixStackContext *self, PyObject *args); static PyObject *bpygpu_matrix_stack_context_exit(BPyGPU_MatrixStackContext *self, PyObject *args);
static PyMethodDef pygpu_matrix_stack_context_methods[] = { static PyMethodDef bpygpu_matrix_stack_context_methods[] = {
{"__enter__", (PyCFunction)pygpu_matrix_stack_context_enter, METH_NOARGS}, {"__enter__", (PyCFunction)bpygpu_matrix_stack_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)pygpu_matrix_stack_context_exit, METH_VARARGS}, {"__exit__", (PyCFunction)bpygpu_matrix_stack_context_exit, METH_VARARGS},
{NULL} {NULL}
}; };
static PyTypeObject pygpu_matrix_stack_context_Type = { static PyTypeObject BPyGPU_matrix_stack_context_Type = {
PyVarObject_HEAD_INIT(NULL, 0) PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "GPUMatrixStackContext", .tp_name = "GPUMatrixStackContext",
.tp_basicsize = sizeof(BPy_GPU_MatrixStackContext), .tp_basicsize = sizeof(BPyGPU_MatrixStackContext),
.tp_flags = Py_TPFLAGS_DEFAULT, .tp_flags = Py_TPFLAGS_DEFAULT,
.tp_methods = pygpu_matrix_stack_context_methods, .tp_methods = bpygpu_matrix_stack_context_methods,
}; };
static PyObject *pygpu_matrix_stack_context_enter(BPy_GPU_MatrixStackContext *self) static PyObject *bpygpu_matrix_stack_context_enter(BPyGPU_MatrixStackContext *self)
{ {
/* sanity - should never happen */ /* sanity - should never happen */
if (self->level != -1) { if (self->level != -1) {
@@ -194,14 +197,14 @@ static PyObject *pygpu_matrix_stack_context_enter(BPy_GPU_MatrixStackContext *se
} }
if (self->type == PYGPU_MATRIX_TYPE_MODEL_VIEW) { if (self->type == PYGPU_MATRIX_TYPE_MODEL_VIEW) {
if (!pygpu_stack_is_push_model_view_ok_or_error()) { if (!bpygpu_stack_is_push_model_view_ok_or_error()) {
return NULL; return NULL;
} }
GPU_matrix_push(); GPU_matrix_push();
self->level = GPU_matrix_stack_level_get_model_view(); self->level = GPU_matrix_stack_level_get_model_view();
} }
else if (self->type == PYGPU_MATRIX_TYPE_PROJECTION) { else if (self->type == PYGPU_MATRIX_TYPE_PROJECTION) {
if (!pygpu_stack_is_push_projection_ok_or_error()) { if (!bpygpu_stack_is_push_projection_ok_or_error()) {
return NULL; return NULL;
} }
GPU_matrix_push_projection(); GPU_matrix_push_projection();
@@ -213,7 +216,7 @@ static PyObject *pygpu_matrix_stack_context_enter(BPy_GPU_MatrixStackContext *se
Py_RETURN_NONE; Py_RETURN_NONE;
} }
static PyObject *pygpu_matrix_stack_context_exit(BPy_GPU_MatrixStackContext *self, PyObject *UNUSED(args)) static PyObject *bpygpu_matrix_stack_context_exit(BPyGPU_MatrixStackContext *self, PyObject *UNUSED(args))
{ {
/* sanity - should never happen */ /* sanity - should never happen */
if (self->level == -1) { if (self->level == -1) {
@@ -246,32 +249,32 @@ finally:
Py_RETURN_NONE; Py_RETURN_NONE;
} }
static PyObject *pygpu_matrix_push_pop_impl(int type) static PyObject *bpygpu_matrix_push_pop_impl(int type)
{ {
BPy_GPU_MatrixStackContext *ret = PyObject_New(BPy_GPU_MatrixStackContext, &pygpu_matrix_stack_context_Type); BPyGPU_MatrixStackContext *ret = PyObject_New(BPyGPU_MatrixStackContext, &BPyGPU_matrix_stack_context_Type);
ret->type = type; ret->type = type;
ret->level = -1; ret->level = -1;
return (PyObject *)ret; return (PyObject *)ret;
} }
PyDoc_STRVAR(pygpu_matrix_push_pop_doc, PyDoc_STRVAR(bpygpu_matrix_push_pop_doc,
"push_pop()\n" "push_pop()\n"
"\n" "\n"
" Context manager to ensure balanced push/pop calls, even in the case of an error.\n" " Context manager to ensure balanced push/pop calls, even in the case of an error.\n"
); );
static PyObject *pygpu_matrix_push_pop(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_push_pop(PyObject *UNUSED(self))
{ {
return pygpu_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_MODEL_VIEW); return bpygpu_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_MODEL_VIEW);
} }
PyDoc_STRVAR(pygpu_matrix_push_pop_projection_doc, PyDoc_STRVAR(bpygpu_matrix_push_pop_projection_doc,
"push_pop_projection()\n" "push_pop_projection()\n"
"\n" "\n"
" Context manager to ensure balanced push/pop calls, even in the case of an error.\n" " Context manager to ensure balanced push/pop calls, even in the case of an error.\n"
); );
static PyObject *pygpu_matrix_push_pop_projection(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_push_pop_projection(PyObject *UNUSED(self))
{ {
return pygpu_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_PROJECTION); return bpygpu_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_PROJECTION);
} }
/** \} */ /** \} */
@@ -280,7 +283,7 @@ static PyObject *pygpu_matrix_push_pop_projection(PyObject *UNUSED(self))
/** \name Manipulate State /** \name Manipulate State
* \{ */ * \{ */
PyDoc_STRVAR(pygpu_matrix_multiply_matrix_doc, PyDoc_STRVAR(bpygpu_matrix_multiply_matrix_doc,
"multiply_matrix(matrix)\n" "multiply_matrix(matrix)\n"
"\n" "\n"
" Multiply the current stack matrix.\n" " Multiply the current stack matrix.\n"
@@ -288,7 +291,7 @@ PyDoc_STRVAR(pygpu_matrix_multiply_matrix_doc,
" :param matrix: A 4x4 matrix.\n" " :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n" " :type matrix: :class:`mathutils.Matrix`\n"
); );
static PyObject *pygpu_matrix_multiply_matrix(PyObject *UNUSED(self), PyObject *value) static PyObject *bpygpu_matrix_multiply_matrix(PyObject *UNUSED(self), PyObject *value)
{ {
MatrixObject *pymat; MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) { if (!Matrix_Parse4x4(value, &pymat)) {
@@ -298,7 +301,7 @@ static PyObject *pygpu_matrix_multiply_matrix(PyObject *UNUSED(self), PyObject *
Py_RETURN_NONE; Py_RETURN_NONE;
} }
PyDoc_STRVAR(pygpu_matrix_scale_doc, PyDoc_STRVAR(bpygpu_matrix_scale_doc,
"scale(scale)\n" "scale(scale)\n"
"\n" "\n"
" Scale the current stack matrix.\n" " Scale the current stack matrix.\n"
@@ -306,7 +309,7 @@ PyDoc_STRVAR(pygpu_matrix_scale_doc,
" :param scale: Scale the current stack matrix.\n" " :param scale: Scale the current stack matrix.\n"
" :type scale: sequence of 2 or 3 floats\n" " :type scale: sequence of 2 or 3 floats\n"
); );
static PyObject *pygpu_matrix_scale(PyObject *UNUSED(self), PyObject *value) static PyObject *bpygpu_matrix_scale(PyObject *UNUSED(self), PyObject *value)
{ {
float scale[3]; float scale[3];
int len; int len;
@@ -322,13 +325,13 @@ static PyObject *pygpu_matrix_scale(PyObject *UNUSED(self), PyObject *value)
Py_RETURN_NONE; Py_RETURN_NONE;
} }
PyDoc_STRVAR(pygpu_matrix_scale_uniform_doc, PyDoc_STRVAR(bpygpu_matrix_scale_uniform_doc,
"scale_uniform(scale)\n" "scale_uniform(scale)\n"
"\n" "\n"
" :param scale: Scale the current stack matrix.\n" " :param scale: Scale the current stack matrix.\n"
" :type scale: sequence of 2 or 3 floats\n" " :type scale: sequence of 2 or 3 floats\n"
); );
static PyObject *pygpu_matrix_scale_uniform(PyObject *UNUSED(self), PyObject *value) static PyObject *bpygpu_matrix_scale_uniform(PyObject *UNUSED(self), PyObject *value)
{ {
float scalar; float scalar;
if ((scalar = PyFloat_AsDouble(value)) == -1.0f && PyErr_Occurred()) { if ((scalar = PyFloat_AsDouble(value)) == -1.0f && PyErr_Occurred()) {
@@ -341,7 +344,7 @@ static PyObject *pygpu_matrix_scale_uniform(PyObject *UNUSED(self), PyObject *va
Py_RETURN_NONE; Py_RETURN_NONE;
} }
PyDoc_STRVAR(pygpu_matrix_translate_doc, PyDoc_STRVAR(bpygpu_matrix_translate_doc,
"translate(offset)\n" "translate(offset)\n"
"\n" "\n"
" Scale the current stack matrix.\n" " Scale the current stack matrix.\n"
@@ -349,7 +352,7 @@ PyDoc_STRVAR(pygpu_matrix_translate_doc,
" :param offset: Translate the current stack matrix.\n" " :param offset: Translate the current stack matrix.\n"
" :type offset: sequence of 2 or 3 floats\n" " :type offset: sequence of 2 or 3 floats\n"
); );
static PyObject *pygpu_matrix_translate(PyObject *UNUSED(self), PyObject *value) static PyObject *bpygpu_matrix_translate(PyObject *UNUSED(self), PyObject *value)
{ {
float offset[3]; float offset[3];
int len; int len;
@@ -371,29 +374,29 @@ static PyObject *pygpu_matrix_translate(PyObject *UNUSED(self), PyObject *value)
/** \name Write State /** \name Write State
* \{ */ * \{ */
PyDoc_STRVAR(pygpu_matrix_reset_doc, PyDoc_STRVAR(bpygpu_matrix_reset_doc,
"reset()\n" "reset()\n"
"\n" "\n"
" Empty stack and set to identity.\n" " Empty stack and set to identity.\n"
); );
static PyObject *pygpu_matrix_reset(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_reset(PyObject *UNUSED(self))
{ {
GPU_matrix_reset(); GPU_matrix_reset();
Py_RETURN_NONE; Py_RETURN_NONE;
} }
PyDoc_STRVAR(pygpu_matrix_load_identity_doc, PyDoc_STRVAR(bpygpu_matrix_load_identity_doc,
"load_identity()\n" "load_identity()\n"
"\n" "\n"
" Empty stack and set to identity.\n" " Empty stack and set to identity.\n"
); );
static PyObject *pygpu_matrix_load_identity(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_load_identity(PyObject *UNUSED(self))
{ {
GPU_matrix_identity_set(); GPU_matrix_identity_set();
Py_RETURN_NONE; Py_RETURN_NONE;
} }
PyDoc_STRVAR(pygpu_matrix_load_matrix_doc, PyDoc_STRVAR(bpygpu_matrix_load_matrix_doc,
"load_matrix(matrix)\n" "load_matrix(matrix)\n"
"\n" "\n"
" Load a matrix into the stack.\n" " Load a matrix into the stack.\n"
@@ -401,7 +404,7 @@ PyDoc_STRVAR(pygpu_matrix_load_matrix_doc,
" :param matrix: A 4x4 matrix.\n" " :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n" " :type matrix: :class:`mathutils.Matrix`\n"
); );
static PyObject *pygpu_matrix_load_matrix(PyObject *UNUSED(self), PyObject *value) static PyObject *bpygpu_matrix_load_matrix(PyObject *UNUSED(self), PyObject *value)
{ {
MatrixObject *pymat; MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) { if (!Matrix_Parse4x4(value, &pymat)) {
@@ -417,7 +420,7 @@ static PyObject *pygpu_matrix_load_matrix(PyObject *UNUSED(self), PyObject *valu
/** \name Read State /** \name Read State
* \{ */ * \{ */
PyDoc_STRVAR(pygpu_matrix_get_projection_matrix_doc, PyDoc_STRVAR(bpygpu_matrix_get_projection_matrix_doc,
"get_projection_matrix()\n" "get_projection_matrix()\n"
"\n" "\n"
" Return a copy of the projection matrix.\n" " Return a copy of the projection matrix.\n"
@@ -425,7 +428,7 @@ PyDoc_STRVAR(pygpu_matrix_get_projection_matrix_doc,
" :return: A 4x4 projection matrix.\n" " :return: A 4x4 projection matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n" " :rtype: :class:`mathutils.Matrix`\n"
); );
static PyObject *pygpu_matrix_get_projection_matrix(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_get_projection_matrix(PyObject *UNUSED(self))
{ {
float matrix[4][4]; float matrix[4][4];
GPU_matrix_model_view_get(matrix); GPU_matrix_model_view_get(matrix);
@@ -433,7 +436,7 @@ static PyObject *pygpu_matrix_get_projection_matrix(PyObject *UNUSED(self))
} }
PyDoc_STRVAR(pygpu_matrix_get_modal_view_matrix_doc, PyDoc_STRVAR(bpygpu_matrix_get_modal_view_matrix_doc,
"get_view_matrix()\n" "get_view_matrix()\n"
"\n" "\n"
" Return a copy of the view matrix.\n" " Return a copy of the view matrix.\n"
@@ -441,14 +444,14 @@ PyDoc_STRVAR(pygpu_matrix_get_modal_view_matrix_doc,
" :return: A 4x4 view matrix.\n" " :return: A 4x4 view matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n" " :rtype: :class:`mathutils.Matrix`\n"
); );
static PyObject *pygpu_matrix_get_modal_view_matrix(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_get_modal_view_matrix(PyObject *UNUSED(self))
{ {
float matrix[4][4]; float matrix[4][4];
GPU_matrix_projection_get(matrix); GPU_matrix_projection_get(matrix);
return Matrix_CreatePyObject(&matrix[0][0], 4, 4, NULL); return Matrix_CreatePyObject(&matrix[0][0], 4, 4, NULL);
} }
PyDoc_STRVAR(pygpu_matrix_get_normal_matrix_doc, PyDoc_STRVAR(bpygpu_matrix_get_normal_matrix_doc,
"get_normal_matrix()\n" "get_normal_matrix()\n"
"\n" "\n"
" Return a copy of the normal matrix.\n" " Return a copy of the normal matrix.\n"
@@ -456,7 +459,7 @@ PyDoc_STRVAR(pygpu_matrix_get_normal_matrix_doc,
" :return: A 3x3 normal matrix.\n" " :return: A 3x3 normal matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n" " :rtype: :class:`mathutils.Matrix`\n"
); );
static PyObject *pygpu_matrix_get_normal_matrix(PyObject *UNUSED(self)) static PyObject *bpygpu_matrix_get_normal_matrix(PyObject *UNUSED(self))
{ {
float matrix[3][3]; float matrix[3][3];
GPU_matrix_normal_get(matrix); GPU_matrix_normal_get(matrix);
@@ -469,80 +472,80 @@ static PyObject *pygpu_matrix_get_normal_matrix(PyObject *UNUSED(self))
/** \name Module /** \name Module
* \{ */ * \{ */
static struct PyMethodDef BPy_GPU_matrix_methods[] = { static struct PyMethodDef bpygpu_matrix_methods[] = {
/* Manage Stack */ /* Manage Stack */
{"push", (PyCFunction)pygpu_matrix_push, {"push", (PyCFunction)bpygpu_matrix_push,
METH_NOARGS, pygpu_matrix_push_doc}, METH_NOARGS, bpygpu_matrix_push_doc},
{"pop", (PyCFunction)pygpu_matrix_pop, {"pop", (PyCFunction)bpygpu_matrix_pop,
METH_NOARGS, pygpu_matrix_pop_doc}, METH_NOARGS, bpygpu_matrix_pop_doc},
{"push_projection", (PyCFunction)pygpu_matrix_push_projection, {"push_projection", (PyCFunction)bpygpu_matrix_push_projection,
METH_NOARGS, pygpu_matrix_push_projection_doc}, METH_NOARGS, bpygpu_matrix_push_projection_doc},
{"pop_projection", (PyCFunction)pygpu_matrix_pop_projection, {"pop_projection", (PyCFunction)bpygpu_matrix_pop_projection,
METH_NOARGS, pygpu_matrix_pop_projection_doc}, METH_NOARGS, bpygpu_matrix_pop_projection_doc},
/* Stack (Context Manager) */ /* Stack (Context Manager) */
{"push_pop", (PyCFunction)pygpu_matrix_push_pop, {"push_pop", (PyCFunction)bpygpu_matrix_push_pop,
METH_NOARGS, pygpu_matrix_push_pop_doc}, METH_NOARGS, bpygpu_matrix_push_pop_doc},
{"push_pop_projection", (PyCFunction)pygpu_matrix_push_pop_projection, {"push_pop_projection", (PyCFunction)bpygpu_matrix_push_pop_projection,
METH_NOARGS, pygpu_matrix_push_pop_projection_doc}, METH_NOARGS, bpygpu_matrix_push_pop_projection_doc},
/* Manipulate State */ /* Manipulate State */
{"multiply_matrix", (PyCFunction)pygpu_matrix_multiply_matrix, {"multiply_matrix", (PyCFunction)bpygpu_matrix_multiply_matrix,
METH_O, pygpu_matrix_multiply_matrix_doc}, METH_O, bpygpu_matrix_multiply_matrix_doc},
{"scale", (PyCFunction)pygpu_matrix_scale, {"scale", (PyCFunction)bpygpu_matrix_scale,
METH_O, pygpu_matrix_scale_doc}, METH_O, bpygpu_matrix_scale_doc},
{"scale_uniform", (PyCFunction)pygpu_matrix_scale_uniform, {"scale_uniform", (PyCFunction)bpygpu_matrix_scale_uniform,
METH_O, pygpu_matrix_scale_uniform_doc}, METH_O, bpygpu_matrix_scale_uniform_doc},
{"translate", (PyCFunction)pygpu_matrix_translate, {"translate", (PyCFunction)bpygpu_matrix_translate,
METH_O, pygpu_matrix_translate_doc}, METH_O, bpygpu_matrix_translate_doc},
/* TODO */ /* TODO */
#if 0 #if 0
{"rotate", (PyCFunction)pygpu_matrix_rotate, {"rotate", (PyCFunction)bpygpu_matrix_rotate,
METH_O, pygpu_matrix_rotate_doc}, METH_O, bpygpu_matrix_rotate_doc},
{"rotate_axis", (PyCFunction)pygpu_matrix_rotate_axis, {"rotate_axis", (PyCFunction)bpygpu_matrix_rotate_axis,
METH_O, pygpu_matrix_rotate_axis_doc}, METH_O, bpygpu_matrix_rotate_axis_doc},
{"look_at", (PyCFunction)pygpu_matrix_look_at, {"look_at", (PyCFunction)bpygpu_matrix_look_at,
METH_O, pygpu_matrix_look_at_doc}, METH_O, bpygpu_matrix_look_at_doc},
#endif #endif
/* Write State */ /* Write State */
{"reset", (PyCFunction)pygpu_matrix_reset, {"reset", (PyCFunction)bpygpu_matrix_reset,
METH_NOARGS, pygpu_matrix_reset_doc}, METH_NOARGS, bpygpu_matrix_reset_doc},
{"load_identity", (PyCFunction)pygpu_matrix_load_identity, {"load_identity", (PyCFunction)bpygpu_matrix_load_identity,
METH_NOARGS, pygpu_matrix_load_identity_doc}, METH_NOARGS, bpygpu_matrix_load_identity_doc},
{"load_matrix", (PyCFunction)pygpu_matrix_load_matrix, {"load_matrix", (PyCFunction)bpygpu_matrix_load_matrix,
METH_O, pygpu_matrix_load_matrix_doc}, METH_O, bpygpu_matrix_load_matrix_doc},
/* Read State */ /* Read State */
{"get_projection_matrix", (PyCFunction)pygpu_matrix_get_projection_matrix, {"get_projection_matrix", (PyCFunction)bpygpu_matrix_get_projection_matrix,
METH_NOARGS, pygpu_matrix_get_projection_matrix_doc}, METH_NOARGS, bpygpu_matrix_get_projection_matrix_doc},
{"get_model_view_matrix", (PyCFunction)pygpu_matrix_get_modal_view_matrix, {"get_model_view_matrix", (PyCFunction)bpygpu_matrix_get_modal_view_matrix,
METH_NOARGS, pygpu_matrix_get_modal_view_matrix_doc}, METH_NOARGS, bpygpu_matrix_get_modal_view_matrix_doc},
{"get_normal_matrix", (PyCFunction)pygpu_matrix_get_normal_matrix, {"get_normal_matrix", (PyCFunction)bpygpu_matrix_get_normal_matrix,
METH_NOARGS, pygpu_matrix_get_normal_matrix_doc}, METH_NOARGS, bpygpu_matrix_get_normal_matrix_doc},
{NULL, NULL, 0, NULL} {NULL, NULL, 0, NULL}
}; };
PyDoc_STRVAR(BPy_GPU_matrix_doc, PyDoc_STRVAR(bpygpu_matrix_doc,
"This module provides access to the matrix stack." "This module provides access to the matrix stack."
); );
static PyModuleDef BPy_GPU_matrix_module_def = { static PyModuleDef BPyGPU_matrix_module_def = {
PyModuleDef_HEAD_INIT, PyModuleDef_HEAD_INIT,
.m_name = "gpu.matrix", .m_name = "gpu.matrix",
.m_doc = BPy_GPU_matrix_doc, .m_doc = bpygpu_matrix_doc,
.m_methods = BPy_GPU_matrix_methods, .m_methods = bpygpu_matrix_methods,
}; };
PyObject *BPyInit_gpu_matrix(void) PyObject *BPyInit_gpu_matrix(void)
{ {
PyObject *submodule; PyObject *submodule;
submodule = PyModule_Create(&BPy_GPU_matrix_module_def); submodule = PyModule_Create(&BPyGPU_matrix_module_def);
if (PyType_Ready(&pygpu_matrix_stack_context_Type) < 0) { if (PyType_Ready(&BPyGPU_matrix_stack_context_Type) < 0) {
return NULL; return NULL;
} }

View File

@@ -15,29 +15,16 @@
* along with this program; if not, write to the Free Software Foundation, * along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* *
* The Original Code is Copyright (C) 2005 Blender Foundation.
* All rights reserved.
*
* The Original Code is: all of this file.
*
* Contributor(s): Benoit Bolsee.
*
* ***** END GPL LICENSE BLOCK ***** * ***** END GPL LICENSE BLOCK *****
*/ */
/** \file blender/python/intern/gpu.h /** \file blender/python/gpu/gpu_py_matrix.h
* \ingroup pythonintern * \ingroup bpygpu
*
* Initializes the gpu Python module.
*/ */
#ifndef __GPU_H__ #ifndef __GPU_PY_MATRIX_H__
#define __GPU_H__ #define __GPU_PY_MATRIX_H__
PyObject *GPU_initPython(void);
PyObject *BPyInit_gpu_offscreen(void);
PyObject *BPyInit_gpu_matrix(void); PyObject *BPyInit_gpu_matrix(void);
PyObject *BPyInit_gpu_select(void);
#endif /* __GPU_H__ */ #endif /* __GPU_PY_MATRIX_H__ */

View File

@@ -0,0 +1,336 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2015, Blender Foundation.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/python/gpu/gpu_py_offscreen.c
* \ingroup bpygpu
*
* This file defines the offscreen functionalities of the 'gpu' module
* used for off-screen OpenGL rendering.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BKE_global.h"
#include "BKE_library.h"
#include "BKE_scene.h"
#include "DNA_screen_types.h"
#include "DNA_view3d_types.h"
#include "GPU_framebuffer.h"
#include "GPU_texture.h"
#include "../editors/include/ED_view3d.h"
#include "../mathutils/mathutils.h"
#include "../generic/py_capi_utils.h"
#include "gpu_py_offscreen.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name GPUOffscreen Type
* \{ */
static PyObject *bpygpu_offscreen_new(PyTypeObject *UNUSED(self), PyObject *args, PyObject *kwds)
{
static const char *kwlist[] = {"width", "height", "samples", NULL};
GPUOffScreen *ofs;
int width, height, samples = 0;
char err_out[256];
if (!PyArg_ParseTupleAndKeywords(
args, kwds, "ii|i:new", (char **)(kwlist),
&width, &height, &samples))
{
return NULL;
}
ofs = GPU_offscreen_create(width, height, samples, true, false, err_out);
if (ofs == NULL) {
PyErr_Format(PyExc_RuntimeError,
"gpu.offscreen.new(...) failed with '%s'",
err_out[0] ? err_out : "unknown error");
return NULL;
}
return BPyGPUOffScreen_CreatePyObject(ofs);
}
static int bpygpu_offscreen_valid_check(BPyGPUOffScreen *bpygpu_ofs)
{
if (UNLIKELY(bpygpu_ofs->ofs == NULL)) {
PyErr_SetString(PyExc_ReferenceError, "GPU offscreen was freed, no further access is valid");
return -1;
}
return 0;
}
#define BPY_GPU_OFFSCREEN_CHECK_OBJ(bpygpu) { \
if (UNLIKELY(bpygpu_offscreen_valid_check(bpygpu) == -1)) { \
return NULL; \
} \
} ((void)0)
PyDoc_STRVAR(bpygpu_offscreen_width_doc, "Texture width.\n\n:type: int");
static PyObject *bpygpu_offscreen_width_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
return PyLong_FromLong(GPU_offscreen_width(self->ofs));
}
PyDoc_STRVAR(bpygpu_offscreen_height_doc, "Texture height.\n\n:type: int");
static PyObject *bpygpu_offscreen_height_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
return PyLong_FromLong(GPU_offscreen_height(self->ofs));
}
PyDoc_STRVAR(bpygpu_offscreen_color_texture_doc, "Color texture.\n\n:type: int");
static PyObject *bpygpu_offscreen_color_texture_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
GPUTexture *texture = GPU_offscreen_color_texture(self->ofs);
return PyLong_FromLong(GPU_texture_opengl_bindcode(texture));
}
PyDoc_STRVAR(bpygpu_offscreen_bind_doc,
"bind(save=True)\n"
"\n"
" Bind the offscreen object.\n"
"\n"
" :param save: save OpenGL current states.\n"
" :type save: bool\n"
);
static PyObject *bpygpu_offscreen_bind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
static const char *kwlist[] = {"save", NULL};
bool save = true;
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
if (!PyArg_ParseTupleAndKeywords(
args, kwds, "|O&:bind", (char **)(kwlist),
PyC_ParseBool, &save))
{
return NULL;
}
GPU_offscreen_bind(self->ofs, save);
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_offscreen_unbind_doc,
"unbind(restore=True)\n"
"\n"
" Unbind the offscreen object.\n"
"\n"
" :param restore: restore OpenGL previous states.\n"
" :type restore: bool\n"
);
static PyObject *bpygpu_offscreen_unbind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
static const char *kwlist[] = {"restore", NULL};
bool restore = true;
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
if (!PyArg_ParseTupleAndKeywords(
args, kwds, "|O&:unbind", (char **)(kwlist),
PyC_ParseBool, &restore))
{
return NULL;
}
GPU_offscreen_unbind(self->ofs, restore);
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_offscreen_draw_view3d_doc,
"draw_view3d(scene, view3d, region, modelview_matrix, projection_matrix)\n"
"\n"
" Draw the 3d viewport in the offscreen object.\n"
"\n"
" :param scene: Scene to draw.\n"
" :type scene: :class:`bpy.types.Scene`\n"
" :param view3d: 3D View to get the drawing settings from.\n"
" :type view3d: :class:`bpy.types.SpaceView3D`\n"
" :param region: Region of the 3D View.\n"
" :type region: :class:`bpy.types.Region`\n"
" :param modelview_matrix: ModelView Matrix.\n"
" :type modelview_matrix: :class:`mathutils.Matrix`\n"
" :param projection_matrix: Projection Matrix.\n"
" :type projection_matrix: :class:`mathutils.Matrix`\n"
);
static PyObject *bpygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
static const char *kwlist[] = {"scene", "view_layer", "view3d", "region", "projection_matrix", "modelview_matrix", NULL};
MatrixObject *py_mat_modelview, *py_mat_projection;
PyObject *py_scene, *py_view_layer, *py_region, *py_view3d;
struct Depsgraph *depsgraph;
struct Scene *scene;
struct ViewLayer *view_layer;
View3D *v3d;
ARegion *ar;
struct RV3DMatrixStore *rv3d_mats;
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
if (!PyArg_ParseTupleAndKeywords(
args, kwds, "OOOOO&O&:draw_view3d", (char **)(kwlist),
&py_scene, &py_view_layer, &py_view3d, &py_region,
Matrix_Parse4x4, &py_mat_projection,
Matrix_Parse4x4, &py_mat_modelview) ||
(!(scene = PyC_RNA_AsPointer(py_scene, "Scene")) ||
!(view_layer = PyC_RNA_AsPointer(py_view_layer, "ViewLayer")) ||
!(v3d = PyC_RNA_AsPointer(py_view3d, "SpaceView3D")) ||
!(ar = PyC_RNA_AsPointer(py_region, "Region"))))
{
return NULL;
}
BLI_assert(BKE_id_is_in_gobal_main(&scene->id));
depsgraph = BKE_scene_get_depsgraph(scene, view_layer, true);
rv3d_mats = ED_view3d_mats_rv3d_backup(ar->regiondata);
GPU_offscreen_bind(self->ofs, true); /* bind */
ED_view3d_draw_offscreen(depsgraph,
scene,
v3d->shading.type,
v3d,
ar,
GPU_offscreen_width(self->ofs),
GPU_offscreen_height(self->ofs),
(float(*)[4])py_mat_modelview->matrix,
(float(*)[4])py_mat_projection->matrix,
false,
true,
"",
NULL,
self->ofs,
NULL);
GPU_offscreen_unbind(self->ofs, true); /* unbind */
ED_view3d_mats_rv3d_restore(ar->regiondata, rv3d_mats);
MEM_freeN(rv3d_mats);
Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_offscreen_free_doc,
"free()\n"
"\n"
" Free the offscreen object\n"
" The framebuffer, texture and render objects will no longer be accessible.\n"
);
static PyObject *bpygpu_offscreen_free(BPyGPUOffScreen *self)
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
GPU_offscreen_free(self->ofs);
self->ofs = NULL;
Py_RETURN_NONE;
}
static void BPyGPUOffScreen__tp_dealloc(BPyGPUOffScreen *self)
{
if (self->ofs)
GPU_offscreen_free(self->ofs);
Py_TYPE(self)->tp_free((PyObject *)self);
}
static PyGetSetDef bpygpu_offscreen_getseters[] = {
{(char *)"color_texture", (getter)bpygpu_offscreen_color_texture_get, (setter)NULL, bpygpu_offscreen_color_texture_doc, NULL},
{(char *)"width", (getter)bpygpu_offscreen_width_get, (setter)NULL, bpygpu_offscreen_width_doc, NULL},
{(char *)"height", (getter)bpygpu_offscreen_height_get, (setter)NULL, bpygpu_offscreen_height_doc, NULL},
{NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};
static struct PyMethodDef bpygpu_offscreen_methods[] = {
{"bind", (PyCFunction)bpygpu_offscreen_bind, METH_VARARGS | METH_KEYWORDS, bpygpu_offscreen_bind_doc},
{"unbind", (PyCFunction)bpygpu_offscreen_unbind, METH_VARARGS | METH_KEYWORDS, bpygpu_offscreen_unbind_doc},
{"draw_view3d", (PyCFunction)bpygpu_offscreen_draw_view3d, METH_VARARGS | METH_KEYWORDS, bpygpu_offscreen_draw_view3d_doc},
{"free", (PyCFunction)bpygpu_offscreen_free, METH_NOARGS, bpygpu_offscreen_free_doc},
{NULL, NULL, 0, NULL}
};
PyDoc_STRVAR(bpygpu_offscreen_doc,
"GPUOffScreen(width, height, samples=0)\n"
"\n"
" This object gives access to off screen buffers.\n"
"\n"
" :param width: Horizontal dimension of the buffer.\n"
" :type width: `int`\n"
" :param height: Vertical dimension of the buffer.\n"
" :type height: `int`\n"
" :param samples: OpenGL samples to use for MSAA or zero to disable.\n"
" :type samples: `int`\n"
);
PyTypeObject BPyGPUOffScreen_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "GPUOffScreen",
.tp_basicsize = sizeof(BPyGPUOffScreen),
.tp_dealloc = (destructor)BPyGPUOffScreen__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = bpygpu_offscreen_doc,
.tp_methods = bpygpu_offscreen_methods,
.tp_getset = bpygpu_offscreen_getseters,
.tp_new = bpygpu_offscreen_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
PyObject *BPyGPUOffScreen_CreatePyObject(GPUOffScreen *ofs)
{
BPyGPUOffScreen *self;
self = PyObject_New(BPyGPUOffScreen, &BPyGPUOffScreen_Type);
self->ofs = ofs;
return (PyObject *)self;
}
/** \} */
#undef BPY_GPU_OFFSCREEN_CHECK_OBJ

View File

@@ -0,0 +1,41 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/python/gpu/gpu_py_offscreen.h
* \ingroup bpygpu
*/
#ifndef __GPU_PY_OFFSCREEN_H__
#define __GPU_PY_OFFSCREEN_H__
#include "BLI_compiler_attrs.h"
extern PyTypeObject BPyGPUOffScreen_Type;
#define BPyGPUOffScreen_Check(v) (Py_TYPE(v) == &BPyGPUOffScreen_Type)
typedef struct BPyGPUOffScreen {
PyObject_HEAD
struct GPUOffScreen *ofs;
} BPyGPUOffScreen;
PyObject *BPyGPUOffScreen_CreatePyObject(struct GPUOffScreen *ofs) ATTR_NONNULL(1);
#endif /* __GPU_PY_OFFSCREEN_H__ */

View File

@@ -18,13 +18,16 @@
* ***** END GPL LICENSE BLOCK ***** * ***** END GPL LICENSE BLOCK *****
*/ */
/** \file blender/python/intern/gpu_py_select.c /** \file blender/python/gpu/gpu_py_select.c
* \ingroup pythonintern * \ingroup bpygpu
* *
* This file defines the gpu.select API. * This file defines the gpu.select API.
* *
* \note Currently only used for gizmo selection, * \note Currently only used for gizmo selection,
* will need to add begin/end and a way to access the hits. * will need to add begin/end and a way to access the hits.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/ */
#include <Python.h> #include <Python.h>
@@ -33,15 +36,15 @@
#include "../generic/py_capi_utils.h" #include "../generic/py_capi_utils.h"
#include "gpu.h"
#include "GPU_select.h" #include "GPU_select.h"
#include "gpu_py_select.h" /* own include */
/* -------------------------------------------------------------------- */ /* -------------------------------------------------------------------- */
/** \name Methods /** \name Methods
* \{ */ * \{ */
PyDoc_STRVAR(pygpu_select_load_id_doc, PyDoc_STRVAR(bpygpu_select_load_id_doc,
"load_id(id)\n" "load_id(id)\n"
"\n" "\n"
" Set the selection ID.\n" " Set the selection ID.\n"
@@ -49,7 +52,7 @@ PyDoc_STRVAR(pygpu_select_load_id_doc,
" :param id: Number (32-bit unsigned int).\n" " :param id: Number (32-bit unsigned int).\n"
" :type select: int\n" " :type select: int\n"
); );
static PyObject *pygpu_select_load_id(PyObject *UNUSED(self), PyObject *value) static PyObject *bpygpu_select_load_id(PyObject *UNUSED(self), PyObject *value)
{ {
uint id; uint id;
if ((id = PyC_Long_AsU32(value)) == (uint)-1) { if ((id = PyC_Long_AsU32(value)) == (uint)-1) {
@@ -64,27 +67,27 @@ static PyObject *pygpu_select_load_id(PyObject *UNUSED(self), PyObject *value)
/** \name Module /** \name Module
* \{ */ * \{ */
static struct PyMethodDef BPy_GPU_select_methods[] = { static struct PyMethodDef bpygpu_select_methods[] = {
/* Manage Stack */ /* Manage Stack */
{"load_id", (PyCFunction)pygpu_select_load_id, METH_O, pygpu_select_load_id_doc}, {"load_id", (PyCFunction)bpygpu_select_load_id, METH_O, bpygpu_select_load_id_doc},
{NULL, NULL, 0, NULL} {NULL, NULL, 0, NULL}
}; };
PyDoc_STRVAR(BPy_GPU_select_doc, PyDoc_STRVAR(bpygpu_select_doc,
"This module provides access to selection." "This module provides access to selection."
); );
static PyModuleDef BPy_GPU_select_module_def = { static PyModuleDef BPyGPU_select_module_def = {
PyModuleDef_HEAD_INIT, PyModuleDef_HEAD_INIT,
.m_name = "gpu.select", .m_name = "gpu.select",
.m_doc = BPy_GPU_select_doc, .m_doc = bpygpu_select_doc,
.m_methods = BPy_GPU_select_methods, .m_methods = bpygpu_select_methods,
}; };
PyObject *BPyInit_gpu_select(void) PyObject *BPyInit_gpu_select(void)
{ {
PyObject *submodule; PyObject *submodule;
submodule = PyModule_Create(&BPy_GPU_select_module_def); submodule = PyModule_Create(&BPyGPU_select_module_def);
return submodule; return submodule;
} }

View File

@@ -0,0 +1,30 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/python/gpu/gpu_py_api.h
* \ingroup bpygpu
*/
#ifndef __GPU_PY_SELECT_H__
#define __GPU_PY_SELECT_H__
PyObject *BPyInit_gpu_select(void);
#endif /* __GPU_PY_SELECT_H__ */

View File

@@ -19,7 +19,7 @@
*/ */
/** \file blender/python/gpu/gpu_py_types.c /** \file blender/python/gpu/gpu_py_types.c
* \ingroup pygpu * \ingroup bpygpu
* *
* - Use ``bpygpu_`` for local API. * - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API. * - Use ``BPyGPU`` for public API.
@@ -27,751 +27,27 @@
#include <Python.h> #include <Python.h>
#include "GPU_batch.h"
#include "GPU_vertex_format.h"
#include "BLI_math.h"
#include "MEM_guardedalloc.h"
#include "../generic/py_capi_utils.h" #include "../generic/py_capi_utils.h"
#include "../generic/python_utildefines.h" #include "../generic/python_utildefines.h"
#include "gpu_py_types.h" /* own include */ #include "gpu_py_types.h" /* own include */
#ifdef __BIG_ENDIAN__
/* big endian */
# define MAKE_ID2(c, d) ((c) << 8 | (d))
# define MAKE_ID3(a, b, c) ( (int)(a) << 24 | (int)(b) << 16 | (c) << 8 )
# define MAKE_ID4(a, b, c, d) ( (int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d) )
#else
/* little endian */
# define MAKE_ID2(c, d) ((d) << 8 | (c))
# define MAKE_ID3(a, b, c) ( (int)(c) << 16 | (b) << 8 | (a) )
# define MAKE_ID4(a, b, c, d) ( (int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a) )
#endif
/* -------------------------------------------------------------------- */
/** \name Enum Conversion
*
* Use with PyArg_ParseTuple's "O&" formatting.
* \{ */
static int bpygpu_ParseVertCompType(PyObject *o, void *p)
{
Py_ssize_t comp_type_id_len;
const char *comp_type_id = _PyUnicode_AsStringAndSize(o, &comp_type_id_len);
if (comp_type_id == NULL) {
PyErr_Format(PyExc_ValueError,
"expected a string, got %s",
Py_TYPE(o)->tp_name);
return 0;
}
GPUVertCompType comp_type;
if (comp_type_id_len == 2) {
switch (*((ushort *)comp_type_id)) {
case MAKE_ID2('I', '8'): { comp_type = GPU_COMP_I8; goto success; }
case MAKE_ID2('U', '8'): { comp_type = GPU_COMP_U8; goto success; }
}
}
else if (comp_type_id_len == 3) {
switch (*((uint *)comp_type_id)) {
case MAKE_ID3('I', '1', '6'): { comp_type = GPU_COMP_I16; goto success; }
case MAKE_ID3('U', '1', '6'): { comp_type = GPU_COMP_U16; goto success; }
case MAKE_ID3('I', '3', '2'): { comp_type = GPU_COMP_I32; goto success; }
case MAKE_ID3('U', '3', '2'): { comp_type = GPU_COMP_U32; goto success; }
case MAKE_ID3('F', '3', '2'): { comp_type = GPU_COMP_F32; goto success; }
case MAKE_ID3('I', '1', '0'): { comp_type = GPU_COMP_I10; goto success; }
}
}
PyErr_Format(PyExc_ValueError,
"unknown type literal: '%s'",
comp_type_id);
return 0;
success:
*((GPUVertCompType *)p) = comp_type;
return 1;
}
static int bpygpu_ParseVertFetchMode(PyObject *o, void *p)
{
Py_ssize_t mode_id_len;
const char *mode_id = _PyUnicode_AsStringAndSize(o, &mode_id_len);
if (mode_id == NULL) {
PyErr_Format(PyExc_ValueError,
"expected a string, got %s",
Py_TYPE(o)->tp_name);
return 0;
}
#define MATCH_ID(id) \
if (mode_id_len == strlen(STRINGIFY(id))) { \
if (STREQ(mode_id, STRINGIFY(id))) { \
mode = GPU_FETCH_##id; \
goto success; \
} \
} ((void)0)
GPUVertFetchMode mode;
MATCH_ID(FLOAT);
MATCH_ID(INT);
MATCH_ID(INT_TO_FLOAT_UNIT);
MATCH_ID(INT_TO_FLOAT);
#undef MATCH_ID
PyErr_Format(PyExc_ValueError,
"unknown type literal: '%s'",
mode_id);
return 0;
success:
(*(GPUVertFetchMode *)p) = mode;
return 1;
}
static int bpygpu_ParsePrimType(PyObject *o, void *p)
{
Py_ssize_t mode_id_len;
const char *mode_id = _PyUnicode_AsStringAndSize(o, &mode_id_len);
if (mode_id == NULL) {
PyErr_Format(PyExc_ValueError,
"expected a string, got %s",
Py_TYPE(o)->tp_name);
return 0;
}
#define MATCH_ID(id) \
if (mode_id_len == strlen(STRINGIFY(id))) { \
if (STREQ(mode_id, STRINGIFY(id))) { \
mode = GPU_PRIM_##id; \
goto success; \
} \
} ((void)0)
GPUPrimType mode;
MATCH_ID(POINTS);
MATCH_ID(LINES);
MATCH_ID(TRIS);
MATCH_ID(LINE_STRIP);
MATCH_ID(LINE_LOOP);
MATCH_ID(TRI_STRIP);
MATCH_ID(TRI_FAN);
MATCH_ID(LINE_STRIP_ADJ);
#undef MATCH_ID
PyErr_Format(PyExc_ValueError,
"unknown type literal: '%s'",
mode_id);
return 0;
success:
(*(GPUPrimType *)p) = mode;
return 1;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Utility Functions
* \{ */
#define PY_AS_NATIVE_SWITCH(attr) \
switch (attr->comp_type) { \
case GPU_COMP_I8: { PY_AS_NATIVE(int8_t, PyC_Long_AsI8); break; } \
case GPU_COMP_U8: { PY_AS_NATIVE(uint8_t, PyC_Long_AsU8); break; } \
case GPU_COMP_I16: { PY_AS_NATIVE(int16_t, PyC_Long_AsI16); break; } \
case GPU_COMP_U16: { PY_AS_NATIVE(uint16_t, PyC_Long_AsU16); break; } \
case GPU_COMP_I32: { PY_AS_NATIVE(int32_t, PyC_Long_AsI32); break; } \
case GPU_COMP_U32: { PY_AS_NATIVE(uint32_t, PyC_Long_AsU32); break; } \
case GPU_COMP_F32: { PY_AS_NATIVE(float, PyFloat_AsDouble); break; } \
default: \
BLI_assert(0); \
} ((void)0)
/* No error checking, callers must run PyErr_Occurred */
static void fill_format_elem(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
{
#define PY_AS_NATIVE(ty_dst, py_as_native) \
{ \
ty_dst *data_dst = data_dst_void; \
*data_dst = py_as_native(py_src); \
} ((void)0)
PY_AS_NATIVE_SWITCH(attr);
#undef PY_AS_NATIVE
}
/* No error checking, callers must run PyErr_Occurred */
static void fill_format_tuple(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
{
const uint len = attr->comp_len;
/**
* Args are constants, so range checks will be optimized out if they're nop's.
*/
#define PY_AS_NATIVE(ty_dst, py_as_native) \
ty_dst *data_dst = data_dst_void; \
for (uint i = 0; i < len; i++) { \
data_dst[i] = py_as_native(PyTuple_GET_ITEM(py_src, i)); \
} ((void)0)
PY_AS_NATIVE_SWITCH(attr);
#undef PY_AS_NATIVE
}
#undef PY_AS_NATIVE_SWITCH
#undef WARN_TYPE_LIMIT_PUSH
#undef WARN_TYPE_LIMIT_POP
static bool bpygpu_vertbuf_fill_impl(
GPUVertBuf *vbo,
uint data_id, PyObject *seq)
{
bool ok = true;
const GPUVertAttr *attr = &vbo->format.attribs[data_id];
GPUVertBufRaw data_step;
GPU_vertbuf_attr_get_raw_data(vbo, data_id, &data_step);
PyObject *seq_fast = PySequence_Fast(seq, "Vertex buffer fill");
if (seq_fast == NULL) {
goto finally;
}
const uint seq_len = PySequence_Fast_GET_SIZE(seq_fast);
if (seq_len != vbo->vertex_len) {
PyErr_Format(PyExc_ValueError,
"Expected a sequence of size %d, got %d",
vbo->vertex_len, seq_len);
}
PyObject **seq_items = PySequence_Fast_ITEMS(seq_fast);
if (attr->comp_len == 1) {
for (uint i = 0; i < seq_len; i++) {
uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
PyObject *item = seq_items[i];
fill_format_elem(data, item, attr);
}
}
else {
for (uint i = 0; i < seq_len; i++) {
uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
PyObject *item = seq_items[i];
if (!PyTuple_CheckExact(item)) {
PyErr_Format(PyExc_ValueError,
"expected a tuple, got %s",
Py_TYPE(item)->tp_name);
ok = false;
goto finally;
}
if (PyTuple_GET_SIZE(item) != attr->comp_len) {
PyErr_Format(PyExc_ValueError,
"expected a tuple of size %d, got %d",
attr->comp_len, PyTuple_GET_SIZE(item));
ok = false;
goto finally;
}
/* May trigger error, check below */
fill_format_tuple(data, item, attr);
}
}
if (PyErr_Occurred()) {
ok = false;
}
finally:
Py_DECREF(seq_fast);
return ok;
}
/* handy, but not used just now */
#if 0
static int bpygpu_find_id(const GPUVertFormat *fmt, const char *id)
{
for (int i = 0; i < fmt->attr_len; i++) {
for (uint j = 0; j < fmt->name_len; j++) {
if (STREQ(fmt->attribs[i].name[j], id)) {
return i;
}
}
}
return -1;
}
#endif
/** \} */
/* -------------------------------------------------------------------- */
/** \name VertFormat Type
* \{ */
/* tp_new for GPUVertFormat: the constructor accepts no arguments,
 * the format is populated afterwards via attr_add(). */
static PyObject *bpygpu_VertFormat_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
	const bool has_args = (PyTuple_GET_SIZE(args) != 0) || (kwds && PyDict_Size(kwds));
	if (has_args) {
		PyErr_SetString(PyExc_TypeError,
		                "VertFormat(): takes no arguments");
		return NULL;
	}
	/* NULL: start from a zeroed format. */
	return BPyGPUVertFormat_CreatePyObject(NULL);
}
PyDoc_STRVAR(bpygpu_VertFormat_attr_add_doc,
"attr_add(id, comp_type, len, fetch_mode)\n"
"\n"
"Add an attribute to the vertex format.\n"
);
/* Keyword-only args: id (str), comp_type (str), len (uint), fetch_mode (str).
 * Returns the new attribute's index. */
static PyObject *bpygpu_VertFormat_attr_add(BPyGPUVertFormat *self, PyObject *args, PyObject *kwds)
{
	static const char *kwlist[] = {"id", "comp_type", "len", "fetch_mode", NULL};
	struct {
		const char *id;
		GPUVertCompType comp_type;
		uint len;
		GPUVertFetchMode fetch_mode;
	} params;

	/* The format stores attributes in a fixed-size array; reject overflow early. */
	if (self->fmt.attr_len == GPU_VERT_ATTR_MAX_LEN) {
		/* Fix typo: was "Maxumum". */
		PyErr_SetString(PyExc_ValueError, "Maximum attr reached " STRINGIFY(GPU_VERT_ATTR_MAX_LEN));
		return NULL;
	}

	if (!PyArg_ParseTupleAndKeywords(
	        args, kwds, "$sO&IO&:attr_add", (char **)kwlist,
	        &params.id,
	        bpygpu_ParseVertCompType, &params.comp_type,
	        &params.len,
	        bpygpu_ParseVertFetchMode, &params.fetch_mode))
	{
		return NULL;
	}

	uint attr_id = GPU_vertformat_attr_add(&self->fmt, params.id, params.comp_type, params.len, params.fetch_mode);
	return PyLong_FromLong(attr_id);
}
static struct PyMethodDef bpygpu_VertFormat_methods[] = {
	{"attr_add", (PyCFunction)bpygpu_VertFormat_attr_add,
	 METH_VARARGS | METH_KEYWORDS, bpygpu_VertFormat_attr_add_doc},
	{NULL, NULL, 0, NULL}
};

/* The GPUVertFormat struct is stored inline in the PyObject,
 * so there is nothing to release besides the object itself. */
static void bpygpu_VertFormat_dealloc(BPyGPUVertFormat *self)
{
	Py_TYPE(self)->tp_free(self);
}

PyTypeObject BPyGPUVertFormat_Type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "GPUVertFormat",
	.tp_basicsize = sizeof(BPyGPUVertFormat),
	.tp_dealloc = (destructor)bpygpu_VertFormat_dealloc,
	.tp_flags = Py_TPFLAGS_DEFAULT,
	.tp_methods = bpygpu_VertFormat_methods,
	.tp_new = bpygpu_VertFormat_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name VertBuf Type
* \{ */
/* tp_new for GPUVertBuf: allocate a VBO of `len` vertices with the
 * given GPUVertFormat.  Both arguments are keyword-only ("$" in the
 * format string). */
static PyObject *bpygpu_VertBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
	const char * const keywords[] = {"len", "format", NULL};
	uint vert_len;
	BPyGPUVertFormat *py_fmt;

	if (!PyArg_ParseTupleAndKeywords(
	        args, kwds,
	        "$IO!:GPUVertBuf.__new__", (char **)keywords,
	        &vert_len,
	        &BPyGPUVertFormat_Type, &py_fmt))
	{
		return NULL;
	}

	struct GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&py_fmt->fmt);
	GPU_vertbuf_data_alloc(vbo, vert_len);
	return BPyGPUVertBuf_CreatePyObject(vbo);
}
PyDoc_STRVAR(bpygpu_VertBuf_fill_doc,
"TODO"
);
/* fill(id, data): copy a Python sequence into attribute `id` of the VBO. */
static PyObject *bpygpu_VertBuf_fill(BPyGPUVertBuf *self, PyObject *args, PyObject *kwds)
{
	static const char *kwlist[] = {"id", "data", NULL};
	uint attr_id;
	PyObject *seq_data;

	if (!PyArg_ParseTupleAndKeywords(
	        args, kwds, "$IO:fill", (char **)kwlist,
	        &attr_id,
	        &seq_data))
	{
		return NULL;
	}

	/* Validate the attribute index before touching buffer memory. */
	if (attr_id >= self->buf->format.attr_len) {
		PyErr_Format(PyExc_ValueError,
		             "Format id %d out of range",
		             attr_id);
		return NULL;
	}

	/* A NULL data pointer means the buffer was already handed to the GPU. */
	if (self->buf->data == NULL) {
		PyErr_SetString(PyExc_ValueError,
		                "Can't fill, static buffer already in use");
		return NULL;
	}

	if (bpygpu_vertbuf_fill_impl(self->buf, attr_id, seq_data)) {
		Py_RETURN_NONE;
	}
	return NULL;
}
static struct PyMethodDef bpygpu_VertBuf_methods[] = {
	{"fill", (PyCFunction) bpygpu_VertBuf_fill,
	 METH_VARARGS | METH_KEYWORDS, bpygpu_VertBuf_fill_doc},
	{NULL, NULL, 0, NULL}
};

/* The wrapped GPUVertBuf is owned by the Python object: discard it
 * together with the wrapper. */
static void bpygpu_VertBuf_dealloc(BPyGPUVertBuf *self)
{
	GPU_vertbuf_discard(self->buf);
	Py_TYPE(self)->tp_free(self);
}

PyTypeObject BPyGPUVertBuf_Type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "GPUVertBuf",
	.tp_basicsize = sizeof(BPyGPUVertBuf),
	.tp_dealloc = (destructor)bpygpu_VertBuf_dealloc,
	.tp_flags = Py_TPFLAGS_DEFAULT,
	.tp_methods = bpygpu_VertBuf_methods,
	.tp_new = bpygpu_VertBuf_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name VertBatch Type
* \{ */
/* tp_new for GPUBatch: build a batch from a primitive type name and a
 * GPUVertBuf.  Both arguments are keyword-only. */
static PyObject *bpygpu_Batch_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
	const char * const keywords[] = {"type", "buf", NULL};
	struct {
		GPUPrimType type_id;
		BPyGPUVertBuf *py_buf;
	} params;
	if (!PyArg_ParseTupleAndKeywords(
	        args, kwds,
	        "$O&O!:GPUBatch.__new__", (char **)keywords,
	        bpygpu_ParsePrimType, &params.type_id,
	        &BPyGPUVertBuf_Type, &params.py_buf))
	{
		return NULL;
	}
	GPUBatch *batch = GPU_batch_create(params.type_id, params.py_buf->buf, NULL);
	BPyGPUBatch *ret = (BPyGPUBatch *)BPyGPUBatch_CreatePyObject(batch);

#ifdef USE_GPU_PY_REFERENCES
	/* Keep the vertex buffer alive for as long as the batch exists;
	 * only start GC tracking once `references` is fully initialized. */
	ret->references = PyList_New(1);
	PyList_SET_ITEM(ret->references, 0, (PyObject *)params.py_buf);
	Py_INCREF(params.py_buf);
	PyObject_GC_Track(ret);
#endif

	return (PyObject *)ret;
}
PyDoc_STRVAR(bpygpu_VertBatch_vertbuf_add_doc,
"TODO"
);
/* METH_O method: append an extra GPUVertBuf to this batch.  The new
 * buffer must hold the same number of vertices as the first one. */
static PyObject *bpygpu_VertBatch_vertbuf_add(BPyGPUBatch *self, BPyGPUVertBuf *py_buf)
{
	if (!BPyGPUVertBuf_Check(py_buf)) {
		PyErr_Format(PyExc_TypeError,
		             "Expected a GPUVertBuf, got %s",
		             Py_TYPE(py_buf)->tp_name);
		return NULL;
	}

	if (self->batch->verts[0]->vertex_len != py_buf->buf->vertex_len) {
		PyErr_Format(PyExc_TypeError,
		             "Expected %d length, got %d",
		             self->batch->verts[0]->vertex_len, py_buf->buf->vertex_len);
		return NULL;
	}

#ifdef USE_GPU_PY_REFERENCES
	/* Hold user */
	PyList_Append(self->references, (PyObject *)py_buf);
#endif

	GPU_batch_vertbuf_add(self->batch, py_buf->buf);
	Py_RETURN_NONE;
}
/* Currently magic number from Py perspective. */
PyDoc_STRVAR(bpygpu_VertBatch_program_set_builtin_doc,
"TODO"
);
/* Bind one of Blender's built-in shaders to the batch, selected by its
 * string name (e.g. "2D_FLAT_COLOR").  Raises ValueError for unknown names. */
static PyObject *bpygpu_VertBatch_program_set_builtin(BPyGPUBatch *self, PyObject *args, PyObject *kwds)
{
	static const char *kwlist[] = {"id", NULL};
	struct {
		const char *shader;
	} params;
	if (!PyArg_ParseTupleAndKeywords(
	        args, kwds, "s:program_set_builtin", (char **)kwlist,
	        &params.shader))
	{
		return NULL;
	}

	GPUBuiltinShader shader;

	/* Map the string onto the GPU_SHADER_* enum; jumps to `success` on match. */
#define MATCH_ID(id) \
	if (STREQ(params.shader, STRINGIFY(id))) { \
		shader = GPU_SHADER_##id; \
		goto success; \
	} ((void)0)

	MATCH_ID(2D_FLAT_COLOR);
	MATCH_ID(2D_SMOOTH_COLOR);
	MATCH_ID(2D_UNIFORM_COLOR);
	MATCH_ID(3D_FLAT_COLOR);
	MATCH_ID(3D_SMOOTH_COLOR);
	MATCH_ID(3D_UNIFORM_COLOR);

#undef MATCH_ID

	PyErr_SetString(PyExc_ValueError,
	                "shader name not known");
	return NULL;

success:
	GPU_batch_program_set_builtin(self->batch, shader);
	Py_RETURN_NONE;
}
/* uniform_bool(name, value): set a boolean shader uniform on the batch. */
static PyObject *bpygpu_VertBatch_uniform_bool(BPyGPUBatch *self, PyObject *args)
{
	const char *uniform_name;
	bool value;

	if (!PyArg_ParseTuple(
	        args, "sO&:uniform_bool",
	        &uniform_name,
	        PyC_ParseBool, &value))
	{
		return NULL;
	}

	GPU_batch_uniform_1b(self->batch, uniform_name, value);
	Py_RETURN_NONE;
}
/* uniform_i32(name, value): set a 32-bit integer shader uniform on the batch. */
static PyObject *bpygpu_VertBatch_uniform_i32(BPyGPUBatch *self, PyObject *args)
{
	const char *uniform_name;
	int value;

	if (!PyArg_ParseTuple(
	        args, "si:uniform_i32",
	        &uniform_name,
	        &value))
	{
		return NULL;
	}

	GPU_batch_uniform_1i(self->batch, uniform_name, value);
	Py_RETURN_NONE;
}
/* uniform_f32(name, f1[, f2[, f3[, f4]]]): set a float/vec2/vec3/vec4
 * uniform; the number of float arguments selects the uniform dimension. */
static PyObject *bpygpu_VertBatch_uniform_f32(BPyGPUBatch *self, PyObject *args)
{
	const char *uniform_name;
	float v[4];

	if (!PyArg_ParseTuple(
	        args, "sf|fff:uniform_f32",
	        &uniform_name,
	        &v[0], &v[1], &v[2], &v[3]))
	{
		return NULL;
	}

	/* Arg count includes the name, so 2..5 maps to 1..4 floats. */
	const Py_ssize_t nargs = PyTuple_GET_SIZE(args);
	if (nargs == 2) {
		GPU_batch_uniform_1f(self->batch, uniform_name, v[0]);
	}
	else if (nargs == 3) {
		GPU_batch_uniform_2f(self->batch, uniform_name, UNPACK2(v));
	}
	else if (nargs == 4) {
		GPU_batch_uniform_3f(self->batch, uniform_name, UNPACK3(v));
	}
	else if (nargs == 5) {
		GPU_batch_uniform_4f(self->batch, uniform_name, UNPACK4(v));
	}
	else {
		BLI_assert(0);
	}
	Py_RETURN_NONE;
}
PyDoc_STRVAR(bpygpu_VertBatch_draw_doc,
"TODO"
);
/* Draw the batch; a shader program must have been set first. */
static PyObject *bpygpu_VertBatch_draw(BPyGPUBatch *self)
{
	if (!glIsProgram(self->batch->program)) {
		/* Fix message ("has not not set") and add the missing return:
		 * previously execution continued with an exception set. */
		PyErr_SetString(PyExc_ValueError,
		                "batch program has not been set");
		return NULL;
	}
	GPU_batch_draw(self->batch);
	Py_RETURN_NONE;
}
/* Begin using the batch's program; the program must have been set first. */
static PyObject *bpygpu_VertBatch_program_use_begin(BPyGPUBatch *self)
{
	if (!glIsProgram(self->batch->program)) {
		/* Fix message ("has not not set") and add the missing return:
		 * previously execution continued with an exception set. */
		PyErr_SetString(PyExc_ValueError,
		                "batch program has not been set");
		return NULL;
	}
	GPU_batch_program_use_begin(self->batch);
	Py_RETURN_NONE;
}
/* Stop using the batch's program; the program must have been set first. */
static PyObject *bpygpu_VertBatch_program_use_end(BPyGPUBatch *self)
{
	if (!glIsProgram(self->batch->program)) {
		/* Fix message ("has not not set") and add the missing return:
		 * previously execution continued with an exception set. */
		PyErr_SetString(PyExc_ValueError,
		                "batch program has not been set");
		return NULL;
	}
	GPU_batch_program_use_end(self->batch);
	Py_RETURN_NONE;
}
/* Method table for GPUBatch. */
static struct PyMethodDef bpygpu_VertBatch_methods[] = {
	{"vertbuf_add", (PyCFunction)bpygpu_VertBatch_vertbuf_add,
	 METH_O, bpygpu_VertBatch_vertbuf_add_doc},
	{"program_set_builtin", (PyCFunction)bpygpu_VertBatch_program_set_builtin,
	 METH_VARARGS | METH_KEYWORDS, bpygpu_VertBatch_program_set_builtin_doc},
	{"uniform_bool", (PyCFunction)bpygpu_VertBatch_uniform_bool,
	 METH_VARARGS, NULL},
	{"uniform_i32", (PyCFunction)bpygpu_VertBatch_uniform_i32,
	 METH_VARARGS, NULL},
	{"uniform_f32", (PyCFunction)bpygpu_VertBatch_uniform_f32,
	 METH_VARARGS, NULL},
	{"draw", (PyCFunction) bpygpu_VertBatch_draw,
	 METH_NOARGS, bpygpu_VertBatch_draw_doc},
	{"program_use_begin", (PyCFunction)bpygpu_VertBatch_program_use_begin,
	 METH_NOARGS, ""},
	{"program_use_end", (PyCFunction)bpygpu_VertBatch_program_use_end,
	 METH_NOARGS, ""},
	{NULL, NULL, 0, NULL}
};
#ifdef USE_GPU_PY_REFERENCES

/* GC support: the only PyObject this type references is `references`. */
static int bpygpu_Batch_traverse(BPyGPUBatch *self, visitproc visit, void *arg)
{
	Py_VISIT(self->references);
	return 0;
}

static int bpygpu_Batch_clear(BPyGPUBatch *self)
{
	Py_CLEAR(self->references);
	return 0;
}

#endif
/* Free the owned GPUBatch, then drop the reference list (untracking
 * from the GC first so collection can't see a half-torn-down object). */
static void bpygpu_Batch_dealloc(BPyGPUBatch *self)
{
	GPU_batch_discard(self->batch);

#ifdef USE_GPU_PY_REFERENCES
	if (self->references) {
		PyObject_GC_UnTrack(self);
		bpygpu_Batch_clear(self);
		Py_XDECREF(self->references);
	}
#endif

	Py_TYPE(self)->tp_free(self);
}
PyTypeObject BPyGPUBatch_Type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "GPUBatch",
	.tp_basicsize = sizeof(BPyGPUBatch),
	.tp_dealloc = (destructor)bpygpu_Batch_dealloc,
#ifdef USE_GPU_PY_REFERENCES
	/* GC participation is only needed when the batch holds Python refs. */
	.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
	.tp_traverse = (traverseproc)bpygpu_Batch_traverse,
	.tp_clear = (inquiry)bpygpu_Batch_clear,
#else
	.tp_flags = Py_TPFLAGS_DEFAULT,
#endif
	.tp_methods = bpygpu_VertBatch_methods,
	.tp_new = bpygpu_Batch_new,
};
/* -------------------------------------------------------------------- */ /* -------------------------------------------------------------------- */
/** \name GPU Types Module /** \name GPU Types Module
* \{ */ * \{ */
static struct PyModuleDef BPy_BM_types_module_def = { static struct PyModuleDef BPyGPU_types_module_def = {
PyModuleDef_HEAD_INIT, PyModuleDef_HEAD_INIT,
.m_name = "_gpu.types", .m_name = "gpu.types",
}; };
PyObject *BPyInit_gpu_types(void) PyObject *BPyInit_gpu_types(void)
{ {
PyObject *submodule; PyObject *submodule;
submodule = PyModule_Create(&BPy_BM_types_module_def); submodule = PyModule_Create(&BPyGPU_types_module_def);
if (PyType_Ready(&BPyGPUVertFormat_Type) < 0) if (PyType_Ready(&BPyGPUVertFormat_Type) < 0)
return NULL; return NULL;
@@ -779,6 +55,8 @@ PyObject *BPyInit_gpu_types(void)
return NULL; return NULL;
if (PyType_Ready(&BPyGPUBatch_Type) < 0) if (PyType_Ready(&BPyGPUBatch_Type) < 0)
return NULL; return NULL;
if (PyType_Ready(&BPyGPUOffScreen_Type) < 0)
return NULL;
#define MODULE_TYPE_ADD(s, t) \ #define MODULE_TYPE_ADD(s, t) \
PyModule_AddObject(s, t.tp_name, (PyObject *)&t); Py_INCREF((PyObject *)&t) PyModule_AddObject(s, t.tp_name, (PyObject *)&t); Py_INCREF((PyObject *)&t)
@@ -786,6 +64,7 @@ PyObject *BPyInit_gpu_types(void)
MODULE_TYPE_ADD(submodule, BPyGPUVertFormat_Type); MODULE_TYPE_ADD(submodule, BPyGPUVertFormat_Type);
MODULE_TYPE_ADD(submodule, BPyGPUVertBuf_Type); MODULE_TYPE_ADD(submodule, BPyGPUVertBuf_Type);
MODULE_TYPE_ADD(submodule, BPyGPUBatch_Type); MODULE_TYPE_ADD(submodule, BPyGPUBatch_Type);
MODULE_TYPE_ADD(submodule, BPyGPUOffScreen_Type);
#undef MODULE_TYPE_ADD #undef MODULE_TYPE_ADD
@@ -793,53 +72,3 @@ PyObject *BPyInit_gpu_types(void)
} }
/** \} */ /** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
PyObject *BPyGPUVertFormat_CreatePyObject(GPUVertFormat *fmt)
{
BPyGPUVertFormat *self;
self = PyObject_New(BPyGPUVertFormat, &BPyGPUVertFormat_Type);
if (fmt) {
self->fmt = *fmt;
}
else {
memset(&self->fmt, 0, sizeof(self->fmt));
}
return (PyObject *)self;
}
PyObject *BPyGPUVertBuf_CreatePyObject(GPUVertBuf *buf)
{
BPyGPUVertBuf *self;
self = PyObject_New(BPyGPUVertBuf, &BPyGPUVertBuf_Type);
self->buf = buf;
return (PyObject *)self;
}
PyObject *BPyGPUBatch_CreatePyObject(GPUBatch *batch)
{
BPyGPUBatch *self;
#ifdef USE_GPU_PY_REFERENCES
self = (BPyGPUBatch *)_PyObject_GC_New(&BPyGPUBatch_Type);
self->references = NULL;
#else
self = PyObject_New(BPyGPUBatch, &BPyGPUBatch_Type);
#endif
self->batch = batch;
return (PyObject *)self;
}
/** \} */

View File

@@ -19,49 +19,17 @@
*/ */
/** \file blender/python/gpu/gpu_py_types.h /** \file blender/python/gpu/gpu_py_types.h
* \ingroup pygpu * \ingroup bpygpu
*/ */
#ifndef __GPU_PY_TYPES_H__ #ifndef __GPU_PY_TYPES_H__
#define __GPU_PY_TYPES_H__ #define __GPU_PY_TYPES_H__
#include "BLI_compiler_attrs.h" #include "gpu_py_vertex_format.h"
#include "gpu_py_vertex_buffer.h"
#define USE_GPU_PY_REFERENCES #include "gpu_py_batch.h"
#include "gpu_py_offscreen.h"
extern PyTypeObject BPyGPUVertFormat_Type;
extern PyTypeObject BPyGPUVertBuf_Type;
extern PyTypeObject BPyGPUBatch_Type;
#define BPyGPUVertFormat_Check(v) (Py_TYPE(v) == &BPyGPUVertFormat_Type)
#define BPyGPUVertBuf_Check(v) (Py_TYPE(v) == &BPyGPUVertBuf_Type)
#define BPyGPUBatch_Check(v) (Py_TYPE(v) == &BPyGPUBatch_Type)
typedef struct BPyGPUVertFormat {
PyObject_VAR_HEAD
struct GPUVertFormat fmt;
} BPyGPUVertFormat;
typedef struct BPyGPUVertBuf {
PyObject_VAR_HEAD
/* The buf is owned, we may support thin wrapped batches later. */
struct GPUVertBuf *buf;
} BPyGPUVertBuf;
typedef struct BPyGPUBatch {
PyObject_VAR_HEAD
/* The batch is owned, we may support thin wrapped batches later. */
struct GPUBatch *batch;
#ifdef USE_GPU_PY_REFERENCES
/* Just to keep a user to prevent freeing buf's we're using */
PyObject *references;
#endif
} BPyGPUBatch;
PyObject *BPyInit_gpu_types(void); PyObject *BPyInit_gpu_types(void);
PyObject *BPyGPUVertFormat_CreatePyObject(struct GPUVertFormat *fmt);
PyObject *BPyGPUVertBuf_CreatePyObject(struct GPUVertBuf *vbo) ATTR_NONNULL(1);
PyObject *BPyGPUBatch_CreatePyObject(struct GPUBatch *batch) ATTR_NONNULL(1);
#endif /* __GPU_PY_TYPES_H__ */ #endif /* __GPU_PY_TYPES_H__ */

View File

@@ -0,0 +1,345 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/python/gpu/gpu_py_vertex_buffer.c
* \ingroup bpygpu
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "GPU_vertex_buffer.h"
#include "BLI_math.h"
#include "MEM_guardedalloc.h"
#include "../generic/py_capi_utils.h"
#include "../generic/python_utildefines.h"
#include "gpu_py_vertex_format.h"
#include "gpu_py_vertex_buffer.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name Enum Conversion
*
* Use with PyArg_ParseTuple's "O&" formatting.
* \{ */
/* "O&" converter for PyArg_ParseTuple: map a fetch-mode name
 * ("FLOAT", "INT", ...) onto GPUVertFetchMode.  Returns 0 on error
 * with an exception set, 1 on success. */
static int bpygpu_ParseVertFetchMode(PyObject *o, void *p)
{
	Py_ssize_t mode_id_len;
	const char *mode_id = _PyUnicode_AsStringAndSize(o, &mode_id_len);
	if (mode_id == NULL) {
		PyErr_Format(PyExc_ValueError,
		             "expected a string, got %s",
		             Py_TYPE(o)->tp_name);
		return 0;
	}

/* Length check first so STREQ never reads past a shorter candidate. */
#define MATCH_ID(id) \
	if (mode_id_len == strlen(STRINGIFY(id))) { \
		if (STREQ(mode_id, STRINGIFY(id))) { \
			mode = GPU_FETCH_##id; \
			goto success; \
		} \
	} ((void)0)

	GPUVertFetchMode mode;
	MATCH_ID(FLOAT);
	MATCH_ID(INT);
	MATCH_ID(INT_TO_FLOAT_UNIT);
	MATCH_ID(INT_TO_FLOAT);

#undef MATCH_ID

	PyErr_Format(PyExc_ValueError,
	             "unknown type literal: '%s'",
	             mode_id);
	return 0;

success:
	(*(GPUVertFetchMode *)p) = mode;
	return 1;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Utility Functions
* \{ */
/* Expand PY_AS_NATIVE once per component type, pairing the C storage
 * type with the matching Python -> C conversion function. */
#define PY_AS_NATIVE_SWITCH(attr) \
	switch (attr->comp_type) { \
		case GPU_COMP_I8: { PY_AS_NATIVE(int8_t, PyC_Long_AsI8); break; } \
		case GPU_COMP_U8: { PY_AS_NATIVE(uint8_t, PyC_Long_AsU8); break; } \
		case GPU_COMP_I16: { PY_AS_NATIVE(int16_t, PyC_Long_AsI16); break; } \
		case GPU_COMP_U16: { PY_AS_NATIVE(uint16_t, PyC_Long_AsU16); break; } \
		case GPU_COMP_I32: { PY_AS_NATIVE(int32_t, PyC_Long_AsI32); break; } \
		case GPU_COMP_U32: { PY_AS_NATIVE(uint32_t, PyC_Long_AsU32); break; } \
		case GPU_COMP_F32: { PY_AS_NATIVE(float, PyFloat_AsDouble); break; } \
		default: \
			BLI_assert(0); \
	} ((void)0)

/* No error checking, callers must run PyErr_Occurred */
/* Write a single scalar `py_src` into `data_dst_void` using the
 * attribute's component type. */
static void fill_format_elem(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
{
#define PY_AS_NATIVE(ty_dst, py_as_native) \
	{ \
		ty_dst *data_dst = data_dst_void; \
		*data_dst = py_as_native(py_src); \
	} ((void)0)

	PY_AS_NATIVE_SWITCH(attr);

#undef PY_AS_NATIVE
}

/* No error checking, callers must run PyErr_Occurred */
/* Write a `comp_len`-sized tuple `py_src` into `data_dst_void`. */
static void fill_format_tuple(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
{
	const uint len = attr->comp_len;

/**
 * Args are constants, so range checks will be optimized out if they're nop's.
 */
#define PY_AS_NATIVE(ty_dst, py_as_native) \
	ty_dst *data_dst = data_dst_void; \
	for (uint i = 0; i < len; i++) { \
		data_dst[i] = py_as_native(PyTuple_GET_ITEM(py_src, i)); \
	} ((void)0)

	PY_AS_NATIVE_SWITCH(attr);

#undef PY_AS_NATIVE
}

#undef PY_AS_NATIVE_SWITCH
#undef WARN_TYPE_LIMIT_PUSH
#undef WARN_TYPE_LIMIT_POP
/**
 * Fill vertex attribute `data_id` of `vbo` from the Python sequence `seq`.
 *
 * When the attribute has a single component each sequence item is a number;
 * otherwise each item must be a tuple of `comp_len` numbers.
 * Returns false with a Python exception set on failure.
 */
static bool bpygpu_vertbuf_fill_impl(
        GPUVertBuf *vbo,
        uint data_id, PyObject *seq)
{
	bool ok = true;
	const GPUVertAttr *attr = &vbo->format.attribs[data_id];
	GPUVertBufRaw data_step;
	GPU_vertbuf_attr_get_raw_data(vbo, data_id, &data_step);

	PyObject *seq_fast = PySequence_Fast(seq, "Vertex buffer fill");
	if (seq_fast == NULL) {
		/* Fix: previously jumped to 'finally' and called Py_DECREF(NULL). */
		return false;
	}

	const uint seq_len = PySequence_Fast_GET_SIZE(seq_fast);
	if (seq_len != vbo->vertex_len) {
		PyErr_Format(PyExc_ValueError,
		             "Expected a sequence of size %d, got %d",
		             vbo->vertex_len, seq_len);
		/* Fix: bail out instead of stepping past the allocated buffer. */
		ok = false;
		goto finally;
	}

	PyObject **seq_items = PySequence_Fast_ITEMS(seq_fast);

	if (attr->comp_len == 1) {
		for (uint i = 0; i < seq_len; i++) {
			uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
			PyObject *item = seq_items[i];
			fill_format_elem(data, item, attr);
		}
	}
	else {
		for (uint i = 0; i < seq_len; i++) {
			uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
			PyObject *item = seq_items[i];
			if (!PyTuple_CheckExact(item)) {
				PyErr_Format(PyExc_ValueError,
				             "expected a tuple, got %s",
				             Py_TYPE(item)->tp_name);
				ok = false;
				goto finally;
			}
			if (PyTuple_GET_SIZE(item) != attr->comp_len) {
				PyErr_Format(PyExc_ValueError,
				             "expected a tuple of size %d, got %d",
				             attr->comp_len, PyTuple_GET_SIZE(item));
				ok = false;
				goto finally;
			}
			/* Conversion may set an error, checked below. */
			fill_format_tuple(data, item, attr);
		}
	}

	if (PyErr_Occurred()) {
		ok = false;
	}

finally:
	Py_DECREF(seq_fast);
	return ok;
}
/* handy, but not used just now */
#if 0
/* Return the index of the attribute whose (aliased) name matches `id`,
 * or -1 when no attribute in `fmt` has that name. */
static int bpygpu_find_id(const GPUVertFormat *fmt, const char *id)
{
	for (int i = 0; i < fmt->attr_len; i++) {
		for (uint j = 0; j < fmt->name_len; j++) {
			if (STREQ(fmt->attribs[i].name[j], id)) {
				return i;
			}
		}
	}
	return -1;
}
#endif
/** \} */
/* -------------------------------------------------------------------- */
/** \name VertBuf Type
* \{ */
/* tp_new for GPUVertBuf: allocate a VBO of `len` vertices using the
 * given GPUVertFormat.  Both arguments are keyword-only. */
static PyObject *bpygpu_VertBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
	const char * const keywords[] = {"len", "format", NULL};
	struct {
		BPyGPUVertFormat *py_fmt;
		uint len;
	} params;
	if (!PyArg_ParseTupleAndKeywords(
	        args, kwds,
	        "$IO!:GPUVertBuf.__new__", (char **)keywords,
	        &params.len,
	        &BPyGPUVertFormat_Type, &params.py_fmt))
	{
		return NULL;
	}

	struct GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&params.py_fmt->fmt);
	GPU_vertbuf_data_alloc(vbo, params.len);
	return BPyGPUVertBuf_CreatePyObject(vbo);
}
PyDoc_STRVAR(bpygpu_VertBuf_fill_doc,
"TODO"
);
/* fill(id, data): copy a Python sequence into attribute `id` of the VBO. */
static PyObject *bpygpu_VertBuf_fill(BPyGPUVertBuf *self, PyObject *args, PyObject *kwds)
{
	static const char *kwlist[] = {"id", "data", NULL};
	struct {
		uint id;
		PyObject *py_seq_data;
	} params;

	if (!PyArg_ParseTupleAndKeywords(
	        args, kwds, "$IO:fill", (char **)kwlist,
	        &params.id,
	        &params.py_seq_data))
	{
		return NULL;
	}

	/* Validate the attribute index before touching buffer memory. */
	if (params.id >= self->buf->format.attr_len) {
		PyErr_Format(PyExc_ValueError,
		             "Format id %d out of range",
		             params.id);
		return NULL;
	}

	/* A NULL data pointer means the buffer was already handed to the GPU. */
	if (self->buf->data == NULL) {
		PyErr_SetString(PyExc_ValueError,
		                "Can't fill, static buffer already in use");
		return NULL;
	}

	if (!bpygpu_vertbuf_fill_impl(self->buf, params.id, params.py_seq_data)) {
		return NULL;
	}

	Py_RETURN_NONE;
}
static struct PyMethodDef bpygpu_VertBuf_methods[] = {
	{"fill", (PyCFunction) bpygpu_VertBuf_fill,
	 METH_VARARGS | METH_KEYWORDS, bpygpu_VertBuf_fill_doc},
	{NULL, NULL, 0, NULL}
};

/* The wrapped GPUVertBuf is owned by the Python object: discard it
 * together with the wrapper. */
static void bpygpu_VertBuf_dealloc(BPyGPUVertBuf *self)
{
	GPU_vertbuf_discard(self->buf);
	Py_TYPE(self)->tp_free(self);
}
/* Docstring fixes: field names matched the wrong parameters
 * (":type type:" / ":type buf:"), stray trailing backticks, and a
 * missing blank line after the summary. */
PyDoc_STRVAR(py_gpu_vertex_buffer_doc,
"GPUVertBuf(len, format)\n"
"\n"
"Contains a VBO.\n"
"\n"
"   :param len: number of elements to allocate\n"
"   :type len: int\n"
"   :param format: Vertex format.\n"
"   :type format: GPUVertFormat\n"
);
PyTypeObject BPyGPUVertBuf_Type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "GPUVertBuf",
	.tp_basicsize = sizeof(BPyGPUVertBuf),
	.tp_dealloc = (destructor)bpygpu_VertBuf_dealloc,
	.tp_flags = Py_TPFLAGS_DEFAULT,
	.tp_doc = py_gpu_vertex_buffer_doc,
	.tp_methods = bpygpu_VertBuf_methods,
	.tp_new = bpygpu_VertBuf_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
/* Wrap an existing GPUVertBuf in a new Python object.
 * The wrapper owns `buf`: tp_dealloc discards it. */
PyObject *BPyGPUVertBuf_CreatePyObject(GPUVertBuf *buf)
{
	BPyGPUVertBuf *self;

	self = PyObject_New(BPyGPUVertBuf, &BPyGPUVertBuf_Type);
	self->buf = buf;

	return (PyObject *)self;
}

View File

@@ -0,0 +1,42 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/python/gpu/gpu_py_vertex_buffer.h
* \ingroup bpygpu
*/
#ifndef __GPU_PY_VERTEX_BUFFER_H__
#define __GPU_PY_VERTEX_BUFFER_H__
#include "BLI_compiler_attrs.h"
extern PyTypeObject BPyGPUVertBuf_Type;

/* Exact type check (no subclasses). */
#define BPyGPUVertBuf_Check(v) (Py_TYPE(v) == &BPyGPUVertBuf_Type)

/* Python wrapper around a GPUVertBuf. */
typedef struct BPyGPUVertBuf {
	PyObject_VAR_HEAD
	/* The buf is owned, we may support thin wrapped batches later. */
	struct GPUVertBuf *buf;
} BPyGPUVertBuf;

PyObject *BPyGPUVertBuf_CreatePyObject(struct GPUVertBuf *vbo) ATTR_NONNULL(1);
#endif /* __GPU_PY_VERTEX_BUFFER_H__ */

View File

@@ -0,0 +1,230 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/python/gpu/gpu_py_vertex_format.c
* \ingroup bpygpu
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "BLI_math.h"
#include "MEM_guardedalloc.h"
#include "../generic/py_capi_utils.h"
#include "../generic/python_utildefines.h"
#include "gpu_py_vertex_format.h" /* own include */
/* Pack 2/3/4 ASCII chars into an integer in native byte order, so short
 * string identifiers can be compared with a single integer switch. */
#ifdef __BIG_ENDIAN__
/* big endian */
#  define MAKE_ID2(c, d)  ((c) << 8 | (d))
#  define MAKE_ID3(a, b, c) ( (int)(a) << 24 | (int)(b) << 16 | (c) << 8 )
#  define MAKE_ID4(a, b, c, d) ( (int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d) )
#else
/* little endian  */
#  define MAKE_ID2(c, d)  ((d) << 8 | (c))
#  define MAKE_ID3(a, b, c) ( (int)(c) << 16 | (b) << 8 | (a) )
#  define MAKE_ID4(a, b, c, d) ( (int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a) )
#endif
/* -------------------------------------------------------------------- */
/** \name Enum Conversion
*
* Use with PyArg_ParseTuple's "O&" formatting.
* \{ */
/* "O&" converter for PyArg_ParseTuple: map a component-type name
 * ("I8", "U16", "F32", ...) onto GPUVertCompType.  Returns 0 on error
 * with an exception set, 1 on success. */
static int bpygpu_ParseVertCompType(PyObject *o, void *p)
{
	Py_ssize_t comp_type_id_len;
	const char *comp_type_id = _PyUnicode_AsStringAndSize(o, &comp_type_id_len);
	if (comp_type_id == NULL) {
		PyErr_Format(PyExc_ValueError,
		             "expected a string, got %s",
		             Py_TYPE(o)->tp_name);
		return 0;
	}

	GPUVertCompType comp_type;
	/* Compare by reinterpreting the string bytes as a small integer and
	 * switching on MAKE_ID* constants (NOTE(review): the 3-char case reads
	 * 4 bytes through `uint`, relying on the NUL terminator — confirm the
	 * string buffer layout guarantees this). */
	if (comp_type_id_len == 2) {
		switch (*((ushort *)comp_type_id)) {
			case MAKE_ID2('I', '8'): { comp_type = GPU_COMP_I8; goto success; }
			case MAKE_ID2('U', '8'): { comp_type = GPU_COMP_U8; goto success; }
		}
	}
	else if (comp_type_id_len == 3) {
		switch (*((uint *)comp_type_id)) {
			case MAKE_ID3('I', '1', '6'): { comp_type = GPU_COMP_I16; goto success; }
			case MAKE_ID3('U', '1', '6'): { comp_type = GPU_COMP_U16; goto success; }
			case MAKE_ID3('I', '3', '2'): { comp_type = GPU_COMP_I32; goto success; }
			case MAKE_ID3('U', '3', '2'): { comp_type = GPU_COMP_U32; goto success; }
			case MAKE_ID3('F', '3', '2'): { comp_type = GPU_COMP_F32; goto success; }
			case MAKE_ID3('I', '1', '0'): { comp_type = GPU_COMP_I10; goto success; }
		}
	}

	PyErr_Format(PyExc_ValueError,
	             "unknown type literal: '%s'",
	             comp_type_id);
	return 0;

success:
	*((GPUVertCompType *)p) = comp_type;
	return 1;
}
/* "O&" converter for PyArg_ParseTuple: map a fetch-mode name
 * ("FLOAT", "INT", ...) onto GPUVertFetchMode.  Returns 0 on error
 * with an exception set, 1 on success. */
static int bpygpu_ParseVertFetchMode(PyObject *o, void *p)
{
	Py_ssize_t mode_id_len;
	const char *mode_id = _PyUnicode_AsStringAndSize(o, &mode_id_len);
	if (mode_id == NULL) {
		PyErr_Format(PyExc_ValueError,
		             "expected a string, got %s",
		             Py_TYPE(o)->tp_name);
		return 0;
	}

/* Length check first so STREQ never reads past a shorter candidate. */
#define MATCH_ID(id) \
	if (mode_id_len == strlen(STRINGIFY(id))) { \
		if (STREQ(mode_id, STRINGIFY(id))) { \
			mode = GPU_FETCH_##id; \
			goto success; \
		} \
	} ((void)0)

	GPUVertFetchMode mode;
	MATCH_ID(FLOAT);
	MATCH_ID(INT);
	MATCH_ID(INT_TO_FLOAT_UNIT);
	MATCH_ID(INT_TO_FLOAT);

#undef MATCH_ID

	PyErr_Format(PyExc_ValueError,
	             "unknown type literal: '%s'",
	             mode_id);
	return 0;

success:
	(*(GPUVertFetchMode *)p) = mode;
	return 1;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name VertFormat Type
* \{ */
/* tp_new for GPUVertFormat: the constructor accepts no arguments,
 * the format is populated afterwards via attr_add(). */
static PyObject *bpygpu_VertFormat_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
	if (PyTuple_GET_SIZE(args) || (kwds && PyDict_Size(kwds))) {
		PyErr_SetString(PyExc_TypeError,
		                "VertFormat(): takes no arguments");
		return NULL;
	}

	/* NULL: start from a zeroed format. */
	BPyGPUVertFormat *ret = (BPyGPUVertFormat *)BPyGPUVertFormat_CreatePyObject(NULL);

	return (PyObject *)ret;
}
PyDoc_STRVAR(bpygpu_VertFormat_attr_add_doc,
"attr_add(id, comp_type, len, fetch_mode)\n"
"\n"
"Add an attribute to the vertex format.\n"
);
/* Keyword-only args: id (str), comp_type (str), len (uint), fetch_mode (str).
 * Returns the new attribute's index. */
static PyObject *bpygpu_VertFormat_attr_add(BPyGPUVertFormat *self, PyObject *args, PyObject *kwds)
{
	static const char *kwlist[] = {"id", "comp_type", "len", "fetch_mode", NULL};
	struct {
		const char *id;
		GPUVertCompType comp_type;
		uint len;
		GPUVertFetchMode fetch_mode;
	} params;

	/* The format stores attributes in a fixed-size array; reject overflow early. */
	if (self->fmt.attr_len == GPU_VERT_ATTR_MAX_LEN) {
		/* Fix typo: was "Maxumum". */
		PyErr_SetString(PyExc_ValueError, "Maximum attr reached " STRINGIFY(GPU_VERT_ATTR_MAX_LEN));
		return NULL;
	}

	if (!PyArg_ParseTupleAndKeywords(
	        args, kwds, "$sO&IO&:attr_add", (char **)kwlist,
	        &params.id,
	        bpygpu_ParseVertCompType, &params.comp_type,
	        &params.len,
	        bpygpu_ParseVertFetchMode, &params.fetch_mode))
	{
		return NULL;
	}

	uint attr_id = GPU_vertformat_attr_add(&self->fmt, params.id, params.comp_type, params.len, params.fetch_mode);
	return PyLong_FromLong(attr_id);
}
static struct PyMethodDef bpygpu_VertFormat_methods[] = {
	{"attr_add", (PyCFunction)bpygpu_VertFormat_attr_add,
	 METH_VARARGS | METH_KEYWORDS, bpygpu_VertFormat_attr_add_doc},
	{NULL, NULL, 0, NULL}
};

/* The GPUVertFormat struct is stored inline in the PyObject,
 * so there is nothing to release besides the object itself. */
static void bpygpu_VertFormat_dealloc(BPyGPUVertFormat *self)
{
	Py_TYPE(self)->tp_free(self);
}

PyTypeObject BPyGPUVertFormat_Type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "GPUVertFormat",
	.tp_basicsize = sizeof(BPyGPUVertFormat),
	.tp_dealloc = (destructor)bpygpu_VertFormat_dealloc,
	.tp_flags = Py_TPFLAGS_DEFAULT,
	.tp_methods = bpygpu_VertFormat_methods,
	.tp_new = bpygpu_VertFormat_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
/* Create a Python GPUVertFormat, copying `fmt` when given,
 * otherwise starting from a zeroed format. */
PyObject *BPyGPUVertFormat_CreatePyObject(GPUVertFormat *fmt)
{
	BPyGPUVertFormat *self;

	self = PyObject_New(BPyGPUVertFormat, &BPyGPUVertFormat_Type);
	if (fmt) {
		self->fmt = *fmt;
	}
	else {
		memset(&self->fmt, 0, sizeof(self->fmt));
	}

	return (PyObject *)self;
}
/** \} */

View File

@@ -0,0 +1,41 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/python/gpu/gpu_py_vertex_format.h
* \ingroup bpygpu
*/
#ifndef __GPU_PY_VERTEX_FORMAT_H__
#define __GPU_PY_VERTEX_FORMAT_H__
#include "GPU_vertex_format.h"
extern PyTypeObject BPyGPUVertFormat_Type;

/* Exact type check (no subclasses). */
#define BPyGPUVertFormat_Check(v) (Py_TYPE(v) == &BPyGPUVertFormat_Type)

/* Python wrapper holding a GPUVertFormat by value. */
typedef struct BPyGPUVertFormat {
	PyObject_VAR_HEAD
	struct GPUVertFormat fmt;
} BPyGPUVertFormat;

PyObject *BPyGPUVertFormat_CreatePyObject(struct GPUVertFormat *fmt);
#endif /* __GPU_PY_VERTEX_FORMAT_H__ */

View File

@@ -82,10 +82,6 @@ set(SRC
bpy_traceback.c bpy_traceback.c
bpy_utils_previews.c bpy_utils_previews.c
bpy_utils_units.c bpy_utils_units.c
gpu.c
gpu_offscreen.c
gpu_py_matrix.c
gpu_py_select.c
stubs.c stubs.c
bpy.h bpy.h
@@ -120,7 +116,6 @@ set(SRC
bpy_traceback.h bpy_traceback.h
bpy_utils_previews.h bpy_utils_previews.h
bpy_utils_units.h bpy_utils_units.h
gpu.h
../BPY_extern.h ../BPY_extern.h
../BPY_extern_clog.h ../BPY_extern_clog.h
) )

View File

@@ -46,7 +46,6 @@
#include "RNA_types.h" #include "RNA_types.h"
#include "bpy.h" #include "bpy.h"
#include "gpu.h"
#include "bpy_rna.h" #include "bpy_rna.h"
#include "bpy_path.h" #include "bpy_path.h"
#include "bpy_capi_utils.h" #include "bpy_capi_utils.h"
@@ -219,7 +218,6 @@ static struct _inittab bpy_internal_modules[] = {
{"mathutils.kdtree", PyInit_mathutils_kdtree}, {"mathutils.kdtree", PyInit_mathutils_kdtree},
#endif #endif
{"_bpy_path", BPyInit__bpy_path}, {"_bpy_path", BPyInit__bpy_path},
{"_gpu", BPyInit_gpu},
{"bgl", BPyInit_bgl}, {"bgl", BPyInit_bgl},
{"blf", BPyInit_blf}, {"blf", BPyInit_blf},
{"imbuf", BPyInit_imbuf}, {"imbuf", BPyInit_imbuf},
@@ -235,7 +233,7 @@ static struct _inittab bpy_internal_modules[] = {
#ifdef WITH_CYCLES #ifdef WITH_CYCLES
{"_cycles", CCL_initPython}, {"_cycles", CCL_initPython},
#endif #endif
{"gpu", GPU_initPython}, {"gpu", BPyInit_gpu},
{"idprop", BPyInit_idprop}, {"idprop", BPyInit_idprop},
{NULL, NULL} {NULL, NULL}
}; };