Merge branch 'master' into temp_bmesh_multires

This commit is contained in:
2021-02-28 04:59:03 -08:00
1297 changed files with 73092 additions and 47707 deletions

View File

@@ -21,6 +21,7 @@
#pragma once
struct AnimationEvalContext;
struct ARegionType;
struct ChannelDriver; /* DNA_anim_types.h */
struct ID; /* DNA_ID.h */
struct ListBase; /* DNA_listBase.h */
@@ -33,6 +34,7 @@ struct bConstraintTarget; /* DNA_constraint_types.h*/
struct bContext;
struct bContextDataResult;
struct bPythonConstraint; /* DNA_constraint_types.h */
struct wmWindowManager;
#include "BLI_utildefines.h"
@@ -100,6 +102,10 @@ void BPY_id_release(struct ID *id);
bool BPY_string_is_keyword(const char *str);
/* bpy_rna_callback.c */
void BPY_callback_screen_free(struct ARegionType *art);
void BPY_callback_wm_free(struct wmWindowManager *wm);
/* I18n for addons */
#ifdef WITH_INTERNATIONAL
const char *BPY_app_translations_py_pgettext(const char *msgctxt, const char *msgid);

View File

@@ -167,11 +167,7 @@ static PyTypeObject bmesh_op_Type = {
0, /* tp_itemsize */
/* methods */
NULL, /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,
@@ -249,7 +245,7 @@ static PyTypeObject bmesh_op_Type = {
static PyObject *bpy_bmesh_ops_module_getattro(PyObject *UNUSED(self), PyObject *pyname)
{
const char *opname = _PyUnicode_AsString(pyname);
const char *opname = PyUnicode_AsUTF8(pyname);
if (BMO_opcode_from_opname(opname) != -1) {
return bpy_bmesh_op_CreatePyObject(opname);

View File

@@ -184,7 +184,7 @@ static int bpy_slot_from_py(BMesh *bm,
if (slot->slot_subtype.intg == BMO_OP_SLOT_SUBTYPE_INT_ENUM) {
int enum_val = -1;
PyC_FlagSet *items = (PyC_FlagSet *)slot->data.enum_data.flags;
const char *enum_str = _PyUnicode_AsString(value);
const char *enum_str = PyUnicode_AsUTF8(value);
if (enum_str == NULL) {
PyErr_Format(PyExc_TypeError,
@@ -787,7 +787,7 @@ PyObject *BPy_BMO_call(BPy_BMeshOpFunc *self, PyObject *args, PyObject *kw)
PyObject *key, *value;
Py_ssize_t pos = 0;
while (PyDict_Next(kw, &pos, &key, &value)) {
const char *slot_name = _PyUnicode_AsString(key);
const char *slot_name = PyUnicode_AsUTF8(key);
BMOpSlot *slot;
if (!BMO_slot_exists(bmop.slots_in, slot_name)) {

View File

@@ -3756,37 +3756,31 @@ PyObject *BPyInit_bmesh_types(void)
submodule = PyModule_Create(&BPy_BM_types_module_def);
#define MODULE_TYPE_ADD(s, t) \
PyModule_AddObject(s, t.tp_name, (PyObject *)&t); \
Py_INCREF((PyObject *)&t)
/* bmesh_py_types.c */
MODULE_TYPE_ADD(submodule, BPy_BMesh_Type);
MODULE_TYPE_ADD(submodule, BPy_BMVert_Type);
MODULE_TYPE_ADD(submodule, BPy_BMEdge_Type);
MODULE_TYPE_ADD(submodule, BPy_BMFace_Type);
MODULE_TYPE_ADD(submodule, BPy_BMLoop_Type);
MODULE_TYPE_ADD(submodule, BPy_BMElemSeq_Type);
MODULE_TYPE_ADD(submodule, BPy_BMVertSeq_Type);
MODULE_TYPE_ADD(submodule, BPy_BMEdgeSeq_Type);
MODULE_TYPE_ADD(submodule, BPy_BMFaceSeq_Type);
MODULE_TYPE_ADD(submodule, BPy_BMLoopSeq_Type);
MODULE_TYPE_ADD(submodule, BPy_BMIter_Type);
PyModule_AddType(submodule, &BPy_BMesh_Type);
PyModule_AddType(submodule, &BPy_BMVert_Type);
PyModule_AddType(submodule, &BPy_BMEdge_Type);
PyModule_AddType(submodule, &BPy_BMFace_Type);
PyModule_AddType(submodule, &BPy_BMLoop_Type);
PyModule_AddType(submodule, &BPy_BMElemSeq_Type);
PyModule_AddType(submodule, &BPy_BMVertSeq_Type);
PyModule_AddType(submodule, &BPy_BMEdgeSeq_Type);
PyModule_AddType(submodule, &BPy_BMFaceSeq_Type);
PyModule_AddType(submodule, &BPy_BMLoopSeq_Type);
PyModule_AddType(submodule, &BPy_BMIter_Type);
/* bmesh_py_types_select.c */
MODULE_TYPE_ADD(submodule, BPy_BMEditSelSeq_Type);
MODULE_TYPE_ADD(submodule, BPy_BMEditSelIter_Type);
PyModule_AddType(submodule, &BPy_BMEditSelSeq_Type);
PyModule_AddType(submodule, &BPy_BMEditSelIter_Type);
/* bmesh_py_types_customdata.c */
MODULE_TYPE_ADD(submodule, BPy_BMLayerAccessVert_Type);
MODULE_TYPE_ADD(submodule, BPy_BMLayerAccessEdge_Type);
MODULE_TYPE_ADD(submodule, BPy_BMLayerAccessFace_Type);
MODULE_TYPE_ADD(submodule, BPy_BMLayerAccessLoop_Type);
MODULE_TYPE_ADD(submodule, BPy_BMLayerCollection_Type);
MODULE_TYPE_ADD(submodule, BPy_BMLayerItem_Type);
PyModule_AddType(submodule, &BPy_BMLayerAccessVert_Type);
PyModule_AddType(submodule, &BPy_BMLayerAccessEdge_Type);
PyModule_AddType(submodule, &BPy_BMLayerAccessFace_Type);
PyModule_AddType(submodule, &BPy_BMLayerAccessLoop_Type);
PyModule_AddType(submodule, &BPy_BMLayerCollection_Type);
PyModule_AddType(submodule, &BPy_BMLayerItem_Type);
/* bmesh_py_types_meshdata.c */
MODULE_TYPE_ADD(submodule, BPy_BMLoopUV_Type);
MODULE_TYPE_ADD(submodule, BPy_BMDeformVert_Type);
#undef MODULE_TYPE_ADD
PyModule_AddType(submodule, &BPy_BMLoopUV_Type);
PyModule_AddType(submodule, &BPy_BMDeformVert_Type);
return submodule;
}

View File

@@ -804,7 +804,7 @@ static PyObject *bpy_bmlayercollection_subscript(BPy_BMLayerCollection *self, Py
{
/* don't need error check here */
if (PyUnicode_Check(key)) {
return bpy_bmlayercollection_subscript_str(self, _PyUnicode_AsString(key));
return bpy_bmlayercollection_subscript_str(self, PyUnicode_AsUTF8(key));
}
if (PyIndex_Check(key)) {
const Py_ssize_t i = PyNumber_AsSsize_t(key, PyExc_IndexError);
@@ -862,7 +862,7 @@ static PyObject *bpy_bmlayercollection_subscript(BPy_BMLayerCollection *self, Py
static int bpy_bmlayercollection_contains(BPy_BMLayerCollection *self, PyObject *value)
{
const char *keyname = _PyUnicode_AsString(value);
const char *keyname = PyUnicode_AsUTF8(value);
CustomData *data;
int index;

View File

@@ -189,13 +189,13 @@ static int BPy_IDGroup_SetData(BPy_IDProperty *self, IDProperty *prop, PyObject
st = (char *)PyC_UnicodeAsByte(value, &value_coerce);
alloc_len = strlen(st) + 1;
st = _PyUnicode_AsString(value);
st = PyUnicode_AsUTF8(value);
IDP_ResizeArray(prop, alloc_len);
memcpy(IDP_Array(prop), st, alloc_len);
Py_XDECREF(value_coerce);
}
# else
st = _PyUnicode_AsString(value);
st = PyUnicode_AsUTF8(value);
IDP_ResizeArray(prop, strlen(st) + 1);
strcpy(IDP_Array(prop), st);
# endif
@@ -253,7 +253,7 @@ static int BPy_IDGroup_SetName(BPy_IDProperty *self, PyObject *value, void *UNUS
return -1;
}
name = _PyUnicode_AsStringAndSize(value, &name_size);
name = PyUnicode_AsUTF8AndSize(value, &name_size);
if (name_size >= MAX_IDPROP_NAME) {
PyErr_SetString(PyExc_TypeError, "string length cannot exceed 63 characters!");
@@ -300,7 +300,7 @@ static PyObject *BPy_IDGroup_Map_GetItem(BPy_IDProperty *self, PyObject *item)
return NULL;
}
name = _PyUnicode_AsString(item);
name = PyUnicode_AsUTF8(item);
if (name == NULL) {
PyErr_SetString(PyExc_TypeError, "only strings are allowed as keys of ID properties");
@@ -358,7 +358,7 @@ static const char *idp_try_read_name(PyObject *name_obj)
const char *name = NULL;
if (name_obj) {
Py_ssize_t name_size;
name = _PyUnicode_AsStringAndSize(name_obj, &name_size);
name = PyUnicode_AsUTF8AndSize(name_obj, &name_size);
if (name == NULL) {
PyErr_Format(PyExc_KeyError,
@@ -420,7 +420,7 @@ static IDProperty *idp_from_PyUnicode(const char *name, PyObject *ob)
prop = IDP_New(IDP_STRING, &val, name);
Py_XDECREF(value_coerce);
#else
val.str = _PyUnicode_AsString(ob);
val.str = PyUnicode_AsUTF8(ob);
prop = IDP_New(IDP_STRING, val, name);
#endif
return prop;
@@ -722,7 +722,7 @@ int BPy_Wrap_SetMapItem(IDProperty *prop, PyObject *key, PyObject *val)
if (val == NULL) { /* del idprop[key] */
IDProperty *pkey;
const char *name = _PyUnicode_AsString(key);
const char *name = PyUnicode_AsUTF8(key);
if (name == NULL) {
PyErr_Format(PyExc_KeyError, "expected a string, not %.200s", Py_TYPE(key)->tp_name);
@@ -1050,7 +1050,7 @@ static PyObject *BPy_IDGroup_items(BPy_IDProperty *self)
static int BPy_IDGroup_Contains(BPy_IDProperty *self, PyObject *value)
{
const char *name = _PyUnicode_AsString(value);
const char *name = PyUnicode_AsUTF8(value);
if (!name) {
PyErr_Format(PyExc_TypeError, "expected a string, not a %.200s", Py_TYPE(value)->tp_name);
@@ -1192,12 +1192,8 @@ PyTypeObject BPy_IDGroup_Type = {
/* Methods to implement standard operations */
NULL, /* destructor tp_dealloc; */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* destructor tp_dealloc; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL, /* cmpfunc tp_compare; */
@@ -1605,12 +1601,8 @@ PyTypeObject BPy_IDArray_Type = {
/* Methods to implement standard operations */
NULL, /* destructor tp_dealloc; */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* destructor tp_dealloc; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL, /* cmpfunc tp_compare; */
@@ -1726,12 +1718,8 @@ PyTypeObject BPy_IDGroup_Iter_Type = {
/* Methods to implement standard operations */
NULL, /* destructor tp_dealloc; */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* destructor tp_dealloc; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL, /* cmpfunc tp_compare; */
@@ -1811,16 +1799,10 @@ static PyObject *BPyInit_idprop_types(void)
IDProp_Init_Types();
#define MODULE_TYPE_ADD(s, t) \
PyModule_AddObject(s, t.tp_name, (PyObject *)&t); \
Py_INCREF((PyObject *)&t)
/* bmesh_py_types.c */
MODULE_TYPE_ADD(submodule, BPy_IDGroup_Type);
MODULE_TYPE_ADD(submodule, BPy_IDGroup_Iter_Type);
MODULE_TYPE_ADD(submodule, BPy_IDArray_Type);
#undef MODULE_TYPE_ADD
PyModule_AddType(submodule, &BPy_IDGroup_Type);
PyModule_AddType(submodule, &BPy_IDGroup_Iter_Type);
PyModule_AddType(submodule, &BPy_IDArray_Type);
return submodule;
}

View File

@@ -40,6 +40,8 @@
#include <errno.h>
#include <fcntl.h>
static PyObject *BPyInit_imbuf_types(void);
static PyObject *Py_ImBuf_CreatePyObject(ImBuf *ibuf);
/* -------------------------------------------------------------------- */
@@ -267,7 +269,7 @@ static int py_imbuf_filepath_set(Py_ImBuf *self, PyObject *value, void *UNUSED(c
ImBuf *ibuf = self->ibuf;
const Py_ssize_t value_str_len_max = sizeof(ibuf->name);
Py_ssize_t value_str_len;
const char *value_str = _PyUnicode_AsStringAndSize(value, &value_str_len);
const char *value_str = PyUnicode_AsUTF8AndSize(value, &value_str_len);
if (value_str_len >= value_str_len_max) {
PyErr_Format(PyExc_TypeError, "filepath length over %zd", value_str_len_max - 1);
return -1;
@@ -347,15 +349,11 @@ PyTypeObject Py_ImBuf_Type = {
/* Methods to implement standard operations */
(destructor)py_imbuf_dealloc, /* destructor tp_dealloc; */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL, /* cmpfunc tp_compare; */
(reprfunc)py_imbuf_repr, /* reprfunc tp_repr; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL, /* cmpfunc tp_compare; */
(reprfunc)py_imbuf_repr, /* reprfunc tp_repr; */
/* Method suites for standard classes */
@@ -526,7 +524,7 @@ static PyObject *M_imbuf_write(PyObject *UNUSED(self), PyObject *args, PyObject
/** \} */
/* -------------------------------------------------------------------- */
/** \name Module Definition
/** \name Module Definition (`imbuf`)
* \{ */
static PyMethodDef IMB_methods[] = {
@@ -551,11 +549,51 @@ static struct PyModuleDef IMB_module_def = {
PyObject *BPyInit_imbuf(void)
{
PyObject *mod;
PyObject *submodule;
PyObject *sys_modules = PyImport_GetModuleDict();
submodule = PyModule_Create(&IMB_module_def);
mod = PyModule_Create(&IMB_module_def);
PyType_Ready(&Py_ImBuf_Type);
/* `imbuf.types` */
PyModule_AddObject(mod, "types", (submodule = BPyInit_imbuf_types()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
return mod;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Module Definition (`imbuf.types`)
*
* `imbuf.types` module, only include this to expose access to `imbuf.types.ImBuf`
* for docs and the ability to use with built-ins such as `isinstance`, `issubclass`.
* \{ */
PyDoc_STRVAR(IMB_types_doc, "This module provides access to image buffer types.");
static struct PyModuleDef IMB_types_module_def = {
PyModuleDef_HEAD_INIT,
"imbuf.types", /* m_name */
IMB_types_doc, /* m_doc */
0, /* m_size */
NULL, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
/**
 * Create the `imbuf.types` module, exposing `imbuf.types.ImBuf` so scripts can
 * use it with built-ins such as `isinstance` / `issubclass` and for docs.
 *
 * \return New module reference, or NULL with a Python exception set.
 */
PyObject *BPyInit_imbuf_types(void)
{
  PyObject *submodule = PyModule_Create(&IMB_types_module_def);
  if (submodule == NULL) {
    /* #PyModule_Create can fail; without this check #PyModule_AddType below
     * would be handed a NULL module. Propagate the error instead. */
    return NULL;
  }
  if (PyType_Ready(&Py_ImBuf_Type) < 0) {
    /* Don't leak the freshly created module on type-initialization failure. */
    Py_DECREF(submodule);
    return NULL;
  }
  PyModule_AddType(submodule, &Py_ImBuf_Type);
  return submodule;
}

View File

@@ -257,7 +257,7 @@ int PyC_ParseBool(PyObject *o, void *p)
int PyC_ParseStringEnum(PyObject *o, void *p)
{
struct PyC_StringEnum *e = p;
const char *value = _PyUnicode_AsString(o);
const char *value = PyUnicode_AsUTF8(o);
if (value == NULL) {
PyErr_Format(PyExc_ValueError, "expected a string, got %s", Py_TYPE(o)->tp_name);
return 0;
@@ -282,6 +282,17 @@ int PyC_ParseStringEnum(PyObject *o, void *p)
return 0;
}
const char *PyC_StringEnum_FindIDFromValue(const struct PyC_StringEnumItems *items,
const int value)
{
for (int i = 0; items[i].id; i++) {
if (items[i].value == value) {
return items[i].id;
}
}
return NULL;
}
/* silly function, we dont use arg. just check its compatible with __deepcopy__ */
int PyC_CheckArgs_DeepCopy(PyObject *args)
{
@@ -343,7 +354,7 @@ void PyC_ObSpitStr(char *result, size_t result_len, PyObject *var)
(int)var->ob_refcnt,
(void *)var,
type ? type->tp_name : null_str,
var_str ? _PyUnicode_AsString(var_str) : "<error>");
var_str ? PyUnicode_AsUTF8(var_str) : "<error>");
if (var_str != NULL) {
Py_DECREF(var_str);
}
@@ -405,7 +416,7 @@ void PyC_FileAndNum(const char **r_filename, int *r_lineno)
/* when executing a script */
if (r_filename) {
*r_filename = _PyUnicode_AsString(frame->f_code->co_filename);
*r_filename = PyUnicode_AsUTF8(frame->f_code->co_filename);
}
/* when executing a module */
@@ -418,7 +429,7 @@ void PyC_FileAndNum(const char **r_filename, int *r_lineno)
if (mod) {
PyObject *mod_file = PyModule_GetFilenameObject(mod);
if (mod_file) {
*r_filename = _PyUnicode_AsString(mod_name);
*r_filename = PyUnicode_AsUTF8(mod_name);
Py_DECREF(mod_file);
}
else {
@@ -428,7 +439,7 @@ void PyC_FileAndNum(const char **r_filename, int *r_lineno)
/* unlikely, fallback */
if (*r_filename == NULL) {
*r_filename = _PyUnicode_AsString(mod_name);
*r_filename = PyUnicode_AsUTF8(mod_name);
}
}
}
@@ -569,9 +580,9 @@ void PyC_Err_PrintWithFunc(PyObject *py_func)
/* use py style error */
fprintf(stderr,
"File \"%s\", line %d, in %s\n",
_PyUnicode_AsString(f_code->co_filename),
PyUnicode_AsUTF8(f_code->co_filename),
f_code->co_firstlineno,
_PyUnicode_AsString(((PyFunctionObject *)py_func)->func_name));
PyUnicode_AsUTF8(((PyFunctionObject *)py_func)->func_name));
}
/** \} */
@@ -740,7 +751,7 @@ const char *PyC_UnicodeAsByteAndSize(PyObject *py_str, Py_ssize_t *size, PyObjec
{
const char *result;
result = _PyUnicode_AsStringAndSize(py_str, size);
result = PyUnicode_AsUTF8AndSize(py_str, size);
if (result) {
/* 99% of the time this is enough but we better support non unicode
@@ -767,7 +778,7 @@ const char *PyC_UnicodeAsByte(PyObject *py_str, PyObject **coerce)
{
const char *result;
result = _PyUnicode_AsString(py_str);
result = PyUnicode_AsUTF8(py_str);
if (result) {
/* 99% of the time this is enough but we better support non unicode
@@ -879,40 +890,6 @@ void PyC_MainModule_Restore(PyObject *main_mod)
Py_XDECREF(main_mod);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name #Py_SetPythonHome Wrapper
* \{ */
/**
* - Must be called before #Py_Initialize.
* - Expects output of `BKE_appdir_folder_id(BLENDER_PYTHON, NULL)`.
* - Note that the `PYTHONPATH` environment variable isn't reliable, see T31506.
* Use #Py_SetPythonHome instead.
*/
/* Point the embedded interpreter at Blender's bundled Python installation.
 * Per the note above: must be called before #Py_Initialize and expects the
 * output of `BKE_appdir_folder_id(BLENDER_PYTHON, NULL)`. */
void PyC_SetHomePath(const char *py_path_bundle)
{
# ifdef __APPLE__
  /* OSX allow file/directory names to contain : character (represented as / in the Finder)
   * but current Python lib (release 3.1.1) doesn't handle these correctly */
  if (strchr(py_path_bundle, ':')) {
    fprintf(stderr,
            "Warning! Blender application is located in a path containing ':' or '/' chars\n"
            "This may make python import function fail\n");
  }
# endif

  /* Set the environment path. */
  wchar_t py_path_bundle_wchar[1024];

  /* Convert UTF-8 to the wide string #Py_SetPythonHome requires.
   * Can't use `mbstowcs` on linux gives bug: T23018. */
  BLI_strncpy_wchar_from_utf8(
      py_path_bundle_wchar, py_path_bundle, ARRAY_SIZE(py_path_bundle_wchar));

  Py_SetPythonHome(py_path_bundle_wchar);
}
bool PyC_IsInterpreterActive(void)
{
/* instead of PyThreadState_Get, which calls Py_FatalError */
@@ -1180,7 +1157,7 @@ int PyC_FlagSet_ToBitfield(PyC_FlagSet *items,
*r_value = 0;
while (_PySet_NextEntry(value, &pos, &key, &hash)) {
const char *param = _PyUnicode_AsString(key);
const char *param = PyUnicode_AsUTF8(key);
if (param == NULL) {
PyErr_Format(PyExc_TypeError,
@@ -1358,7 +1335,7 @@ bool PyC_RunString_AsStringAndSize(const char *imports[],
const char *val;
Py_ssize_t val_len;
val = _PyUnicode_AsStringAndSize(retval, &val_len);
val = PyUnicode_AsUTF8AndSize(retval, &val_len);
if (val == NULL && PyErr_Occurred()) {
ok = false;
}
@@ -1483,7 +1460,6 @@ uint32_t PyC_Long_AsU32(PyObject *value)
/* -------------------------------------------------------------------- */
/** \name Py_buffer Utils
*
* \{ */
char PyC_StructFmt_type_from_str(const char *typestr)

View File

@@ -88,8 +88,6 @@ bool PyC_NameSpace_ImportArray(PyObject *py_dict, const char *imports[]);
void PyC_MainModule_Backup(PyObject **r_main_mod);
void PyC_MainModule_Restore(PyObject *main_mod);
void PyC_SetHomePath(const char *py_path_bundle);
bool PyC_IsInterpreterActive(void);
void *PyC_RNA_AsPointer(PyObject *value, const char *type_name);
@@ -142,6 +140,8 @@ struct PyC_StringEnum {
};
int PyC_ParseStringEnum(PyObject *o, void *p);
const char *PyC_StringEnum_FindIDFromValue(const struct PyC_StringEnumItems *items,
const int value);
int PyC_CheckArgs_DeepCopy(PyObject *args);

View File

@@ -33,25 +33,37 @@ set(INC_SYS
)
set(SRC
gpu_py.c
gpu_py_api.c
gpu_py_batch.c
gpu_py_buffer.c
gpu_py_element.c
gpu_py_framebuffer.c
gpu_py_matrix.c
gpu_py_offscreen.c
gpu_py_select.c
gpu_py_shader.c
gpu_py_state.c
gpu_py_texture.c
gpu_py_types.c
gpu_py_uniformbuffer.c
gpu_py_vertex_buffer.c
gpu_py_vertex_format.c
gpu_py.h
gpu_py_api.h
gpu_py_batch.h
gpu_py_buffer.h
gpu_py_element.h
gpu_py_framebuffer.h
gpu_py_matrix.h
gpu_py_offscreen.h
gpu_py_select.h
gpu_py_shader.h
gpu_py_state.h
gpu_py_texture.h
gpu_py_types.h
gpu_py_uniformbuffer.h
gpu_py_vertex_buffer.h
gpu_py_vertex_format.h
)

View File

@@ -0,0 +1,79 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "GPU_init_exit.h"
#include "GPU_primitive.h"
#include "GPU_texture.h"
#include "../generic/py_capi_utils.h"
#include "gpu_py.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name GPU Enums
* \{ */
/* Mapping from #GPUPrimType values to the string identifiers accepted from
 * Python (e.g. `type='TRIS'`). NULL-terminated, for use with
 * #PyC_ParseStringEnum. */
struct PyC_StringEnumItems bpygpu_primtype_items[] = {
    {GPU_PRIM_POINTS, "POINTS"},
    {GPU_PRIM_LINES, "LINES"},
    {GPU_PRIM_TRIS, "TRIS"},
    {GPU_PRIM_LINE_STRIP, "LINE_STRIP"},
    {GPU_PRIM_LINE_LOOP, "LINE_LOOP"},
    {GPU_PRIM_TRI_STRIP, "TRI_STRIP"},
    {GPU_PRIM_TRI_FAN, "TRI_FAN"},
    {GPU_PRIM_LINES_ADJ, "LINES_ADJ"},
    {GPU_PRIM_TRIS_ADJ, "TRIS_ADJ"},
    {GPU_PRIM_LINE_STRIP_ADJ, "LINE_STRIP_ADJ"},
    {0, NULL}, /* Sentinel terminator. */
};
/* Mapping from `GPU_DATA_*` format values to their Python string names.
 * NULL-terminated, for use with #PyC_ParseStringEnum. */
struct PyC_StringEnumItems bpygpu_dataformat_items[] = {
    {GPU_DATA_FLOAT, "FLOAT"},
    {GPU_DATA_INT, "INT"},
    {GPU_DATA_UINT, "UINT"},
    {GPU_DATA_UBYTE, "UBYTE"},
    {GPU_DATA_UINT_24_8, "UINT_24_8"},
    {GPU_DATA_10_11_11_REV, "10_11_11_REV"},
    {0, NULL}, /* Sentinel terminator. */
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Utilities
* \{ */
/**
 * Check that the GPU subsystem is initialized before a Python caller attempts
 * any drawing; when it is not, raise `SystemError` on the Python side.
 *
 * \return true when GPU calls may proceed, false with an exception set otherwise.
 */
bool bpygpu_is_init_or_error(void)
{
  if (GPU_is_init()) {
    return true;
  }
  PyErr_SetString(PyExc_SystemError,
                  "GPU functions for drawing are not available in background mode");
  return false;
}
/** \} */

View File

@@ -0,0 +1,37 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once
/* Shared string-enum tables defined in `gpu_py.c` (primitive types & data formats). */
extern struct PyC_StringEnumItems bpygpu_primtype_items[];
extern struct PyC_StringEnumItems bpygpu_dataformat_items[];

/* Returns false (with `SystemError` set) when the GPU is not initialized,
 * e.g. when running in background mode. */
bool bpygpu_is_init_or_error(void);

/* Guard for functions returning `PyObject *`: bail out with NULL
 * (exception already set by #bpygpu_is_init_or_error). */
#define BPYGPU_IS_INIT_OR_ERROR_OBJ \
  if (UNLIKELY(!bpygpu_is_init_or_error())) { \
    return NULL; \
  } \
  ((void)0)

/* Guard for functions returning `int` (e.g. setters): bail out with -1. */
#define BPYGPU_IS_INIT_OR_ERROR_INT \
  if (UNLIKELY(!bpygpu_is_init_or_error())) { \
    return -1; \
  } \
  ((void)0)

View File

@@ -30,88 +30,24 @@
#include "../generic/python_utildefines.h"
#include "GPU_init_exit.h"
#include "GPU_primitive.h"
#include "gpu_py_matrix.h"
#include "gpu_py_select.h"
#include "gpu_py_state.h"
#include "gpu_py_types.h"
#include "gpu_py_api.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name Utils to invalidate functions
* \{ */
bool bpygpu_is_init_or_error(void)
{
if (!GPU_is_init()) {
PyErr_SetString(PyExc_SystemError,
"GPU functions for drawing are not available in background mode");
return false;
}
return true;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Primitive Type Utils
* \{ */
/* `PyArg_Parse*` "O&" converter: parse a Python string such as "TRIS" into a
 * #GPUPrimType written through `p`. Returns 1 on success, 0 on failure with a
 * Python exception set (the convention converter callbacks must follow). */
int bpygpu_ParsePrimType(PyObject *o, void *p)
{
  Py_ssize_t mode_id_len;
  const char *mode_id = _PyUnicode_AsStringAndSize(o, &mode_id_len);
  if (mode_id == NULL) {
    PyErr_Format(PyExc_ValueError, "expected a string, got %s", Py_TYPE(o)->tp_name);
    return 0;
  }
/* Compare the length first to cheaply reject mismatches before the string compare. */
#define MATCH_ID(id) \
  if (mode_id_len == strlen(STRINGIFY(id))) { \
    if (STREQ(mode_id, STRINGIFY(id))) { \
      mode = GPU_PRIM_##id; \
      goto success; \
    } \
  } \
  ((void)0)

  GPUPrimType mode;
  MATCH_ID(POINTS);
  MATCH_ID(LINES);
  MATCH_ID(TRIS);
  MATCH_ID(LINE_STRIP);
  MATCH_ID(LINE_LOOP);
  MATCH_ID(TRI_STRIP);
  MATCH_ID(TRI_FAN);
  MATCH_ID(LINES_ADJ);
  MATCH_ID(TRIS_ADJ);
  MATCH_ID(LINE_STRIP_ADJ);

#undef MATCH_ID

  /* No identifier matched: report the unrecognized literal. */
  PyErr_Format(PyExc_ValueError, "unknown type literal: '%s'", mode_id);
  return 0;

success:
  (*(GPUPrimType *)p) = mode;
  return 1;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPU Module
* \{ */
PyDoc_STRVAR(GPU_doc,
PyDoc_STRVAR(pygpu_doc,
"This module provides Python wrappers for the GPU implementation in Blender.\n"
"Some higher level functions can be found in the `gpu_extras` module.");
static struct PyModuleDef GPU_module_def = {
static struct PyModuleDef pygpu_module_def = {
PyModuleDef_HEAD_INIT,
.m_name = "gpu",
.m_doc = GPU_doc,
.m_doc = pygpu_doc,
};
PyObject *BPyInit_gpu(void)
@@ -120,18 +56,21 @@ PyObject *BPyInit_gpu(void)
PyObject *submodule;
PyObject *mod;
mod = PyModule_Create(&GPU_module_def);
mod = PyModule_Create(&pygpu_module_def);
PyModule_AddObject(mod, "types", (submodule = BPyInit_gpu_types()));
PyModule_AddObject(mod, "types", (submodule = bpygpu_types_init()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "matrix", (submodule = BPyInit_gpu_matrix()));
PyModule_AddObject(mod, "matrix", (submodule = bpygpu_matrix_init()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "select", (submodule = BPyInit_gpu_select()));
PyModule_AddObject(mod, "select", (submodule = bpygpu_select_init()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "shader", (submodule = BPyInit_gpu_shader()));
PyModule_AddObject(mod, "shader", (submodule = bpygpu_shader_init()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "state", (submodule = bpygpu_state_init()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
return mod;

View File

@@ -20,18 +20,8 @@
#pragma once
int bpygpu_ParsePrimType(PyObject *o, void *p);
/* Each type object could have a method for free GPU resources.
* However, it is currently of little use. */
// #define BPYGPU_USE_GPUOBJ_FREE_METHOD
PyObject *BPyInit_gpu(void);
bool bpygpu_is_init_or_error(void);
#define BPYGPU_IS_INIT_OR_ERROR_OBJ \
if (UNLIKELY(!bpygpu_is_init_or_error())) { \
return NULL; \
} \
((void)0)
#define BPYGPU_IS_INIT_OR_ERROR_INT \
if (UNLIKELY(!bpygpu_is_init_or_error())) { \
return -1; \
} \
((void)0)

View File

@@ -38,17 +38,18 @@
#include "../generic/py_capi_utils.h"
#include "gpu_py_api.h"
#include "gpu_py_batch.h" /* own include */
#include "gpu_py.h"
#include "gpu_py_element.h"
#include "gpu_py_shader.h"
#include "gpu_py_vertex_buffer.h"
#include "gpu_py_batch.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name Utility Functions
* \{ */
static bool py_batch_is_program_or_error(BPyGPUBatch *self)
static bool pygpu_batch_is_program_or_error(BPyGPUBatch *self)
{
if (!self->batch->shader) {
PyErr_SetString(PyExc_RuntimeError, "batch does not have any program assigned to it");
@@ -63,56 +64,50 @@ static bool py_batch_is_program_or_error(BPyGPUBatch *self)
/** \name GPUBatch Type
* \{ */
static PyObject *py_Batch_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
static PyObject *pygpu_batch__tp_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
const char *exc_str_missing_arg = "GPUBatch.__new__() missing required argument '%s' (pos %d)";
struct {
GPUPrimType type_id;
BPyGPUVertBuf *py_vertbuf;
BPyGPUIndexBuf *py_indexbuf;
} params = {GPU_PRIM_NONE, NULL, NULL};
struct PyC_StringEnum prim_type = {bpygpu_primtype_items, GPU_PRIM_NONE};
BPyGPUVertBuf *py_vertbuf = NULL;
BPyGPUIndexBuf *py_indexbuf = NULL;
static const char *_keywords[] = {"type", "buf", "elem", NULL};
static _PyArg_Parser _parser = {"|$O&O!O!:GPUBatch.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args,
kwds,
&_parser,
bpygpu_ParsePrimType,
&params.type_id,
PyC_ParseStringEnum,
&prim_type,
&BPyGPUVertBuf_Type,
&params.py_vertbuf,
&py_vertbuf,
&BPyGPUIndexBuf_Type,
&params.py_indexbuf)) {
&py_indexbuf)) {
return NULL;
}
if (params.type_id == GPU_PRIM_NONE) {
PyErr_Format(PyExc_TypeError, exc_str_missing_arg, _keywords[0], 1);
return NULL;
}
BLI_assert(prim_type.value_found != GPU_PRIM_NONE);
if (params.py_vertbuf == NULL) {
if (py_vertbuf == NULL) {
PyErr_Format(PyExc_TypeError, exc_str_missing_arg, _keywords[1], 2);
return NULL;
}
GPUBatch *batch = GPU_batch_create(params.type_id,
params.py_vertbuf->buf,
params.py_indexbuf ? params.py_indexbuf->elem : NULL);
GPUBatch *batch = GPU_batch_create(
prim_type.value_found, py_vertbuf->buf, py_indexbuf ? py_indexbuf->elem : NULL);
BPyGPUBatch *ret = (BPyGPUBatch *)BPyGPUBatch_CreatePyObject(batch);
#ifdef USE_GPU_PY_REFERENCES
ret->references = PyList_New(params.py_indexbuf ? 2 : 1);
PyList_SET_ITEM(ret->references, 0, (PyObject *)params.py_vertbuf);
Py_INCREF(params.py_vertbuf);
ret->references = PyList_New(py_indexbuf ? 2 : 1);
PyList_SET_ITEM(ret->references, 0, (PyObject *)py_vertbuf);
Py_INCREF(py_vertbuf);
if (params.py_indexbuf != NULL) {
PyList_SET_ITEM(ret->references, 1, (PyObject *)params.py_indexbuf);
Py_INCREF(params.py_indexbuf);
if (py_indexbuf != NULL) {
PyList_SET_ITEM(ret->references, 1, (PyObject *)py_indexbuf);
Py_INCREF(py_indexbuf);
}
PyObject_GC_Track(ret);
@@ -121,7 +116,7 @@ static PyObject *py_Batch_new(PyTypeObject *UNUSED(type), PyObject *args, PyObje
return (PyObject *)ret;
}
PyDoc_STRVAR(py_Batch_vertbuf_add_doc,
PyDoc_STRVAR(pygpu_batch_vertbuf_add_doc,
".. method:: vertbuf_add(buf)\n"
"\n"
" Add another vertex buffer to the Batch.\n"
@@ -134,7 +129,7 @@ PyDoc_STRVAR(py_Batch_vertbuf_add_doc,
" :param buf: The vertex buffer that will be added to the batch.\n"
" :type buf: :class:`gpu.types.GPUVertBuf`\n"
);
static PyObject *py_Batch_vertbuf_add(BPyGPUBatch *self, BPyGPUVertBuf *py_buf)
static PyObject *pygpu_batch_vertbuf_add(BPyGPUBatch *self, BPyGPUVertBuf *py_buf)
{
if (!BPyGPUVertBuf_Check(py_buf)) {
PyErr_Format(PyExc_TypeError, "Expected a GPUVertBuf, got %s", Py_TYPE(py_buf)->tp_name);
@@ -167,7 +162,7 @@ static PyObject *py_Batch_vertbuf_add(BPyGPUBatch *self, BPyGPUVertBuf *py_buf)
}
PyDoc_STRVAR(
py_Batch_program_set_doc,
pygpu_batch_program_set_doc,
".. method:: program_set(program)\n"
"\n"
" Assign a shader to this batch that will be used for drawing when not overwritten later.\n"
@@ -177,7 +172,7 @@ PyDoc_STRVAR(
"\n"
" :param program: The program/shader the batch will use in future draw calls.\n"
" :type program: :class:`gpu.types.GPUShader`\n");
static PyObject *py_Batch_program_set(BPyGPUBatch *self, BPyGPUShader *py_shader)
static PyObject *pygpu_batch_program_set(BPyGPUBatch *self, BPyGPUShader *py_shader)
{
if (!BPyGPUShader_Check(py_shader)) {
PyErr_Format(PyExc_TypeError, "Expected a GPUShader, got %s", Py_TYPE(py_shader)->tp_name);
@@ -208,7 +203,7 @@ static PyObject *py_Batch_program_set(BPyGPUBatch *self, BPyGPUShader *py_shader
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_Batch_draw_doc,
PyDoc_STRVAR(pygpu_batch_draw_doc,
".. method:: draw(program=None)\n"
"\n"
" Run the drawing program with the parameters assigned to the batch.\n"
@@ -216,7 +211,7 @@ PyDoc_STRVAR(py_Batch_draw_doc,
" :param program: Program that performs the drawing operations.\n"
" If ``None`` is passed, the last program set to this batch will run.\n"
" :type program: :class:`gpu.types.GPUShader`\n");
static PyObject *py_Batch_draw(BPyGPUBatch *self, PyObject *args)
static PyObject *pygpu_batch_draw(BPyGPUBatch *self, PyObject *args)
{
BPyGPUShader *py_program = NULL;
@@ -224,7 +219,7 @@ static PyObject *py_Batch_draw(BPyGPUBatch *self, PyObject *args)
return NULL;
}
if (py_program == NULL) {
if (!py_batch_is_program_or_error(self)) {
if (!pygpu_batch_is_program_or_error(self)) {
return NULL;
}
}
@@ -236,42 +231,42 @@ static PyObject *py_Batch_draw(BPyGPUBatch *self, PyObject *args)
Py_RETURN_NONE;
}
static PyObject *py_Batch_program_use_begin(BPyGPUBatch *self)
static PyObject *pygpu_batch_program_use_begin(BPyGPUBatch *self)
{
if (!py_batch_is_program_or_error(self)) {
if (!pygpu_batch_is_program_or_error(self)) {
return NULL;
}
GPU_shader_bind(self->batch->shader);
Py_RETURN_NONE;
}
static PyObject *py_Batch_program_use_end(BPyGPUBatch *self)
static PyObject *pygpu_batch_program_use_end(BPyGPUBatch *self)
{
if (!py_batch_is_program_or_error(self)) {
if (!pygpu_batch_is_program_or_error(self)) {
return NULL;
}
GPU_shader_unbind();
Py_RETURN_NONE;
}
static struct PyMethodDef py_Batch_methods[] = {
{"vertbuf_add", (PyCFunction)py_Batch_vertbuf_add, METH_O, py_Batch_vertbuf_add_doc},
{"program_set", (PyCFunction)py_Batch_program_set, METH_O, py_Batch_program_set_doc},
{"draw", (PyCFunction)py_Batch_draw, METH_VARARGS, py_Batch_draw_doc},
{"_program_use_begin", (PyCFunction)py_Batch_program_use_begin, METH_NOARGS, ""},
{"_program_use_end", (PyCFunction)py_Batch_program_use_end, METH_NOARGS, ""},
static struct PyMethodDef pygpu_batch__tp_methods[] = {
{"vertbuf_add", (PyCFunction)pygpu_batch_vertbuf_add, METH_O, pygpu_batch_vertbuf_add_doc},
{"program_set", (PyCFunction)pygpu_batch_program_set, METH_O, pygpu_batch_program_set_doc},
{"draw", (PyCFunction)pygpu_batch_draw, METH_VARARGS, pygpu_batch_draw_doc},
{"_program_use_begin", (PyCFunction)pygpu_batch_program_use_begin, METH_NOARGS, ""},
{"_program_use_end", (PyCFunction)pygpu_batch_program_use_end, METH_NOARGS, ""},
{NULL, NULL, 0, NULL},
};
#ifdef USE_GPU_PY_REFERENCES
static int py_Batch_traverse(BPyGPUBatch *self, visitproc visit, void *arg)
static int pygpu_batch__tp_traverse(BPyGPUBatch *self, visitproc visit, void *arg)
{
Py_VISIT(self->references);
return 0;
}
static int py_Batch_clear(BPyGPUBatch *self)
static int pygpu_batch__tp_clear(BPyGPUBatch *self)
{
Py_CLEAR(self->references);
return 0;
@@ -279,14 +274,14 @@ static int py_Batch_clear(BPyGPUBatch *self)
#endif
static void py_Batch_dealloc(BPyGPUBatch *self)
static void pygpu_batch__tp_dealloc(BPyGPUBatch *self)
{
GPU_batch_discard(self->batch);
#ifdef USE_GPU_PY_REFERENCES
if (self->references) {
PyObject_GC_UnTrack(self);
py_Batch_clear(self);
pygpu_batch__tp_clear(self);
Py_XDECREF(self->references);
}
#endif
@@ -295,7 +290,7 @@ static void py_Batch_dealloc(BPyGPUBatch *self)
}
PyDoc_STRVAR(
py_gpu_batch_doc,
pygpu_batch__tp_doc,
".. class:: GPUBatch(type, buf, elem=None)\n"
"\n"
" Reusable container for drawable geometry.\n"
@@ -319,17 +314,17 @@ PyDoc_STRVAR(
PyTypeObject BPyGPUBatch_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUBatch",
.tp_basicsize = sizeof(BPyGPUBatch),
.tp_dealloc = (destructor)py_Batch_dealloc,
.tp_dealloc = (destructor)pygpu_batch__tp_dealloc,
#ifdef USE_GPU_PY_REFERENCES
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
.tp_doc = py_gpu_batch_doc,
.tp_traverse = (traverseproc)py_Batch_traverse,
.tp_clear = (inquiry)py_Batch_clear,
.tp_doc = pygpu_batch__tp_doc,
.tp_traverse = (traverseproc)pygpu_batch__tp_traverse,
.tp_clear = (inquiry)pygpu_batch__tp_clear,
#else
.tp_flags = Py_TPFLAGS_DEFAULT,
#endif
.tp_methods = py_Batch_methods,
.tp_new = py_Batch_new,
.tp_methods = pygpu_batch__tp_methods,
.tp_new = pygpu_batch__tp_new,
};
/** \} */

View File

@@ -0,0 +1,667 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* This file defines the gpu.state API.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "BLI_utildefines.h"
#include "MEM_guardedalloc.h"
#include "GPU_texture.h"
#include "../generic/py_capi_utils.h"
#include "gpu_py.h"
#include "gpu_py_buffer.h"
// #define PYGPU_BUFFER_PROTOCOL
/* -------------------------------------------------------------------- */
/** \name Utility Functions
* \{ */
/**
 * Return true when the two shape arrays are identical.
 *
 * The call-site in `Buffer.__new__` raises "array size does not match" when
 * this returns false, so equality must map to true. The previous
 * `(bool)memcmp(...)` returned true when the shapes *differed*, inverting the
 * check and rejecting exactly the matching shapes.
 */
static bool pygpu_buffer_dimensions_compare(int ndim,
                                            const Py_ssize_t *shape_a,
                                            const Py_ssize_t *shape_b)
{
  return memcmp(shape_a, shape_b, ndim * sizeof(Py_ssize_t)) == 0;
}
/**
 * Struct-module style format character for a #eGPUDataFormat,
 * or NULL when the format has no mapping.
 */
static const char *pygpu_buffer_formatstr(eGPUDataFormat data_format)
{
  switch (data_format) {
    case GPU_DATA_FLOAT:
      return "f";
    case GPU_DATA_INT:
      return "i";
    case GPU_DATA_UBYTE:
      return "B";
    case GPU_DATA_UINT:
    case GPU_DATA_UINT_24_8:
    case GPU_DATA_10_11_11_REV:
      /* All unsigned 32-bit formats share the same exported format char. */
      return "I";
    default:
      return NULL;
  }
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name BPyGPUBuffer API
* \{ */
/**
 * Wrap an existing memory block in a new #BPyGPUBuffer.
 *
 * \param parent: When non-NULL, the object whose memory `buf` points into;
 * it is INCREF'd and the new buffer is GC-tracked so the parent stays alive.
 * When NULL the new buffer owns `buf` (freed in #pygpu_buffer__tp_dealloc).
 * \param shape: Array of `shape_len` dimension sizes (copied, not borrowed).
 * \note The result of #_PyObject_GC_New is not checked for NULL here.
 */
static BPyGPUBuffer *pygpu_buffer_make_from_data(PyObject *parent,
                                                 const eGPUDataFormat format,
                                                 const int shape_len,
                                                 const Py_ssize_t *shape,
                                                 void *buf)
{
  BPyGPUBuffer *buffer = (BPyGPUBuffer *)_PyObject_GC_New(&BPyGPU_BufferType);
  buffer->parent = NULL;
  buffer->format = format;
  buffer->shape_len = shape_len;
  buffer->shape = MEM_mallocN(shape_len * sizeof(*buffer->shape), "BPyGPUBuffer shape");
  memcpy(buffer->shape, shape, shape_len * sizeof(*buffer->shape));
  buffer->buf.as_void = buf;
  if (parent) {
    /* Keep the memory owner alive; only views are GC-tracked. */
    Py_INCREF(parent);
    buffer->parent = parent;
    PyObject_GC_Track(buffer);
  }
  return buffer;
}
/**
 * `self[i]`: for 1-D buffers return a Python scalar; for multi-dimensional
 * buffers return a sub-#BPyGPUBuffer view that shares memory (with `self` as
 * parent).
 */
static PyObject *pygpu_buffer__sq_item(BPyGPUBuffer *self, int i)
{
  if (i >= self->shape[0] || i < 0) {
    PyErr_SetString(PyExc_IndexError, "array index out of range");
    return NULL;
  }
  const char *formatstr = pygpu_buffer_formatstr(self->format);
  if (self->shape_len == 1) {
    switch (self->format) {
      case GPU_DATA_FLOAT:
        return Py_BuildValue(formatstr, self->buf.as_float[i]);
      case GPU_DATA_INT:
        return Py_BuildValue(formatstr, self->buf.as_int[i]);
      case GPU_DATA_UBYTE:
        return Py_BuildValue(formatstr, self->buf.as_byte[i]);
      case GPU_DATA_UINT:
      case GPU_DATA_UINT_24_8:
      case GPU_DATA_10_11_11_REV:
        return Py_BuildValue(formatstr, self->buf.as_uint[i]);
    }
  }
  else {
    /* Byte offset of row `i`: item-size * product of the remaining dims. */
    int offset = i * GPU_texture_dataformat_size(self->format);
    for (int j = 1; j < self->shape_len; j++) {
      offset *= self->shape[j];
    }
    return (PyObject *)pygpu_buffer_make_from_data((PyObject *)self,
                                                   self->format,
                                                   self->shape_len - 1,
                                                   self->shape + 1,
                                                   self->buf.as_byte + offset);
  }
  /* NOTE(review): only reached for an unhandled 1-D format; returns NULL
   * without setting an exception (CPython will raise SystemError) — confirm
   * whether a default case with PyErr_SetString is wanted. */
  return NULL;
}
/** Convert the buffer's first dimension into a new Python list. */
static PyObject *pygpu_buffer_to_list(BPyGPUBuffer *self)
{
  const int count = self->shape[0];
  PyObject *result = PyList_New(count);
  for (int index = 0; index < count; index++) {
    PyList_SET_ITEM(result, index, pygpu_buffer__sq_item(self, index));
  }
  return result;
}
/**
 * Recursively convert the buffer to nested Python lists.
 * Exposed to Python as `Buffer.to_list()`.
 */
static PyObject *pygpu_buffer_to_list_recursive(BPyGPUBuffer *self)
{
  PyObject *list;
  if (self->shape_len > 1) {
    int i, len = self->shape[0];
    list = PyList_New(len);
    for (i = 0; i < len; i++) {
      /* "BPyGPUBuffer *sub_tmp" is a temporary object created just to be read for nested lists.
       * That is why it is decremented/freed soon after.
       * TODO: For efficiency, avoid creating #BPyGPUBuffer when creating nested lists. */
      BPyGPUBuffer *sub_tmp = (BPyGPUBuffer *)pygpu_buffer__sq_item(self, i);
      PyList_SET_ITEM(list, i, pygpu_buffer_to_list_recursive(sub_tmp));
      Py_DECREF(sub_tmp);
    }
  }
  else {
    /* Base case: a flat list of scalars. */
    list = pygpu_buffer_to_list(self);
  }
  return list;
}
/** Getter for `Buffer.dimensions`: the shape as a list of ints. */
static PyObject *pygpu_buffer_dimensions(BPyGPUBuffer *self, void *UNUSED(arg))
{
  PyObject *ret = PyList_New(self->shape_len);
  for (int axis = 0; axis < self->shape_len; axis++) {
    PyList_SET_ITEM(ret, axis, PyLong_FromLong(self->shape[axis]));
  }
  return ret;
}
/** GC traverse: the only owned Python reference is the optional parent. */
static int pygpu_buffer__tp_traverse(BPyGPUBuffer *self, visitproc visit, void *arg)
{
  Py_VISIT(self->parent);
  return 0;
}
/** GC clear: drop the parent reference (no-op for parent-less buffers). */
static int pygpu_buffer__tp_clear(BPyGPUBuffer *self)
{
  Py_CLEAR(self->parent);
  return 0;
}
/**
 * Destructor: a buffer with a parent is only a view onto the parent's memory,
 * so just drop the reference; a parent-less buffer owns `buf` and frees it.
 */
static void pygpu_buffer__tp_dealloc(BPyGPUBuffer *self)
{
  if (self->parent) {
    PyObject_GC_UnTrack(self);
    /* Py_CLEAR inside already releases the parent; the following XDECREF is
     * then a no-op on NULL. */
    pygpu_buffer__tp_clear(self);
    Py_XDECREF(self->parent);
  }
  else {
    MEM_freeN(self->buf.as_void);
  }
  MEM_freeN(self->shape);
  PyObject_GC_Del(self);
}
/** `repr()`: e.g. `Buffer(FLOAT, [1.0, 2.0])`. */
static PyObject *pygpu_buffer__tp_repr(BPyGPUBuffer *self)
{
  PyObject *contents = pygpu_buffer_to_list_recursive(self);
  const char *typestr = PyC_StringEnum_FindIDFromValue(bpygpu_dataformat_items, self->format);
  PyObject *repr = PyUnicode_FromFormat("Buffer(%s, %R)", typestr, contents);
  Py_DECREF(contents);
  return repr;
}
static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, int i, PyObject *v);
/**
 * Assign a sequence to `self[begin:end]` element-wise.
 *
 * The destination range is clamped to the buffer bounds and the sequence
 * length must equal the clamped range.
 *
 * \return 0 on success, -1 (with an exception set) on error.
 */
static int pygpu_buffer_ass_slice(BPyGPUBuffer *self,
                                  Py_ssize_t begin,
                                  Py_ssize_t end,
                                  PyObject *seq)
{
  PyObject *item;
  int count, err = 0;
  if (begin < 0) {
    begin = 0;
  }
  if (end > self->shape[0]) {
    end = self->shape[0];
  }
  if (begin > end) {
    begin = end;
  }
  if (!PySequence_Check(seq)) {
    PyErr_Format(PyExc_TypeError,
                 "buffer[:] = value, invalid assignment. "
                 "Expected a sequence, not an %.200s type",
                 Py_TYPE(seq)->tp_name);
    return -1;
  }
  /* re-use count var */
  if ((count = PySequence_Size(seq)) != (end - begin)) {
    /* "Expected" is the destination range, "given" is the sequence length
     * (previously swapped); cast `Py_ssize_t` before formatting with %d. */
    PyErr_Format(PyExc_TypeError,
                 "buffer[:] = value, size mismatch in assignment. "
                 "Expected: %d (given: %d)",
                 (int)(end - begin),
                 count);
    return -1;
  }
  for (count = begin; count < end; count++) {
    item = PySequence_GetItem(seq, count - begin);
    if (item) {
      err = pygpu_buffer__sq_ass_item(self, count, item);
      Py_DECREF(item);
    }
    else {
      err = -1;
    }
    if (err) {
      break;
    }
  }
  return err;
}
#define MAX_DIMENSIONS 64
/**
 * `Buffer.__new__(format, dimensions, data=None)`.
 *
 * `dimensions` may be a single int or a sequence of up to #MAX_DIMENSIONS
 * ints, each >= 1. When `data` supports the buffer protocol its memory is
 * wrapped directly after a shape check; otherwise a new zeroed buffer is
 * allocated and `data`, when given, is copied in element-wise.
 */
static PyObject *pygpu_buffer__tp_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
  PyObject *length_ob, *init = NULL;
  BPyGPUBuffer *buffer = NULL;
  Py_ssize_t shape[MAX_DIMENSIONS];
  Py_ssize_t i, shape_len = 0;
  if (kwds && PyDict_Size(kwds)) {
    PyErr_SetString(PyExc_TypeError, "Buffer(): takes no keyword args");
    return NULL;
  }
  const struct PyC_StringEnum pygpu_dataformat = {bpygpu_dataformat_items, GPU_DATA_FLOAT};
  if (!PyArg_ParseTuple(
          args, "O&O|O: Buffer", PyC_ParseStringEnum, &pygpu_dataformat, &length_ob, &init)) {
    return NULL;
  }
  if (PyLong_Check(length_ob)) {
    /* A single int: one-dimensional buffer. */
    shape_len = 1;
    if (((shape[0] = PyLong_AsLong(length_ob)) < 1)) {
      PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1");
      return NULL;
    }
  }
  else if (PySequence_Check(length_ob)) {
    shape_len = PySequence_Size(length_ob);
    if (shape_len > MAX_DIMENSIONS) {
      PyErr_SetString(PyExc_AttributeError,
                      "too many dimensions, max is " STRINGIFY(MAX_DIMENSIONS));
      return NULL;
    }
    if (shape_len < 1) {
      PyErr_SetString(PyExc_AttributeError, "sequence must have at least one dimension");
      return NULL;
    }
    /* Validate every dimension: must be an int >= 1. */
    for (i = 0; i < shape_len; i++) {
      PyObject *ob = PySequence_GetItem(length_ob, i);
      if (!PyLong_Check(ob)) {
        PyErr_Format(PyExc_TypeError,
                     "invalid dimension %i, expected an int, not a %.200s",
                     i,
                     Py_TYPE(ob)->tp_name);
        Py_DECREF(ob);
        return NULL;
      }
      shape[i] = PyLong_AsLong(ob);
      Py_DECREF(ob);
      if (shape[i] < 1) {
        PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1");
        return NULL;
      }
    }
  }
  else {
    PyErr_Format(PyExc_TypeError,
                 "invalid second argument argument expected a sequence "
                 "or an int, not a %.200s",
                 Py_TYPE(length_ob)->tp_name);
    return NULL;
  }
  if (init && PyObject_CheckBuffer(init)) {
    /* Zero-copy path: wrap the initializer's memory, keeping `init` alive as
     * the parent. The requested shape must equal the exporter's shape. */
    Py_buffer pybuffer;
    if (PyObject_GetBuffer(init, &pybuffer, PyBUF_ND | PyBUF_FORMAT) == -1) {
      /* PyObject_GetBuffer raise a PyExc_BufferError */
      return NULL;
    }
    if (shape_len != pybuffer.ndim ||
        !pygpu_buffer_dimensions_compare(shape_len, shape, pybuffer.shape)) {
      PyErr_Format(PyExc_TypeError, "array size does not match");
    }
    else {
      buffer = pygpu_buffer_make_from_data(
          init, pygpu_dataformat.value_found, pybuffer.ndim, shape, pybuffer.buf);
    }
    PyBuffer_Release(&pybuffer);
  }
  else {
    /* Allocate new (zeroed) storage, then optionally copy `init` in. */
    buffer = BPyGPU_Buffer_CreatePyObject(pygpu_dataformat.value_found, shape, shape_len, NULL);
    if (init && pygpu_buffer_ass_slice(buffer, 0, shape[0], init)) {
      Py_DECREF(buffer);
      return NULL;
    }
  }
  return (PyObject *)buffer;
}
/* BPyGPUBuffer sequence methods */

/** `len()`: size of the first dimension only. */
static int pygpu_buffer__sq_length(BPyGPUBuffer *self)
{
  return self->shape[0];
}
/** Return `self[begin:end]` (step 1) as a new list; indices are clamped. */
static PyObject *pygpu_buffer_slice(BPyGPUBuffer *self, Py_ssize_t begin, Py_ssize_t end)
{
  /* Clamp the requested range to the buffer bounds. */
  if (begin < 0) {
    begin = 0;
  }
  if (end > self->shape[0]) {
    end = self->shape[0];
  }
  if (begin > end) {
    begin = end;
  }
  const Py_ssize_t num = end - begin;
  PyObject *result = PyList_New(num);
  for (Py_ssize_t idx = 0; idx < num; idx++) {
    PyList_SET_ITEM(result, idx, pygpu_buffer__sq_item(self, begin + idx));
  }
  return result;
}
/**
 * `self[i] = v`. For multi-dimensional buffers this assigns a whole row.
 * \return 0 on success, -1 (with an exception set) on error.
 */
static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, int i, PyObject *v)
{
  if (i >= self->shape[0] || i < 0) {
    PyErr_SetString(PyExc_IndexError, "array assignment index out of range");
    return -1;
  }
  if (self->shape_len != 1) {
    /* Delegate to slice assignment on the sub-buffer row. */
    BPyGPUBuffer *row = (BPyGPUBuffer *)pygpu_buffer__sq_item(self, i);
    if (row) {
      const int ret = pygpu_buffer_ass_slice(row, 0, self->shape[1], v);
      Py_DECREF(row);
      return ret;
    }
    return -1;
  }
  switch (self->format) {
    case GPU_DATA_FLOAT:
      return PyArg_Parse(v, "f:Expected floats", &self->buf.as_float[i]) ? 0 : -1;
    case GPU_DATA_INT:
      return PyArg_Parse(v, "i:Expected ints", &self->buf.as_int[i]) ? 0 : -1;
    case GPU_DATA_UBYTE:
      return PyArg_Parse(v, "b:Expected ints", &self->buf.as_byte[i]) ? 0 : -1;
    case GPU_DATA_UINT:
    case GPU_DATA_UINT_24_8:
    case GPU_DATA_10_11_11_REV:
      /* Was "b" (unsigned char): that stored only a single byte into the
       * `uint` slot and rejected values > 255. "I" parses a full C unsigned
       * int, matching `as_uint`. */
      return PyArg_Parse(v, "I:Expected unsigned ints", &self->buf.as_uint[i]) ? 0 : -1;
    default:
      return 0; /* should never happen */
  }
}
/** `self[key]` where key is an int (negative allowed) or a step-1 slice. */
static PyObject *pygpu_buffer__mp_subscript(BPyGPUBuffer *self, PyObject *item)
{
  if (PyIndex_Check(item)) {
    Py_ssize_t i;
    i = PyNumber_AsSsize_t(item, PyExc_IndexError);
    if (i == -1 && PyErr_Occurred()) {
      return NULL;
    }
    if (i < 0) {
      /* Negative indices count from the end. */
      i += self->shape[0];
    }
    return pygpu_buffer__sq_item(self, i);
  }
  if (PySlice_Check(item)) {
    Py_ssize_t start, stop, step, slicelength;
    if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) {
      return NULL;
    }
    if (slicelength <= 0) {
      /* NOTE(review): an empty slice returns a tuple, while a non-empty slice
       * returns a list (see #pygpu_buffer_slice) — confirm this inconsistency
       * is intended. */
      return PyTuple_New(0);
    }
    if (step == 1) {
      return pygpu_buffer_slice(self, start, stop);
    }
    PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors");
    return NULL;
  }
  PyErr_Format(
      PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
  return NULL;
}
/** `self[key] = value` for int keys (negative allowed) and step-1 slices. */
static int pygpu_buffer__mp_ass_subscript(BPyGPUBuffer *self, PyObject *item, PyObject *value)
{
  if (PyIndex_Check(item)) {
    Py_ssize_t index = PyNumber_AsSsize_t(item, PyExc_IndexError);
    if (index == -1 && PyErr_Occurred()) {
      return -1;
    }
    /* Negative indices count from the end. */
    if (index < 0) {
      index += self->shape[0];
    }
    return pygpu_buffer__sq_ass_item(self, index, value);
  }
  if (PySlice_Check(item)) {
    Py_ssize_t start, stop, step, slicelength;
    if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) {
      return -1;
    }
    if (step != 1) {
      PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors");
      return -1;
    }
    return pygpu_buffer_ass_slice(self, start, stop, value);
  }
  PyErr_Format(
      PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
  return -1;
}
/* Instance methods exposed on `gpu.types.Buffer`. */
static PyMethodDef pygpu_buffer__tp_methods[] = {
    {"to_list",
     (PyCFunction)pygpu_buffer_to_list_recursive,
     METH_NOARGS,
     "return the buffer as a list"},
    {NULL, NULL, 0, NULL},
};

/* Read-only attributes. */
static PyGetSetDef pygpu_buffer_getseters[] = {
    {"dimensions", (getter)pygpu_buffer_dimensions, NULL, NULL, NULL},
    {NULL, NULL, NULL, NULL, NULL},
};

/* Sequence protocol: len(buf), buf[i], buf[i] = v.
 * NOTE(review): `pygpu_buffer__sq_item` / `__sq_ass_item` take an `int` index
 * but are cast to `Py_ssize_t`-based slot types — confirm this is safe on
 * LP64 targets. */
static PySequenceMethods pygpu_buffer__tp_as_sequence = {
    (lenfunc)pygpu_buffer__sq_length, /*sq_length */
    (binaryfunc)NULL,                 /*sq_concat */
    (ssizeargfunc)NULL,               /*sq_repeat */
    (ssizeargfunc)pygpu_buffer__sq_item, /*sq_item */
    (ssizessizeargfunc)NULL, /*sq_slice, deprecated, handled in pygpu_buffer__sq_item */
    (ssizeobjargproc)pygpu_buffer__sq_ass_item, /*sq_ass_item */
    (ssizessizeobjargproc)NULL, /* sq_ass_slice, deprecated handled in pygpu_buffer__sq_ass_item */
    (objobjproc)NULL,           /* sq_contains */
    (binaryfunc)NULL,           /* sq_inplace_concat */
    (ssizeargfunc)NULL,         /* sq_inplace_repeat */
};

/* Mapping protocol: adds slice and negative-index subscripting. */
static PyMappingMethods pygpu_buffer__tp_as_mapping = {
    (lenfunc)pygpu_buffer__sq_length,
    (binaryfunc)pygpu_buffer__mp_subscript,
    (objobjargproc)pygpu_buffer__mp_ass_subscript,
};
#ifdef PYGPU_BUFFER_PROTOCOL
/**
 * Fill `r_strides` with byte strides for the given format/shape.
 *
 * NOTE(review): `r_strides[0]` is the item size and each later stride is the
 * previous one times the previous dimension — that is Fortran-order, while
 * #pygpu_buffer__sq_item indexes the data row-major. Confirm the stride order
 * before enabling PYGPU_BUFFER_PROTOCOL.
 */
static void pygpu_buffer_strides_calc(const eGPUDataFormat format,
                                      const int shape_len,
                                      const Py_ssize_t *shape,
                                      Py_ssize_t *r_strides)
{
  r_strides[0] = GPU_texture_dataformat_size(format);
  for (int i = 1; i < shape_len; i++) {
    r_strides[i] = r_strides[i - 1] * shape[i - 1];
  }
}

/* Here is the buffer interface function */
static int pygpu_buffer__bf_getbuffer(BPyGPUBuffer *self, Py_buffer *view, int flags)
{
  if (view == NULL) {
    PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer");
    return -1;
  }
  /* Export a writable view over the buffer's memory. `strides` is allocated
   * here and released in #pygpu_buffer__bf_releasebuffer. */
  view->obj = (PyObject *)self;
  view->buf = (void *)self->buf.as_void;
  view->len = bpygpu_Buffer_size(self);
  view->readonly = 0;
  view->itemsize = GPU_texture_dataformat_size(self->format);
  view->format = pygpu_buffer_formatstr(self->format);
  view->ndim = self->shape_len;
  view->shape = self->shape;
  view->strides = MEM_mallocN(view->ndim * sizeof(*view->strides), "BPyGPUBuffer strides");
  pygpu_buffer_strides_calc(self->format, view->ndim, view->shape, view->strides);
  view->suboffsets = NULL;
  view->internal = NULL;
  /* Keep the exporter alive for the lifetime of the view. */
  Py_INCREF(self);
  return 0;
}

static void pygpu_buffer__bf_releasebuffer(PyObject *UNUSED(exporter), Py_buffer *view)
{
  MEM_SAFE_FREE(view->strides);
}

static PyBufferProcs pygpu_buffer__tp_as_buffer = {
    (getbufferproc)pygpu_buffer__bf_getbuffer,
    (releasebufferproc)pygpu_buffer__bf_releasebuffer,
};
#endif
/* Class docstring: fixes the unclosed `{` in the format list and the
 * mislabeled `:type type:` (the argument is named `format`). */
PyDoc_STRVAR(pygpu_buffer__tp_doc,
             ".. class:: Buffer(format, dimensions, data)\n"
             "\n"
             "   For Python access to GPU functions requiring a pointer.\n"
             "\n"
             "   :arg format: One of these primitive types: {\n"
             "      `FLOAT`,\n"
             "      `INT`,\n"
             "      `UINT`,\n"
             "      `UBYTE`,\n"
             "      `UINT_24_8`,\n"
             "      `10_11_11_REV`,\n"
             "   }\n"
             "   :type format: `str`\n"
             "   :arg dimensions: Array describing the dimensions.\n"
             "   :type dimensions: `int`\n"
             "   :arg data: Optional data array.\n"
             "   :type data: `array`\n");
/* Type definition for `gpu.types.Buffer`. */
PyTypeObject BPyGPU_BufferType = {
    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "Buffer",
    .tp_basicsize = sizeof(BPyGPUBuffer),
    .tp_dealloc = (destructor)pygpu_buffer__tp_dealloc,
    .tp_repr = (reprfunc)pygpu_buffer__tp_repr,
    .tp_as_sequence = &pygpu_buffer__tp_as_sequence,
    .tp_as_mapping = &pygpu_buffer__tp_as_mapping,
#ifdef PYGPU_BUFFER_PROTOCOL
    .tp_as_buffer = &pygpu_buffer__tp_as_buffer,
#endif
    /* GC is needed because a buffer view holds a reference to its parent. */
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
    .tp_doc = pygpu_buffer__tp_doc,
    .tp_traverse = (traverseproc)pygpu_buffer__tp_traverse,
    .tp_clear = (inquiry)pygpu_buffer__tp_clear,
    .tp_methods = pygpu_buffer__tp_methods,
    .tp_getset = pygpu_buffer_getseters,
    .tp_new = pygpu_buffer__tp_new,
};
/** Total size in bytes for a buffer of the given format and shape. */
static size_t pygpu_buffer_calc_size(const int format,
                                     const int shape_len,
                                     const Py_ssize_t *shape)
{
  size_t size = GPU_texture_dataformat_size(format);
  for (int axis = 0; axis < shape_len; axis++) {
    size *= shape[axis];
  }
  return size;
}
/** Total size in bytes of the buffer's data (public API). */
size_t bpygpu_Buffer_size(BPyGPUBuffer *buffer)
{
  return pygpu_buffer_calc_size(buffer->format, buffer->shape_len, buffer->shape);
}
/**
 * Create a buffer object
 *
 * \param shape: An array of `shape_len` integers representing the size of each dimension.
 * \param buffer: When not NULL holds a contiguous buffer
 * with the correct format from which the buffer will be initialized
 * (ownership is transferred to the returned object; when NULL, zeroed
 * storage of the required size is allocated instead).
 */
BPyGPUBuffer *BPyGPU_Buffer_CreatePyObject(const int format,
                                           const Py_ssize_t *shape,
                                           const int shape_len,
                                           void *buffer)
{
  if (buffer == NULL) {
    size_t size = pygpu_buffer_calc_size(format, shape_len, shape);
    buffer = MEM_callocN(size, "BPyGPUBuffer buffer");
  }
  return pygpu_buffer_make_from_data(NULL, format, shape_len, shape, buffer);
}
/** \} */

View File

@@ -0,0 +1,53 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once

extern PyTypeObject BPyGPU_BufferType;

/** True when \a v is exactly a `gpu.types.Buffer` (no subclass check). */
#define BPyGPU_Buffer_Check(v) (Py_TYPE(v) == &BPyGPU_BufferType)

/**
 * Buffer Object
 *
 * For Python access to GPU functions requiring a pointer.
 */
typedef struct BPyGPUBuffer {
  PyObject_VAR_HEAD
  /** Object owning the memory `buf` points into; NULL when `buf` is owned. */
  PyObject *parent;
  /** #eGPUDataFormat of the elements. */
  int format;
  /** Number of entries in `shape`. */
  int shape_len;
  /** Size of each dimension. */
  Py_ssize_t *shape;
  /** Typed views over the same data pointer. */
  union {
    char *as_byte;
    int *as_int;
    uint *as_uint;
    float *as_float;
    void *as_void;
  } buf;
} BPyGPUBuffer;

size_t bpygpu_Buffer_size(BPyGPUBuffer *buffer);
BPyGPUBuffer *BPyGPU_Buffer_CreatePyObject(const int format,
                                           const Py_ssize_t *shape,
                                           const int shape_len,
                                           void *buffer);

View File

@@ -32,24 +32,22 @@
#include "../generic/py_capi_utils.h"
#include "../generic/python_utildefines.h"
#include "gpu_py_api.h"
#include "gpu_py.h"
#include "gpu_py_element.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name IndexBuf Type
* \{ */
static PyObject *py_IndexBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
static PyObject *pygpu_IndexBuf__tp_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
const char *error_prefix = "IndexBuf.__new__";
bool ok = true;
struct {
GPUPrimType type_id;
PyObject *seq;
} params;
struct PyC_StringEnum prim_type = {bpygpu_primtype_items, GPU_PRIM_NONE};
PyObject *seq;
uint verts_per_prim;
uint index_len;
@@ -58,11 +56,11 @@ static PyObject *py_IndexBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyO
static const char *_keywords[] = {"type", "seq", NULL};
static _PyArg_Parser _parser = {"$O&O:IndexBuf.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser, bpygpu_ParsePrimType, &params.type_id, &params.seq)) {
args, kwds, &_parser, PyC_ParseStringEnum, &prim_type, &seq)) {
return NULL;
}
verts_per_prim = GPU_indexbuf_primitive_len(params.type_id);
verts_per_prim = GPU_indexbuf_primitive_len(prim_type.value_found);
if (verts_per_prim == -1) {
PyErr_Format(PyExc_ValueError,
"The argument 'type' must be "
@@ -70,10 +68,10 @@ static PyObject *py_IndexBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyO
return NULL;
}
if (PyObject_CheckBuffer(params.seq)) {
if (PyObject_CheckBuffer(seq)) {
Py_buffer pybuffer;
if (PyObject_GetBuffer(params.seq, &pybuffer, PyBUF_FORMAT | PyBUF_ND) == -1) {
if (PyObject_GetBuffer(seq, &pybuffer, PyBUF_FORMAT | PyBUF_ND) == -1) {
/* PyObject_GetBuffer already handles error messages. */
return NULL;
}
@@ -97,7 +95,7 @@ static PyObject *py_IndexBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyO
/* The `vertex_len` parameter is only used for asserts in the Debug build. */
/* Not very useful in python since scripts are often tested in Release build. */
/* Use `INT_MAX` instead of the actual number of vertices. */
GPU_indexbuf_init(&builder, params.type_id, index_len, INT_MAX);
GPU_indexbuf_init(&builder, prim_type.value_found, index_len, INT_MAX);
#if 0
uint *buf = pybuffer.buf;
@@ -111,7 +109,7 @@ static PyObject *py_IndexBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyO
PyBuffer_Release(&pybuffer);
}
else {
PyObject *seq_fast = PySequence_Fast(params.seq, error_prefix);
PyObject *seq_fast = PySequence_Fast(seq, error_prefix);
if (seq_fast == NULL) {
return false;
@@ -126,7 +124,7 @@ static PyObject *py_IndexBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyO
/* The `vertex_len` parameter is only used for asserts in the Debug build. */
/* Not very useful in python since scripts are often tested in Release build. */
/* Use `INT_MAX` instead of the actual number of vertices. */
GPU_indexbuf_init(&builder, params.type_id, index_len, INT_MAX);
GPU_indexbuf_init(&builder, prim_type.value_found, index_len, INT_MAX);
if (verts_per_prim == 1) {
for (uint i = 0; i < seq_len; i++) {
@@ -175,13 +173,13 @@ static PyObject *py_IndexBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyO
return BPyGPUIndexBuf_CreatePyObject(GPU_indexbuf_build(&builder));
}
static void py_IndexBuf_dealloc(BPyGPUIndexBuf *self)
static void pygpu_IndexBuf__tp_dealloc(BPyGPUIndexBuf *self)
{
GPU_indexbuf_discard(self->elem);
Py_TYPE(self)->tp_free(self);
}
PyDoc_STRVAR(py_gpu_element_doc,
PyDoc_STRVAR(pygpu_IndexBuf__tp_doc,
".. class:: GPUIndexBuf(type, seq)\n"
"\n"
" Contains an index buffer.\n"
@@ -199,10 +197,10 @@ PyDoc_STRVAR(py_gpu_element_doc,
PyTypeObject BPyGPUIndexBuf_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUIndexBuf",
.tp_basicsize = sizeof(BPyGPUIndexBuf),
.tp_dealloc = (destructor)py_IndexBuf_dealloc,
.tp_dealloc = (destructor)pygpu_IndexBuf__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = py_gpu_element_doc,
.tp_new = py_IndexBuf_new,
.tp_doc = pygpu_IndexBuf__tp_doc,
.tp_new = pygpu_IndexBuf__tp_new,
};
/** \} */

View File

@@ -0,0 +1,546 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* This file defines the framebuffer functionalities of the 'gpu' module
* used for off-screen OpenGL rendering.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "GPU_context.h"
#include "GPU_framebuffer.h"
#include "GPU_init_exit.h"
#include "../generic/py_capi_utils.h"
#include "../generic/python_utildefines.h"
#include "../mathutils/mathutils.h"
#include "gpu_py.h"
#include "gpu_py_texture.h"
#include "gpu_py_framebuffer.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name GPUFrameBuffer Common Utilities
* \{ */
/** Return 0 when the wrapped frame-buffer is still valid, else raise and return -1. */
static int pygpu_framebuffer_valid_check(BPyGPUFrameBuffer *bpygpu_fb)
{
  if (LIKELY(bpygpu_fb->fb != NULL)) {
    return 0;
  }
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
  PyErr_SetString(PyExc_ReferenceError, "GPU framebuffer was freed, no further access is valid");
#else
  PyErr_SetString(PyExc_ReferenceError, "GPU framebuffer: internal error");
#endif
  return -1;
}
/* Raise a Python exception and return NULL from the *calling* function when
 * the wrapped frame-buffer has already been freed. */
#define PYGPU_FRAMEBUFFER_CHECK_OBJ(bpygpu) \
  { \
    if (UNLIKELY(pygpu_framebuffer_valid_check(bpygpu) == -1)) { \
      return NULL; \
    } \
  } \
  ((void)0)
/** Free `fb` when the GPU subsystem is still alive; otherwise just warn. */
static void pygpu_framebuffer_free_if_possible(GPUFrameBuffer *fb)
{
  if (fb == NULL) {
    return;
  }
  if (!GPU_is_init()) {
    printf("PyFramebuffer freed after the context has been destroyed.\n");
    return;
  }
  GPU_framebuffer_free(fb);
}
/* Keep less than or equal to #FRAMEBUFFER_STACK_DEPTH */
#define GPU_PY_FRAMEBUFFER_STACK_LEN 16

/** Push the active frame-buffer and bind `fb`; raise and return false on stack overflow. */
static bool pygpu_framebuffer_stack_push_and_bind_or_error(GPUFrameBuffer *fb)
{
  const int level = GPU_framebuffer_stack_level_get();
  if (level >= GPU_PY_FRAMEBUFFER_STACK_LEN) {
    PyErr_SetString(
        PyExc_RuntimeError,
        "Maximum framebuffer stack depth " STRINGIFY(GPU_PY_FRAMEBUFFER_STACK_LEN) " reached");
    return false;
  }
  GPU_framebuffer_push(GPU_framebuffer_active_get());
  GPU_framebuffer_bind(fb);
  return true;
}
/**
 * Restore the frame-buffer that was active before the matching push.
 * Raise and return false when the stack is empty or `fb` is not bound.
 */
static bool pygpu_framebuffer_stack_pop_and_restore_or_error(GPUFrameBuffer *fb)
{
  if (GPU_framebuffer_stack_level_get() == 0) {
    PyErr_SetString(PyExc_RuntimeError, "Minimum framebuffer stack depth reached");
    return false;
  }
  if (fb && !GPU_framebuffer_bound(fb)) {
    PyErr_SetString(PyExc_RuntimeError, "Framebuffer is not bound");
    return false;
  }
  GPU_framebuffer_bind(GPU_framebuffer_pop());
  return true;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Stack (Context Manager)
*
* Safer alternative to ensure balanced push/pop calls.
*
* \{ */
/** Object returned by `GPUFrameBuffer.bind()`, used as a `with` context manager. */
typedef struct {
  PyObject_HEAD /* required python macro */
      BPyGPUFrameBuffer *py_fb;
  /** Frame-buffer stack level recorded on `__enter__`; -1 while not entered. */
  int level;
} PyFrameBufferStackContext;

static void pygpu_framebuffer_stack_context__tp_dealloc(PyFrameBufferStackContext *self)
{
  Py_DECREF(self->py_fb);
  PyObject_DEL(self);
}

/** `__enter__`: push the currently active frame-buffer and bind the wrapped one. */
static PyObject *pygpu_framebuffer_stack_context_enter(PyFrameBufferStackContext *self)
{
  PYGPU_FRAMEBUFFER_CHECK_OBJ(self->py_fb);
  /* sanity - should never happen */
  if (self->level != -1) {
    PyErr_SetString(PyExc_RuntimeError, "Already in use");
    return NULL;
  }
  if (!pygpu_framebuffer_stack_push_and_bind_or_error(self->py_fb->fb)) {
    return NULL;
  }
  /* Remember the level so `__exit__` can detect unbalanced binds. */
  self->level = GPU_framebuffer_stack_level_get();
  Py_RETURN_NONE;
}
/** `__exit__`: restore the frame-buffer that was active on `__enter__`. */
static PyObject *pygpu_framebuffer_stack_context_exit(PyFrameBufferStackContext *self,
                                                      PyObject *UNUSED(args))
{
  PYGPU_FRAMEBUFFER_CHECK_OBJ(self->py_fb);
  /* sanity - should never happen */
  if (self->level == -1) {
    /* Returning NULL from a C function must be paired with an exception,
     * otherwise CPython raises a SystemError; previously this only printed
     * to stderr. */
    PyErr_SetString(PyExc_RuntimeError, "Not yet in use");
    return NULL;
  }
  const int level = GPU_framebuffer_stack_level_get();
  if (level != self->level) {
    /* Warn only; still attempt to restore. */
    fprintf(stderr, "Level of bind mismatch, expected %d, got %d\n", self->level, level);
  }
  if (!pygpu_framebuffer_stack_pop_and_restore_or_error(self->py_fb->fb)) {
    return NULL;
  }
  Py_RETURN_NONE;
}
/* Context-manager protocol methods. */
static PyMethodDef pygpu_framebuffer_stack_context__tp_methods[] = {
    {"__enter__", (PyCFunction)pygpu_framebuffer_stack_context_enter, METH_NOARGS},
    {"__exit__", (PyCFunction)pygpu_framebuffer_stack_context_exit, METH_VARARGS},
    {NULL},
};

/* Internal type; instances are only created by `GPUFrameBuffer.bind()`. */
static PyTypeObject FramebufferStackContext_Type = {
    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUFrameBufferStackContext",
    .tp_basicsize = sizeof(PyFrameBufferStackContext),
    .tp_dealloc = (destructor)pygpu_framebuffer_stack_context__tp_dealloc,
    .tp_flags = Py_TPFLAGS_DEFAULT,
    .tp_methods = pygpu_framebuffer_stack_context__tp_methods,
};
PyDoc_STRVAR(pygpu_framebuffer_bind_doc,
             ".. function:: bind()\n"
             "\n"
             "   Context manager to ensure balanced bind calls, even in the case of an error.\n");
/** Return a context-manager object that binds this frame-buffer while active. */
static PyObject *pygpu_framebuffer_bind(BPyGPUFrameBuffer *self)
{
  PyFrameBufferStackContext *ret = PyObject_New(PyFrameBufferStackContext,
                                                &FramebufferStackContext_Type);
  if (ret == NULL) {
    /* PyObject_New sets MemoryError; previously the NULL was dereferenced. */
    return NULL;
  }
  ret->py_fb = self;
  ret->level = -1;
  Py_INCREF(self);
  return (PyObject *)ret;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUFramebuffer Type
* \{ */
/**
 * Fill in the #GPUAttachment according to the PyObject parameter.
 *
 * \param o: May be NULL, Py_None, a #BPyGPUTexture, or a dictionary containing
 * the keyword "texture" and the optional keywords "layer" and "mip".
 * \return false on error. In this case a python exception is raised and
 * \a r_attach is not touched.
 */
static bool pygpu_framebuffer_new_parse_arg(PyObject *o, GPUAttachment *r_attach)
{
  GPUAttachment tmp_attach = GPU_ATTACHMENT_NONE;
  if (!o || o == Py_None) {
    /* Pass. */;
  }
  else if (BPyGPUTexture_Check(o)) {
    if (!bpygpu_ParseTexture(o, &tmp_attach.tex)) {
      return false;
    }
  }
  else if (PyDict_Check(o)) {
    /* NOTE: previously any other type fell through to this branch and was
     * passed to #PyDict_Next, which is invalid for non-dict objects. */
    const char *c_texture = "texture";
    const char *c_layer = "layer";
    const char *c_mip = "mip";
    PyObject *key, *value;
    Py_ssize_t pos = 0;
    while (PyDict_Next(o, &pos, &key, &value)) {
      if (!PyUnicode_Check(key)) {
        PyErr_SetString(PyExc_TypeError, "keywords must be strings");
        return false;
      }
      if (c_texture && _PyUnicode_EqualToASCIIString(key, c_texture)) {
        /* Compare only once. */
        c_texture = NULL;
        if (!bpygpu_ParseTexture(value, &tmp_attach.tex)) {
          return false;
        }
      }
      else if (c_layer && _PyUnicode_EqualToASCIIString(key, c_layer)) {
        /* Compare only once. */
        c_layer = NULL;
        tmp_attach.layer = PyLong_AsLong(value);
        if (tmp_attach.layer == -1 && PyErr_Occurred()) {
          return false;
        }
      }
      else if (c_mip && _PyUnicode_EqualToASCIIString(key, c_mip)) {
        /* Compare only once. */
        c_mip = NULL;
        tmp_attach.mip = PyLong_AsLong(value);
        if (tmp_attach.mip == -1 && PyErr_Occurred()) {
          return false;
        }
      }
      else {
        PyErr_Format(
            PyExc_TypeError, "'%U' is an invalid keyword argument for this attribute", key);
        return false;
      }
    }
  }
  else {
    PyErr_Format(PyExc_TypeError,
                 "expected a GPUTexture, dict or None, not %.200s",
                 Py_TYPE(o)->tp_name);
    return false;
  }
  *r_attach = tmp_attach;
  return true;
}
/**
 * `GPUFrameBuffer.__new__(depth_slot=None, color_slots=None)`.
 *
 * `color_slots` may be a single attachment or a sequence of up to
 * #BPYGPU_FB_MAX_COLOR_ATTACHMENT attachments (see
 * #pygpu_framebuffer_new_parse_arg for accepted values).
 */
static PyObject *pygpu_framebuffer__tp_new(PyTypeObject *UNUSED(self),
                                           PyObject *args,
                                           PyObject *kwds)
{
  BPYGPU_IS_INIT_OR_ERROR_OBJ;
  if (!GPU_context_active_get()) {
    PyErr_SetString(PyExc_RuntimeError, "No active GPU context found");
    return NULL;
  }
  PyObject *depth_attachment = NULL;
  PyObject *color_attachements = NULL;
  static const char *_keywords[] = {"depth_slot", "color_slots", NULL};
  static _PyArg_Parser _parser = {"|$OO:GPUFrameBuffer.__new__", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(
          args, kwds, &_parser, &depth_attachment, &color_attachements)) {
    return NULL;
  }
/* Keep in sync with #GPU_FB_MAX_COLOR_ATTACHMENT.
 * TODO: share the define. */
#define BPYGPU_FB_MAX_COLOR_ATTACHMENT 6
  /* Slot 0 is the depth attachment, slots 1..N the color attachments. */
  GPUAttachment config[BPYGPU_FB_MAX_COLOR_ATTACHMENT + 1];
  if (!pygpu_framebuffer_new_parse_arg(depth_attachment, &config[0])) {
    return NULL;
  }
  if (config[0].tex && !GPU_texture_depth(config[0].tex)) {
    PyErr_SetString(PyExc_ValueError, "Depth texture with incompatible format");
    return NULL;
  }
  int color_attachements_len = 0;
  if (color_attachements && color_attachements != Py_None) {
    if (PySequence_Check(color_attachements)) {
      color_attachements_len = PySequence_Size(color_attachements);
      if (color_attachements_len > BPYGPU_FB_MAX_COLOR_ATTACHMENT) {
        PyErr_SetString(
            PyExc_AttributeError,
            "too many attachements, max is " STRINGIFY(BPYGPU_FB_MAX_COLOR_ATTACHMENT));
        return NULL;
      }
      for (int i = 1; i <= color_attachements_len; i++) {
        /* Sequence items are 0-based while `config` slot 0 holds the depth
         * attachment. Previously `PySequence_GetItem(..., i)` skipped the
         * first item and read one past the end of the sequence (then passed
         * the resulting NULL to #Py_DECREF). */
        PyObject *o = PySequence_GetItem(color_attachements, i - 1);
        if (o == NULL) {
          return NULL;
        }
        const bool ok = pygpu_framebuffer_new_parse_arg(o, &config[i]);
        Py_DECREF(o);
        if (!ok) {
          return NULL;
        }
      }
    }
    else {
      /* A single attachment (not wrapped in a sequence). */
      if (!pygpu_framebuffer_new_parse_arg(color_attachements, &config[1])) {
        return NULL;
      }
      color_attachements_len = 1;
    }
  }
  GPUFrameBuffer *fb_python = GPU_framebuffer_create("fb_python");
  GPU_framebuffer_config_array(fb_python, config, color_attachements_len + 1);
  return BPyGPUFrameBuffer_CreatePyObject(fb_python);
}
PyDoc_STRVAR(pygpu_framebuffer_is_bound_doc,
             "Checks if this is the active framebuffer in the context.");
/* Getter for the read-only `is_bound` attribute (see #pygpu_framebuffer__tp_getseters). */
static PyObject *pygpu_framebuffer_is_bound(BPyGPUFrameBuffer *self, void *UNUSED(type))
{
  PYGPU_FRAMEBUFFER_CHECK_OBJ(self);
  return PyBool_FromLong(GPU_framebuffer_bound(self->fb));
}
PyDoc_STRVAR(pygpu_framebuffer_clear_doc,
             ".. method:: clear(color=None, depth=None, stencil=None)\n"
             "\n"
             " Fill color, depth and stencil textures with specific value.\n"
             " Common values: color=(0.0, 0.0, 0.0, 1.0), depth=1.0, stencil=0.\n"
             "\n"
             " :arg color: float sequence each representing ``(r, g, b, a)``.\n"
             " :type color: sequence of 3 or 4 floats\n"
             " :arg depth: depth value.\n"
             " :type depth: `float`\n"
             " :arg stencil: stencil value.\n"
             " :type stencil: `int`\n");
static PyObject *pygpu_framebuffer_clear(BPyGPUFrameBuffer *self, PyObject *args, PyObject *kwds)
{
  PYGPU_FRAMEBUFFER_CHECK_OBJ(self);

  if (!GPU_framebuffer_bound(self->fb)) {
    /* Returning NULL without an exception set makes CPython raise
     * `SystemError: error return without exception set` — raise a meaningful error. */
    PyErr_SetString(PyExc_RuntimeError, "Framebuffer must be bound before clearing");
    return NULL;
  }

  PyObject *py_col = NULL;
  PyObject *py_depth = NULL;
  PyObject *py_stencil = NULL;

  static const char *_keywords[] = {"color", "depth", "stencil", NULL};
  static _PyArg_Parser _parser = {"|$OOO:clear", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &py_col, &py_depth, &py_stencil)) {
    return NULL;
  }

  /* Only the buffers whose argument was given (and not None) are cleared. */
  eGPUFrameBufferBits buffers = 0;
  float col[4] = {0.0f, 0.0f, 0.0f, 1.0f};
  float depth = 1.0f;
  uint stencil = 0;

  if (py_col && py_col != Py_None) {
    if (mathutils_array_parse(col, 3, 4, py_col, "GPUFrameBuffer.clear(), invalid 'color' arg") ==
        -1) {
      return NULL;
    }
    buffers |= GPU_COLOR_BIT;
  }

  if (py_depth && py_depth != Py_None) {
    depth = PyFloat_AsDouble(py_depth);
    if (PyErr_Occurred()) {
      return NULL;
    }
    buffers |= GPU_DEPTH_BIT;
  }

  if (py_stencil && py_stencil != Py_None) {
    if ((stencil = PyC_Long_AsU32(py_stencil)) == (uint)-1) {
      return NULL;
    }
    buffers |= GPU_STENCIL_BIT;
  }

  GPU_framebuffer_clear(self->fb, buffers, col, depth, stencil);

  Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_framebuffer_viewport_set_doc,
             ".. function:: viewport_set(x, y, xsize, ysize)\n"
             "\n"
             " Set the viewport for this framebuffer object.\n"
             " Note: The viewport state is not saved upon framebuffer rebind.\n"
             "\n"
             " :param x, y: lower left corner of the viewport_set rectangle, in pixels.\n"
             " :param xsize, ysize: width and height of the viewport_set.\n"
             " :type x, y, xsize, ysize: `int`\n");
static PyObject *pygpu_framebuffer_viewport_set(BPyGPUFrameBuffer *self,
                                                PyObject *args,
                                                void *UNUSED(type))
{
  /* Guard against access after `free()` — every sibling method validates `self->fb`
   * this way; without it a freed framebuffer would be dereferenced below. */
  PYGPU_FRAMEBUFFER_CHECK_OBJ(self);

  int x, y, xsize, ysize;
  if (!PyArg_ParseTuple(args, "iiii:viewport_set", &x, &y, &xsize, &ysize)) {
    return NULL;
  }

  GPU_framebuffer_viewport_set(self->fb, x, y, xsize, ysize);
  Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_framebuffer_viewport_get_doc,
             ".. function:: viewport_get()\n"
             "\n"
             " Returns position and dimension to current viewport.\n");
static PyObject *pygpu_framebuffer_viewport_get(BPyGPUFrameBuffer *self, void *UNUSED(type))
{
  /* Return the framebuffer's viewport as an `(x, y, xsize, ysize)` tuple of ints. */
  PYGPU_FRAMEBUFFER_CHECK_OBJ(self);

  int rect[4];
  GPU_framebuffer_viewport_get(self->fb, rect);

  return Py_BuildValue("(iiii)", rect[0], rect[1], rect[2], rect[3]);
}
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
PyDoc_STRVAR(pygpu_framebuffer_free_doc,
             ".. method:: free()\n"
             "\n"
             " Free the framebuffer object.\n"
             " The framebuffer will no longer be accessible.\n");
/* Explicitly release the wrapped framebuffer. `fb` is set to NULL so later access
 * through this wrapper is rejected by #PYGPU_FRAMEBUFFER_CHECK_OBJ. */
static PyObject *pygpu_framebuffer_free(BPyGPUFrameBuffer *self)
{
  PYGPU_FRAMEBUFFER_CHECK_OBJ(self);
  pygpu_framebuffer_free_if_possible(self->fb);
  self->fb = NULL;
  Py_RETURN_NONE;
}
#endif
/* Python destructor: free the underlying framebuffer (when possible) and the wrapper itself. */
static void BPyGPUFrameBuffer__tp_dealloc(BPyGPUFrameBuffer *self)
{
  pygpu_framebuffer_free_if_possible(self->fb);
  Py_TYPE(self)->tp_free((PyObject *)self);
}
/* Attributes exposed on GPUFrameBuffer — currently only the read-only `is_bound`. */
static PyGetSetDef pygpu_framebuffer__tp_getseters[] = {
    {"is_bound",
     (getter)pygpu_framebuffer_is_bound,
     (setter)NULL,
     pygpu_framebuffer_is_bound_doc,
     NULL},
    {NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};
/* Methods exposed on GPUFrameBuffer. */
static struct PyMethodDef pygpu_framebuffer__tp_methods[] = {
    {"bind", (PyCFunction)pygpu_framebuffer_bind, METH_NOARGS, pygpu_framebuffer_bind_doc},
    {"clear",
     (PyCFunction)pygpu_framebuffer_clear,
     METH_VARARGS | METH_KEYWORDS,
     pygpu_framebuffer_clear_doc},
    /* `viewport_set` parses positional args with #PyArg_ParseTuple, so it must be
     * METH_VARARGS — METH_NOARGS would pass a NULL args tuple and crash the parser. */
    {"viewport_set",
     (PyCFunction)pygpu_framebuffer_viewport_set,
     METH_VARARGS,
     pygpu_framebuffer_viewport_set_doc},
    /* `viewport_get` takes no arguments (second parameter is unused), so METH_NOARGS. */
    {"viewport_get",
     (PyCFunction)pygpu_framebuffer_viewport_get,
     METH_NOARGS,
     pygpu_framebuffer_viewport_get_doc},
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
    {"free", (PyCFunction)pygpu_framebuffer_free, METH_NOARGS, pygpu_framebuffer_free_doc},
#endif
    {NULL, NULL, 0, NULL},
};
/* Class docstring — typo fixes: "functionallities" -> "functionalities",
 * "in a argument" -> "in an argument", "Nonetype" -> "NoneType". */
PyDoc_STRVAR(pygpu_framebuffer__tp_doc,
             ".. class:: GPUFrameBuffer(depth_slot=None, color_slots=None)\n"
             "\n"
             " This object gives access to framebuffer functionalities.\n"
             " When a 'layer' is specified in an argument, a single layer of a 3D or array "
             "texture is attached to the frame-buffer.\n"
             " For cube map textures, layer is translated into a cube map face.\n"
             "\n"
             " :arg depth_slot: GPUTexture to attach or a `dict` containing keywords: "
             "'texture', 'layer' and 'mip'.\n"
             " :type depth_slot: :class:`gpu.types.GPUTexture`, `dict` or `NoneType`\n"
             " :arg color_slots: Tuple where each item can be a GPUTexture or a `dict` "
             "containing keywords: 'texture', 'layer' and 'mip'.\n"
             " :type color_slots: `tuple` or `NoneType`\n");
/* Python type definition for `gpu.types.GPUFrameBuffer`. */
PyTypeObject BPyGPUFrameBuffer_Type = {
    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUFrameBuffer",
    .tp_basicsize = sizeof(BPyGPUFrameBuffer),
    .tp_dealloc = (destructor)BPyGPUFrameBuffer__tp_dealloc,
    .tp_flags = Py_TPFLAGS_DEFAULT,
    .tp_doc = pygpu_framebuffer__tp_doc,
    .tp_methods = pygpu_framebuffer__tp_methods,
    .tp_getset = pygpu_framebuffer__tp_getseters,
    .tp_new = pygpu_framebuffer__tp_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
/* Wrap an existing GPUFrameBuffer in a new Python object; the framebuffer is released
 * via #pygpu_framebuffer_free_if_possible when the wrapper is deallocated. */
PyObject *BPyGPUFrameBuffer_CreatePyObject(GPUFrameBuffer *fb)
{
  BPyGPUFrameBuffer *self;

  self = PyObject_New(BPyGPUFrameBuffer, &BPyGPUFrameBuffer_Type);
  if (self == NULL) {
    /* #PyObject_New sets MemoryError on failure; dereferencing NULL would crash. */
    return NULL;
  }
  self->fb = fb;

  return (PyObject *)self;
}
/** \} */
#undef PYGPU_FRAMEBUFFER_CHECK_OBJ

View File

@@ -0,0 +1,33 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once
#include "BLI_compiler_attrs.h"
extern PyTypeObject BPyGPUFrameBuffer_Type;
#define BPyGPUFrameBuffer_Check(v) (Py_TYPE(v) == &BPyGPUFrameBuffer_Type)
typedef struct BPyGPUFrameBuffer {
PyObject_HEAD struct GPUFrameBuffer *fb;
} BPyGPUFrameBuffer;
PyObject *BPyGPUFrameBuffer_CreatePyObject(struct GPUFrameBuffer *fb) ATTR_NONNULL(1);

View File

@@ -44,7 +44,7 @@
/** \name Helper Functions
* \{ */
static bool py_stack_is_push_model_view_ok_or_error(void)
static bool pygpu_stack_is_push_model_view_ok_or_error(void)
{
if (GPU_matrix_stack_level_get_model_view() >= GPU_PY_MATRIX_STACK_LEN) {
PyErr_SetString(
@@ -55,7 +55,7 @@ static bool py_stack_is_push_model_view_ok_or_error(void)
return true;
}
static bool py_stack_is_push_projection_ok_or_error(void)
static bool pygpu_stack_is_push_projection_ok_or_error(void)
{
if (GPU_matrix_stack_level_get_projection() >= GPU_PY_MATRIX_STACK_LEN) {
PyErr_SetString(
@@ -66,7 +66,7 @@ static bool py_stack_is_push_projection_ok_or_error(void)
return true;
}
static bool py_stack_is_pop_model_view_ok_or_error(void)
static bool pygpu_stack_is_pop_model_view_ok_or_error(void)
{
if (GPU_matrix_stack_level_get_model_view() == 0) {
PyErr_SetString(PyExc_RuntimeError, "Minimum model-view stack depth reached");
@@ -75,7 +75,7 @@ static bool py_stack_is_pop_model_view_ok_or_error(void)
return true;
}
static bool py_stack_is_pop_projection_ok_or_error(void)
static bool pygpu_stack_is_pop_projection_ok_or_error(void)
{
if (GPU_matrix_stack_level_get_projection() == 0) {
PyErr_SetString(PyExc_RuntimeError, "Minimum projection stack depth reached");
@@ -90,52 +90,52 @@ static bool py_stack_is_pop_projection_ok_or_error(void)
/** \name Manage Stack
* \{ */
PyDoc_STRVAR(py_matrix_push_doc,
PyDoc_STRVAR(pygpu_matrix_push_doc,
".. function:: push()\n"
"\n"
" Add to the model-view matrix stack.\n");
static PyObject *py_matrix_push(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_push(PyObject *UNUSED(self))
{
if (!py_stack_is_push_model_view_ok_or_error()) {
if (!pygpu_stack_is_push_model_view_ok_or_error()) {
return NULL;
}
GPU_matrix_push();
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_matrix_pop_doc,
PyDoc_STRVAR(pygpu_matrix_pop_doc,
".. function:: pop()\n"
"\n"
" Remove the last model-view matrix from the stack.\n");
static PyObject *py_matrix_pop(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_pop(PyObject *UNUSED(self))
{
if (!py_stack_is_pop_model_view_ok_or_error()) {
if (!pygpu_stack_is_pop_model_view_ok_or_error()) {
return NULL;
}
GPU_matrix_pop();
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_matrix_push_projection_doc,
PyDoc_STRVAR(pygpu_matrix_push_projection_doc,
".. function:: push_projection()\n"
"\n"
" Add to the projection matrix stack.\n");
static PyObject *py_matrix_push_projection(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_push_projection(PyObject *UNUSED(self))
{
if (!py_stack_is_push_projection_ok_or_error()) {
if (!pygpu_stack_is_push_projection_ok_or_error()) {
return NULL;
}
GPU_matrix_push_projection();
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_matrix_pop_projection_doc,
PyDoc_STRVAR(pygpu_matrix_pop_projection_doc,
".. function:: pop_projection()\n"
"\n"
" Remove the last projection matrix from the stack.\n");
static PyObject *py_matrix_pop_projection(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_pop_projection(PyObject *UNUSED(self))
{
if (!py_stack_is_pop_projection_ok_or_error()) {
if (!pygpu_stack_is_pop_projection_ok_or_error()) {
return NULL;
}
GPU_matrix_pop_projection();
@@ -162,23 +162,23 @@ enum {
PYGPU_MATRIX_TYPE_PROJECTION = 2,
};
static PyObject *py_matrix_stack_context_enter(BPyGPU_MatrixStackContext *self);
static PyObject *py_matrix_stack_context_exit(BPyGPU_MatrixStackContext *self, PyObject *args);
static PyObject *pygpu_matrix_stack_context_enter(BPyGPU_MatrixStackContext *self);
static PyObject *pygpu_matrix_stack_context_exit(BPyGPU_MatrixStackContext *self, PyObject *args);
static PyMethodDef py_matrix_stack_context_methods[] = {
{"__enter__", (PyCFunction)py_matrix_stack_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)py_matrix_stack_context_exit, METH_VARARGS},
static PyMethodDef pygpu_matrix_stack_context__tp_methods[] = {
{"__enter__", (PyCFunction)pygpu_matrix_stack_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)pygpu_matrix_stack_context_exit, METH_VARARGS},
{NULL},
};
static PyTypeObject BPyGPU_matrix_stack_context_Type = {
static PyTypeObject PyGPUMatrixStackContext_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUMatrixStackContext",
.tp_basicsize = sizeof(BPyGPU_MatrixStackContext),
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_methods = py_matrix_stack_context_methods,
.tp_methods = pygpu_matrix_stack_context__tp_methods,
};
static PyObject *py_matrix_stack_context_enter(BPyGPU_MatrixStackContext *self)
static PyObject *pygpu_matrix_stack_context_enter(BPyGPU_MatrixStackContext *self)
{
/* sanity - should never happen */
if (self->level != -1) {
@@ -187,14 +187,14 @@ static PyObject *py_matrix_stack_context_enter(BPyGPU_MatrixStackContext *self)
}
if (self->type == PYGPU_MATRIX_TYPE_MODEL_VIEW) {
if (!py_stack_is_push_model_view_ok_or_error()) {
if (!pygpu_stack_is_push_model_view_ok_or_error()) {
return NULL;
}
GPU_matrix_push();
self->level = GPU_matrix_stack_level_get_model_view();
}
else if (self->type == PYGPU_MATRIX_TYPE_PROJECTION) {
if (!py_stack_is_push_projection_ok_or_error()) {
if (!pygpu_stack_is_push_projection_ok_or_error()) {
return NULL;
}
GPU_matrix_push_projection();
@@ -206,8 +206,8 @@ static PyObject *py_matrix_stack_context_enter(BPyGPU_MatrixStackContext *self)
Py_RETURN_NONE;
}
static PyObject *py_matrix_stack_context_exit(BPyGPU_MatrixStackContext *self,
PyObject *UNUSED(args))
static PyObject *pygpu_matrix_stack_context_exit(BPyGPU_MatrixStackContext *self,
PyObject *UNUSED(args))
{
/* sanity - should never happen */
if (self->level == -1) {
@@ -240,33 +240,33 @@ finally:
Py_RETURN_NONE;
}
static PyObject *py_matrix_push_pop_impl(int type)
static PyObject *pygpu_matrix_push_pop_impl(int type)
{
BPyGPU_MatrixStackContext *ret = PyObject_New(BPyGPU_MatrixStackContext,
&BPyGPU_matrix_stack_context_Type);
&PyGPUMatrixStackContext_Type);
ret->type = type;
ret->level = -1;
return (PyObject *)ret;
}
PyDoc_STRVAR(
py_matrix_push_pop_doc,
pygpu_matrix_push_pop_doc,
".. function:: push_pop()\n"
"\n"
" Context manager to ensure balanced push/pop calls, even in the case of an error.\n");
static PyObject *py_matrix_push_pop(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_push_pop(PyObject *UNUSED(self))
{
return py_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_MODEL_VIEW);
return pygpu_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_MODEL_VIEW);
}
PyDoc_STRVAR(
py_matrix_push_pop_projection_doc,
pygpu_matrix_push_pop_projection_doc,
".. function:: push_pop_projection()\n"
"\n"
" Context manager to ensure balanced push/pop calls, even in the case of an error.\n");
static PyObject *py_matrix_push_pop_projection(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_push_pop_projection(PyObject *UNUSED(self))
{
return py_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_PROJECTION);
return pygpu_matrix_push_pop_impl(PYGPU_MATRIX_TYPE_PROJECTION);
}
/** \} */
@@ -275,14 +275,14 @@ static PyObject *py_matrix_push_pop_projection(PyObject *UNUSED(self))
/** \name Manipulate State
* \{ */
PyDoc_STRVAR(py_matrix_multiply_matrix_doc,
PyDoc_STRVAR(pygpu_matrix_multiply_matrix_doc,
".. function:: multiply_matrix(matrix)\n"
"\n"
" Multiply the current stack matrix.\n"
"\n"
" :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n");
static PyObject *py_matrix_multiply_matrix(PyObject *UNUSED(self), PyObject *value)
static PyObject *pygpu_matrix_multiply_matrix(PyObject *UNUSED(self), PyObject *value)
{
MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) {
@@ -292,14 +292,14 @@ static PyObject *py_matrix_multiply_matrix(PyObject *UNUSED(self), PyObject *val
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_matrix_scale_doc,
PyDoc_STRVAR(pygpu_matrix_scale_doc,
".. function:: scale(scale)\n"
"\n"
" Scale the current stack matrix.\n"
"\n"
" :param scale: Scale the current stack matrix.\n"
" :type scale: sequence of 2 or 3 floats\n");
static PyObject *py_matrix_scale(PyObject *UNUSED(self), PyObject *value)
static PyObject *pygpu_matrix_scale(PyObject *UNUSED(self), PyObject *value)
{
float scale[3];
int len;
@@ -316,12 +316,12 @@ static PyObject *py_matrix_scale(PyObject *UNUSED(self), PyObject *value)
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_matrix_scale_uniform_doc,
PyDoc_STRVAR(pygpu_matrix_scale_uniform_doc,
".. function:: scale_uniform(scale)\n"
"\n"
" :param scale: Scale the current stack matrix.\n"
" :type scale: float\n");
static PyObject *py_matrix_scale_uniform(PyObject *UNUSED(self), PyObject *value)
static PyObject *pygpu_matrix_scale_uniform(PyObject *UNUSED(self), PyObject *value)
{
float scalar;
if ((scalar = PyFloat_AsDouble(value)) == -1.0f && PyErr_Occurred()) {
@@ -332,14 +332,14 @@ static PyObject *py_matrix_scale_uniform(PyObject *UNUSED(self), PyObject *value
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_matrix_translate_doc,
PyDoc_STRVAR(pygpu_matrix_translate_doc,
".. function:: translate(offset)\n"
"\n"
" Scale the current stack matrix.\n"
"\n"
" :param offset: Translate the current stack matrix.\n"
" :type offset: sequence of 2 or 3 floats\n");
static PyObject *py_matrix_translate(PyObject *UNUSED(self), PyObject *value)
static PyObject *pygpu_matrix_translate(PyObject *UNUSED(self), PyObject *value)
{
float offset[3];
int len;
@@ -362,34 +362,34 @@ static PyObject *py_matrix_translate(PyObject *UNUSED(self), PyObject *value)
/** \name Write State
* \{ */
PyDoc_STRVAR(py_matrix_reset_doc,
PyDoc_STRVAR(pygpu_matrix_reset_doc,
".. function:: reset()\n"
"\n"
" Empty stack and set to identity.\n");
static PyObject *py_matrix_reset(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_reset(PyObject *UNUSED(self))
{
GPU_matrix_reset();
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_matrix_load_identity_doc,
PyDoc_STRVAR(pygpu_matrix_load_identity_doc,
".. function:: load_identity()\n"
"\n"
" Empty stack and set to identity.\n");
static PyObject *py_matrix_load_identity(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_load_identity(PyObject *UNUSED(self))
{
GPU_matrix_identity_set();
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_matrix_load_matrix_doc,
PyDoc_STRVAR(pygpu_matrix_load_matrix_doc,
".. function:: load_matrix(matrix)\n"
"\n"
" Load a matrix into the stack.\n"
"\n"
" :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n");
static PyObject *py_matrix_load_matrix(PyObject *UNUSED(self), PyObject *value)
static PyObject *pygpu_matrix_load_matrix(PyObject *UNUSED(self), PyObject *value)
{
MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) {
@@ -399,14 +399,14 @@ static PyObject *py_matrix_load_matrix(PyObject *UNUSED(self), PyObject *value)
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_matrix_load_projection_matrix_doc,
PyDoc_STRVAR(pygpu_matrix_load_projection_matrix_doc,
".. function:: load_projection_matrix(matrix)\n"
"\n"
" Load a projection matrix into the stack.\n"
"\n"
" :param matrix: A 4x4 matrix.\n"
" :type matrix: :class:`mathutils.Matrix`\n");
static PyObject *py_matrix_load_projection_matrix(PyObject *UNUSED(self), PyObject *value)
static PyObject *pygpu_matrix_load_projection_matrix(PyObject *UNUSED(self), PyObject *value)
{
MatrixObject *pymat;
if (!Matrix_Parse4x4(value, &pymat)) {
@@ -422,42 +422,42 @@ static PyObject *py_matrix_load_projection_matrix(PyObject *UNUSED(self), PyObje
/** \name Read State
* \{ */
PyDoc_STRVAR(py_matrix_get_projection_matrix_doc,
PyDoc_STRVAR(pygpu_matrix_get_projection_matrix_doc,
".. function:: get_projection_matrix()\n"
"\n"
" Return a copy of the projection matrix.\n"
"\n"
" :return: A 4x4 projection matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n");
static PyObject *py_matrix_get_projection_matrix(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_get_projection_matrix(PyObject *UNUSED(self))
{
float matrix[4][4];
GPU_matrix_projection_get(matrix);
return Matrix_CreatePyObject(&matrix[0][0], 4, 4, NULL);
}
PyDoc_STRVAR(py_matrix_get_model_view_matrix_doc,
PyDoc_STRVAR(pygpu_matrix_get_model_view_matrix_doc,
".. function:: get_model_view_matrix()\n"
"\n"
" Return a copy of the model-view matrix.\n"
"\n"
" :return: A 4x4 view matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n");
static PyObject *py_matrix_get_model_view_matrix(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_get_model_view_matrix(PyObject *UNUSED(self))
{
float matrix[4][4];
GPU_matrix_model_view_get(matrix);
return Matrix_CreatePyObject(&matrix[0][0], 4, 4, NULL);
}
PyDoc_STRVAR(py_matrix_get_normal_matrix_doc,
PyDoc_STRVAR(pygpu_matrix_get_normal_matrix_doc,
".. function:: get_normal_matrix()\n"
"\n"
" Return a copy of the normal matrix.\n"
"\n"
" :return: A 3x3 normal matrix.\n"
" :rtype: :class:`mathutils.Matrix`\n");
static PyObject *py_matrix_get_normal_matrix(PyObject *UNUSED(self))
static PyObject *pygpu_matrix_get_normal_matrix(PyObject *UNUSED(self))
{
float matrix[3][3];
GPU_matrix_normal_get(matrix);
@@ -470,87 +470,90 @@ static PyObject *py_matrix_get_normal_matrix(PyObject *UNUSED(self))
/** \name Module
* \{ */
static struct PyMethodDef py_matrix_methods[] = {
static struct PyMethodDef pygpu_matrix__tp_methods[] = {
/* Manage Stack */
{"push", (PyCFunction)py_matrix_push, METH_NOARGS, py_matrix_push_doc},
{"pop", (PyCFunction)py_matrix_pop, METH_NOARGS, py_matrix_pop_doc},
{"push", (PyCFunction)pygpu_matrix_push, METH_NOARGS, pygpu_matrix_push_doc},
{"pop", (PyCFunction)pygpu_matrix_pop, METH_NOARGS, pygpu_matrix_pop_doc},
{"push_projection",
(PyCFunction)py_matrix_push_projection,
(PyCFunction)pygpu_matrix_push_projection,
METH_NOARGS,
py_matrix_push_projection_doc},
pygpu_matrix_push_projection_doc},
{"pop_projection",
(PyCFunction)py_matrix_pop_projection,
(PyCFunction)pygpu_matrix_pop_projection,
METH_NOARGS,
py_matrix_pop_projection_doc},
pygpu_matrix_pop_projection_doc},
/* Stack (Context Manager) */
{"push_pop", (PyCFunction)py_matrix_push_pop, METH_NOARGS, py_matrix_push_pop_doc},
{"push_pop", (PyCFunction)pygpu_matrix_push_pop, METH_NOARGS, pygpu_matrix_push_pop_doc},
{"push_pop_projection",
(PyCFunction)py_matrix_push_pop_projection,
(PyCFunction)pygpu_matrix_push_pop_projection,
METH_NOARGS,
py_matrix_push_pop_projection_doc},
pygpu_matrix_push_pop_projection_doc},
/* Manipulate State */
{"multiply_matrix",
(PyCFunction)py_matrix_multiply_matrix,
(PyCFunction)pygpu_matrix_multiply_matrix,
METH_O,
py_matrix_multiply_matrix_doc},
{"scale", (PyCFunction)py_matrix_scale, METH_O, py_matrix_scale_doc},
{"scale_uniform", (PyCFunction)py_matrix_scale_uniform, METH_O, py_matrix_scale_uniform_doc},
{"translate", (PyCFunction)py_matrix_translate, METH_O, py_matrix_translate_doc},
pygpu_matrix_multiply_matrix_doc},
{"scale", (PyCFunction)pygpu_matrix_scale, METH_O, pygpu_matrix_scale_doc},
{"scale_uniform",
(PyCFunction)pygpu_matrix_scale_uniform,
METH_O,
pygpu_matrix_scale_uniform_doc},
{"translate", (PyCFunction)pygpu_matrix_translate, METH_O, pygpu_matrix_translate_doc},
/* TODO */
#if 0
{"rotate", (PyCFunction)py_matrix_rotate, METH_O, py_matrix_rotate_doc},
{"rotate_axis", (PyCFunction)py_matrix_rotate_axis, METH_O, py_matrix_rotate_axis_doc},
{"look_at", (PyCFunction)py_matrix_look_at, METH_O, py_matrix_look_at_doc},
{"rotate", (PyCFunction)pygpu_matrix_rotate, METH_O, pygpu_matrix_rotate_doc},
{"rotate_axis", (PyCFunction)pygpu_matrix_rotate_axis, METH_O, pygpu_matrix_rotate_axis_doc},
{"look_at", (PyCFunction)pygpu_matrix_look_at, METH_O, pygpu_matrix_look_at_doc},
#endif
/* Write State */
{"reset", (PyCFunction)py_matrix_reset, METH_NOARGS, py_matrix_reset_doc},
{"reset", (PyCFunction)pygpu_matrix_reset, METH_NOARGS, pygpu_matrix_reset_doc},
{"load_identity",
(PyCFunction)py_matrix_load_identity,
(PyCFunction)pygpu_matrix_load_identity,
METH_NOARGS,
py_matrix_load_identity_doc},
{"load_matrix", (PyCFunction)py_matrix_load_matrix, METH_O, py_matrix_load_matrix_doc},
pygpu_matrix_load_identity_doc},
{"load_matrix", (PyCFunction)pygpu_matrix_load_matrix, METH_O, pygpu_matrix_load_matrix_doc},
{"load_projection_matrix",
(PyCFunction)py_matrix_load_projection_matrix,
(PyCFunction)pygpu_matrix_load_projection_matrix,
METH_O,
py_matrix_load_projection_matrix_doc},
pygpu_matrix_load_projection_matrix_doc},
/* Read State */
{"get_projection_matrix",
(PyCFunction)py_matrix_get_projection_matrix,
(PyCFunction)pygpu_matrix_get_projection_matrix,
METH_NOARGS,
py_matrix_get_projection_matrix_doc},
pygpu_matrix_get_projection_matrix_doc},
{"get_model_view_matrix",
(PyCFunction)py_matrix_get_model_view_matrix,
(PyCFunction)pygpu_matrix_get_model_view_matrix,
METH_NOARGS,
py_matrix_get_model_view_matrix_doc},
pygpu_matrix_get_model_view_matrix_doc},
{"get_normal_matrix",
(PyCFunction)py_matrix_get_normal_matrix,
(PyCFunction)pygpu_matrix_get_normal_matrix,
METH_NOARGS,
py_matrix_get_normal_matrix_doc},
pygpu_matrix_get_normal_matrix_doc},
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(py_matrix_doc, "This module provides access to the matrix stack.");
static PyModuleDef BPyGPU_matrix_module_def = {
PyDoc_STRVAR(pygpu_matrix__tp_doc, "This module provides access to the matrix stack.");
static PyModuleDef pygpu_matrix_module_def = {
PyModuleDef_HEAD_INIT,
.m_name = "gpu.matrix",
.m_doc = py_matrix_doc,
.m_methods = py_matrix_methods,
.m_doc = pygpu_matrix__tp_doc,
.m_methods = pygpu_matrix__tp_methods,
};
PyObject *BPyInit_gpu_matrix(void)
PyObject *bpygpu_matrix_init(void)
{
PyObject *submodule;
submodule = PyModule_Create(&BPyGPU_matrix_module_def);
submodule = PyModule_Create(&pygpu_matrix_module_def);
if (PyType_Ready(&BPyGPU_matrix_stack_context_Type) < 0) {
if (PyType_Ready(&PyGPUMatrixStackContext_Type) < 0) {
return NULL;
}

View File

@@ -20,4 +20,4 @@
#pragma once
PyObject *BPyInit_gpu_matrix(void);
PyObject *bpygpu_matrix_init(void);

View File

@@ -30,6 +30,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_string.h"
#include "BLI_utildefines.h"
#include "BKE_global.h"
@@ -51,17 +52,26 @@
#include "../generic/py_capi_utils.h"
#include "gpu_py_api.h"
#include "gpu_py.h"
#include "gpu_py_offscreen.h" /* own include */
/* Define the free method to avoid breakage. */
#define BPYGPU_USE_GPUOBJ_FREE_METHOD
/* -------------------------------------------------------------------- */
/** \name GPUOffScreen Common Utilities
* \{ */
static int py_offscreen_valid_check(BPyGPUOffScreen *py_ofs)
static int pygpu_offscreen_valid_check(BPyGPUOffScreen *py_ofs)
{
if (UNLIKELY(py_ofs->ofs == NULL)) {
PyErr_SetString(PyExc_ReferenceError, "GPU offscreen was freed, no further access is valid");
PyErr_SetString(PyExc_ReferenceError,
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
"GPU offscreen was freed, no further access is valid"
#else
"GPU offscreen: internal error"
#endif
);
return -1;
}
return 0;
@@ -69,7 +79,7 @@ static int py_offscreen_valid_check(BPyGPUOffScreen *py_ofs)
#define BPY_GPU_OFFSCREEN_CHECK_OBJ(bpygpu) \
{ \
if (UNLIKELY(py_offscreen_valid_check(bpygpu) == -1)) { \
if (UNLIKELY(pygpu_offscreen_valid_check(bpygpu) == -1)) { \
return NULL; \
} \
} \
@@ -78,94 +88,96 @@ static int py_offscreen_valid_check(BPyGPUOffScreen *py_ofs)
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUOffscreen Type
/** \name Stack (Context Manager)
*
* Safer alternative to ensure balanced push/pop calls.
*
* \{ */
static PyObject *py_offscreen_new(PyTypeObject *UNUSED(self), PyObject *args, PyObject *kwds)
typedef struct {
PyObject_HEAD /* required python macro */
BPyGPUOffScreen *py_offs;
int level;
bool is_explicitly_bound; /* Bound by "bind" method. */
} OffScreenStackContext;
static void pygpu_offscreen_stack_context__tp_dealloc(OffScreenStackContext *self)
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
Py_DECREF(self->py_offs);
PyObject_DEL(self);
}
GPUOffScreen *ofs = NULL;
int width, height;
char err_out[256];
static PyObject *pygpu_offscreen_stack_context_enter(OffScreenStackContext *self)
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self->py_offs);
static const char *_keywords[] = {"width", "height", NULL};
static _PyArg_Parser _parser = {"ii|i:GPUOffScreen.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &width, &height)) {
if (!self->is_explicitly_bound) {
if (self->level != -1) {
PyErr_SetString(PyExc_RuntimeError, "Already in use");
return NULL;
}
GPU_offscreen_bind(self->py_offs->ofs, true);
self->level = GPU_framebuffer_stack_level_get();
}
Py_RETURN_NONE;
}
static PyObject *pygpu_offscreen_stack_context_exit(OffScreenStackContext *self,
PyObject *UNUSED(args))
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self->py_offs);
if (self->level == -1) {
PyErr_SetString(PyExc_RuntimeError, "Not yet in use\n");
return NULL;
}
if (GPU_context_active_get()) {
ofs = GPU_offscreen_create(width, height, true, false, err_out);
}
else {
strncpy(err_out, "No active GPU context found", 256);
const int level = GPU_framebuffer_stack_level_get();
if (level != self->level) {
PyErr_Format(
PyExc_RuntimeError, "Level of bind mismatch, expected %d, got %d\n", self->level, level);
}
if (ofs == NULL) {
PyErr_Format(PyExc_RuntimeError,
"gpu.offscreen.new(...) failed with '%s'",
err_out[0] ? err_out : "unknown error");
return NULL;
}
return BPyGPUOffScreen_CreatePyObject(ofs);
GPU_offscreen_unbind(self->py_offs->ofs, true);
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_offscreen_width_doc, "Width of the texture.\n\n:type: `int`");
static PyObject *py_offscreen_width_get(BPyGPUOffScreen *self, void *UNUSED(type))
static PyMethodDef pygpu_offscreen_stack_context__tp_methods[] = {
{"__enter__", (PyCFunction)pygpu_offscreen_stack_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)pygpu_offscreen_stack_context_exit, METH_VARARGS},
{NULL},
};
static PyTypeObject PyGPUOffscreenStackContext_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUFrameBufferStackContext",
.tp_basicsize = sizeof(OffScreenStackContext),
.tp_dealloc = (destructor)pygpu_offscreen_stack_context__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_methods = pygpu_offscreen_stack_context__tp_methods,
};
PyDoc_STRVAR(pygpu_offscreen_bind_doc,
".. function:: bind()\n"
"\n"
" Context manager to ensure balanced bind calls, even in the case of an error.\n");
static PyObject *pygpu_offscreen_bind(BPyGPUOffScreen *self)
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
return PyLong_FromLong(GPU_offscreen_width(self->ofs));
}
PyDoc_STRVAR(py_offscreen_height_doc, "Height of the texture.\n\n:type: `int`");
static PyObject *py_offscreen_height_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
return PyLong_FromLong(GPU_offscreen_height(self->ofs));
}
PyDoc_STRVAR(py_offscreen_color_texture_doc,
"OpenGL bindcode for the color texture.\n\n:type: `int`");
static PyObject *py_offscreen_color_texture_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
GPUTexture *texture = GPU_offscreen_color_texture(self->ofs);
return PyLong_FromLong(GPU_texture_opengl_bindcode(texture));
}
PyDoc_STRVAR(
    py_offscreen_bind_doc,
    ".. method:: bind(save=True)\n"
    "\n"
    "   Bind the offscreen object.\n"
    "   To make sure that the offscreen gets unbind whether an exception occurs or not,\n"
    "   pack it into a `with` statement.\n"
    "\n"
    "   :arg save: Save the current OpenGL state, so that it can be restored when unbinding.\n"
    "   :type save: `bool`\n");
/**
 * `GPUOffScreen.bind(save=True)`: bind the off-screen and return `self` so it
 * can be used as a context manager (see `__enter__`/`__exit__` below).
 *
 * Fix: the code after `return (PyObject *)self;` was unreachable, and the
 * `OffScreenStackContext` allocated before the return was never used on the
 * taken path — a straight memory leak. The dead allocation is removed;
 * runtime behavior is otherwise unchanged.
 */
static PyObject *py_offscreen_bind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
  BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
  bool save = true;

  static const char *_keywords[] = {"save", NULL};
  static _PyArg_Parser _parser = {"|O&:bind", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, PyC_ParseBool, &save)) {
    return NULL;
  }

  GPU_offscreen_bind(self->ofs, save);
  GPU_apply_state();

  /* Remembered so `__exit__` knows whether to restore the saved GL state. */
  self->is_saved = save;
  Py_INCREF(self);
  return (PyObject *)self;
}
PyDoc_STRVAR(py_offscreen_unbind_doc,
PyDoc_STRVAR(pygpu_offscreen_unbind_doc,
".. method:: unbind(restore=True)\n"
"\n"
" Unbind the offscreen object.\n"
@@ -173,7 +185,7 @@ PyDoc_STRVAR(py_offscreen_unbind_doc,
" :arg restore: Restore the OpenGL state, can only be used when the state has been "
"saved before.\n"
" :type restore: `bool`\n");
static PyObject *py_offscreen_unbind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
static PyObject *pygpu_offscreen_unbind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
bool restore = true;
@@ -190,8 +202,70 @@ static PyObject *py_offscreen_unbind(BPyGPUOffScreen *self, PyObject *args, PyOb
Py_RETURN_NONE;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUOffscreen Type
* \{ */
/* `GPUOffScreen(width, height)` constructor: allocate a GPU off-screen
 * buffer and wrap it in a Python object. Raises RuntimeError when creation
 * fails or when no GPU context is active. */
static PyObject *pygpu_offscreen__tp_new(PyTypeObject *UNUSED(self),
                                         PyObject *args,
                                         PyObject *kwds)
{
  BPYGPU_IS_INIT_OR_ERROR_OBJ;

  int width, height;
  char err_out[256];

  static const char *_keywords[] = {"width", "height", NULL};
  static _PyArg_Parser _parser = {"ii:GPUOffScreen.__new__", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &width, &height)) {
    return NULL;
  }

  GPUOffScreen *offscreen = NULL;
  if (!GPU_context_active_get()) {
    /* Creation requires an active GPU context. */
    STRNCPY(err_out, "No active GPU context found");
  }
  else {
    offscreen = GPU_offscreen_create(width, height, true, false, err_out);
  }

  if (offscreen == NULL) {
    PyErr_Format(PyExc_RuntimeError,
                 "gpu.offscreen.new(...) failed with '%s'",
                 err_out[0] ? err_out : "unknown error");
    return NULL;
  }

  return BPyGPUOffScreen_CreatePyObject(offscreen);
}
PyDoc_STRVAR(pygpu_offscreen_width_doc, "Width of the texture.\n\n:type: `int`");
/* Read-only `width` attribute getter; raises if the off-screen was freed. */
static PyObject *pygpu_offscreen_width_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
  BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
  const int width = GPU_offscreen_width(self->ofs);
  return PyLong_FromLong(width);
}
PyDoc_STRVAR(pygpu_offscreen_height_doc, "Height of the texture.\n\n:type: `int`");
/* Read-only `height` attribute getter; raises if the off-screen was freed. */
static PyObject *pygpu_offscreen_height_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
  BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
  const int height = GPU_offscreen_height(self->ofs);
  return PyLong_FromLong(height);
}
PyDoc_STRVAR(pygpu_offscreen_color_texture_doc,
             "OpenGL bindcode for the color texture.\n\n:type: `int`");
/* Read-only `color_texture` attribute getter; raises if the off-screen was freed. */
static PyObject *pygpu_offscreen_color_texture_get(BPyGPUOffScreen *self, void *UNUSED(type))
{
  BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
  const int bindcode = GPU_texture_opengl_bindcode(GPU_offscreen_color_texture(self->ofs));
  return PyLong_FromLong(bindcode);
}
PyDoc_STRVAR(
py_offscreen_draw_view3d_doc,
pygpu_offscreen_draw_view3d_doc,
".. method:: draw_view3d(scene, view_layer, view3d, region, view_matrix, projection_matrix)\n"
"\n"
" Draw the 3d viewport in the offscreen object.\n"
@@ -208,7 +282,7 @@ PyDoc_STRVAR(
" :type view_matrix: :class:`mathutils.Matrix`\n"
" :arg projection_matrix: Projection Matrix (e.g. ``camera.calc_matrix_camera(...)``).\n"
" :type projection_matrix: :class:`mathutils.Matrix`\n");
static PyObject *py_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
static PyObject *pygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
MatrixObject *py_mat_view, *py_mat_projection;
PyObject *py_scene, *py_view_layer, *py_region, *py_view3d;
@@ -268,6 +342,7 @@ static PyObject *py_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *args,
true,
"",
false,
true,
self->ofs,
NULL);
@@ -280,12 +355,13 @@ static PyObject *py_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *args,
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_offscreen_free_doc,
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
PyDoc_STRVAR(pygpu_offscreen_free_doc,
".. method:: free()\n"
"\n"
" Free the offscreen object.\n"
" The framebuffer, texture and render objects will no longer be accessible.\n");
static PyObject *py_offscreen_free(BPyGPUOffScreen *self)
static PyObject *pygpu_offscreen_free(BPyGPUOffScreen *self)
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
@@ -293,17 +369,7 @@ static PyObject *py_offscreen_free(BPyGPUOffScreen *self)
self->ofs = NULL;
Py_RETURN_NONE;
}
/* `__enter__` for `with ofs.bind(...)`: the actual bind already happened in
 * `bind()`, so entering the context is a no-op. */
static PyObject *py_offscreen_bind_context_enter(BPyGPUOffScreen *UNUSED(self))
{
  Py_RETURN_NONE;
}
/* `__exit__` for `with ofs.bind(...)`: always unbind, restoring the saved GL
 * state when `bind()` was called with `save=True` (tracked in `is_saved`).
 * NOTE(review): the exception arguments are ignored; unbinding runs
 * unconditionally and any exception propagates. */
static PyObject *py_offscreen_bind_context_exit(BPyGPUOffScreen *self, PyObject *UNUSED(args))
{
  GPU_offscreen_unbind(self->ofs, self->is_saved);
  Py_RETURN_NONE;
}
#endif
static void BPyGPUOffScreen__tp_dealloc(BPyGPUOffScreen *self)
{
@@ -313,34 +379,34 @@ static void BPyGPUOffScreen__tp_dealloc(BPyGPUOffScreen *self)
Py_TYPE(self)->tp_free((PyObject *)self);
}
static PyGetSetDef py_offscreen_getseters[] = {
static PyGetSetDef pygpu_offscreen__tp_getseters[] = {
{"color_texture",
(getter)py_offscreen_color_texture_get,
(getter)pygpu_offscreen_color_texture_get,
(setter)NULL,
py_offscreen_color_texture_doc,
pygpu_offscreen_color_texture_doc,
NULL},
{"width", (getter)py_offscreen_width_get, (setter)NULL, py_offscreen_width_doc, NULL},
{"height", (getter)py_offscreen_height_get, (setter)NULL, py_offscreen_height_doc, NULL},
{"width", (getter)pygpu_offscreen_width_get, (setter)NULL, pygpu_offscreen_width_doc, NULL},
{"height", (getter)pygpu_offscreen_height_get, (setter)NULL, pygpu_offscreen_height_doc, NULL},
{NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};
static struct PyMethodDef py_offscreen_methods[] = {
{"bind", (PyCFunction)py_offscreen_bind, METH_VARARGS | METH_KEYWORDS, py_offscreen_bind_doc},
static struct PyMethodDef pygpu_offscreen__tp_methods[] = {
{"bind", (PyCFunction)pygpu_offscreen_bind, METH_NOARGS, pygpu_offscreen_bind_doc},
{"unbind",
(PyCFunction)py_offscreen_unbind,
(PyCFunction)pygpu_offscreen_unbind,
METH_VARARGS | METH_KEYWORDS,
py_offscreen_unbind_doc},
pygpu_offscreen_unbind_doc},
{"draw_view3d",
(PyCFunction)py_offscreen_draw_view3d,
(PyCFunction)pygpu_offscreen_draw_view3d,
METH_VARARGS | METH_KEYWORDS,
py_offscreen_draw_view3d_doc},
{"free", (PyCFunction)py_offscreen_free, METH_NOARGS, py_offscreen_free_doc},
{"__enter__", (PyCFunction)py_offscreen_bind_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)py_offscreen_bind_context_exit, METH_VARARGS},
pygpu_offscreen_draw_view3d_doc},
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
{"free", (PyCFunction)pygpu_offscreen_free, METH_NOARGS, pygpu_offscreen_free_doc},
#endif
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(py_offscreen_doc,
PyDoc_STRVAR(pygpu_offscreen__tp_doc,
".. class:: GPUOffScreen(width, height)\n"
"\n"
" This object gives access to off screen buffers.\n"
@@ -354,10 +420,10 @@ PyTypeObject BPyGPUOffScreen_Type = {
.tp_basicsize = sizeof(BPyGPUOffScreen),
.tp_dealloc = (destructor)BPyGPUOffScreen__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = py_offscreen_doc,
.tp_methods = py_offscreen_methods,
.tp_getset = py_offscreen_getseters,
.tp_new = py_offscreen_new,
.tp_doc = pygpu_offscreen__tp_doc,
.tp_methods = pygpu_offscreen__tp_methods,
.tp_getset = pygpu_offscreen__tp_getseters,
.tp_new = pygpu_offscreen__tp_new,
};
/** \} */

View File

@@ -28,7 +28,6 @@ extern PyTypeObject BPyGPUOffScreen_Type;
typedef struct BPyGPUOffScreen {
PyObject_HEAD struct GPUOffScreen *ofs;
bool is_saved;
} BPyGPUOffScreen;
PyObject *BPyGPUOffScreen_CreatePyObject(struct GPUOffScreen *ofs) ATTR_NONNULL(1);

View File

@@ -40,14 +40,14 @@
/** \name Methods
* \{ */
PyDoc_STRVAR(py_select_load_id_doc,
PyDoc_STRVAR(pygpu_select_load_id_doc,
".. function:: load_id(id)\n"
"\n"
" Set the selection ID.\n"
"\n"
" :param id: Number (32-bit uint).\n"
" :type select: int\n");
static PyObject *py_select_load_id(PyObject *UNUSED(self), PyObject *value)
static PyObject *pygpu_select_load_id(PyObject *UNUSED(self), PyObject *value)
{
uint id;
if ((id = PyC_Long_AsU32(value)) == (uint)-1) {
@@ -62,25 +62,25 @@ static PyObject *py_select_load_id(PyObject *UNUSED(self), PyObject *value)
/** \name Module
* \{ */
static struct PyMethodDef py_select_methods[] = {
static struct PyMethodDef pygpu_select__tp_methods[] = {
/* Manage Stack */
{"load_id", (PyCFunction)py_select_load_id, METH_O, py_select_load_id_doc},
{"load_id", (PyCFunction)pygpu_select_load_id, METH_O, pygpu_select_load_id_doc},
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(py_select_doc, "This module provides access to selection.");
static PyModuleDef BPyGPU_select_module_def = {
PyDoc_STRVAR(pygpu_select__tp_doc, "This module provides access to selection.");
static PyModuleDef pygpu_select_module_def = {
PyModuleDef_HEAD_INIT,
.m_name = "gpu.select",
.m_doc = py_select_doc,
.m_methods = py_select_methods,
.m_doc = pygpu_select__tp_doc,
.m_methods = pygpu_select__tp_methods,
};
PyObject *BPyInit_gpu_select(void)
PyObject *bpygpu_select_init(void)
{
PyObject *submodule;
submodule = PyModule_Create(&BPyGPU_select_module_def);
submodule = PyModule_Create(&pygpu_select_module_def);
return submodule;
}

View File

@@ -20,4 +20,4 @@
#pragma once
PyObject *BPyInit_gpu_select(void);
PyObject *bpygpu_select_init(void);

View File

@@ -26,20 +26,25 @@
#include "BLI_utildefines.h"
#include "GPU_shader.h"
#include "GPU_texture.h"
#include "GPU_uniform_buffer.h"
#include "../generic/py_capi_utils.h"
#include "../generic/python_utildefines.h"
#include "../mathutils/mathutils.h"
#include "gpu_py_api.h"
#include "gpu_py_shader.h" /* own include */
#include "gpu_py.h"
#include "gpu_py_texture.h"
#include "gpu_py_uniformbuffer.h"
#include "gpu_py_vertex_format.h"
#include "gpu_py_shader.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name Enum Conversion.
* \{ */
static const struct PyC_StringEnumItems pygpu_bultinshader_items[] = {
static const struct PyC_StringEnumItems pygpu_shader_builtin_items[] = {
{GPU_SHADER_2D_UNIFORM_COLOR, "2D_UNIFORM_COLOR"},
{GPU_SHADER_2D_FLAT_COLOR, "2D_FLAT_COLOR"},
{GPU_SHADER_2D_SMOOTH_COLOR, "2D_SMOOTH_COLOR"},
@@ -51,7 +56,9 @@ static const struct PyC_StringEnumItems pygpu_bultinshader_items[] = {
{0, NULL},
};
static int py_uniform_location_get(GPUShader *shader, const char *name, const char *error_prefix)
static int pygpu_shader_uniform_location_get(GPUShader *shader,
const char *name,
const char *error_prefix)
{
const int uniform = GPU_shader_get_uniform(shader, name);
@@ -68,7 +75,7 @@ static int py_uniform_location_get(GPUShader *shader, const char *name, const ch
/** \name Shader Type
* \{ */
static PyObject *py_shader_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
static PyObject *pygpu_shader__tp_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
@@ -107,17 +114,17 @@ static PyObject *py_shader_new(PyTypeObject *UNUSED(type), PyObject *args, PyObj
}
PyDoc_STRVAR(
py_shader_bind_doc,
pygpu_shader_bind_doc,
".. method:: bind()\n"
"\n"
" Bind the shader object. Required to be able to change uniforms of this shader.\n");
static PyObject *py_shader_bind(BPyGPUShader *self)
static PyObject *pygpu_shader_bind(BPyGPUShader *self)
{
GPU_shader_bind(self->shader);
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_shader_uniform_from_name_doc,
PyDoc_STRVAR(pygpu_shader_uniform_from_name_doc,
".. method:: uniform_from_name(name)\n"
"\n"
" Get uniform location by name.\n"
@@ -126,14 +133,15 @@ PyDoc_STRVAR(py_shader_uniform_from_name_doc,
" :type name: `str`\n"
" :return: Location of the uniform variable.\n"
" :rtype: `int`\n");
static PyObject *py_shader_uniform_from_name(BPyGPUShader *self, PyObject *arg)
static PyObject *pygpu_shader_uniform_from_name(BPyGPUShader *self, PyObject *arg)
{
const char *name = PyUnicode_AsUTF8(arg);
if (name == NULL) {
return NULL;
}
const int uniform = py_uniform_location_get(self->shader, name, "GPUShader.get_uniform");
const int uniform = pygpu_shader_uniform_location_get(
self->shader, name, "GPUShader.get_uniform");
if (uniform == -1) {
return NULL;
@@ -143,7 +151,7 @@ static PyObject *py_shader_uniform_from_name(BPyGPUShader *self, PyObject *arg)
}
PyDoc_STRVAR(
py_shader_uniform_block_from_name_doc,
pygpu_shader_uniform_block_from_name_doc,
".. method:: uniform_block_from_name(name)\n"
"\n"
" Get uniform block location by name.\n"
@@ -152,7 +160,7 @@ PyDoc_STRVAR(
" :type name: `str`\n"
" :return: The location of the uniform block variable.\n"
" :rtype: `int`\n");
static PyObject *py_shader_uniform_block_from_name(BPyGPUShader *self, PyObject *arg)
static PyObject *pygpu_shader_uniform_block_from_name(BPyGPUShader *self, PyObject *arg)
{
const char *name = PyUnicode_AsUTF8(arg);
if (name == NULL) {
@@ -169,12 +177,12 @@ static PyObject *py_shader_uniform_block_from_name(BPyGPUShader *self, PyObject
return PyLong_FromLong(uniform);
}
static bool py_shader_uniform_vector_impl(PyObject *args,
int elem_size,
int *r_location,
int *r_length,
int *r_count,
Py_buffer *r_pybuffer)
static bool pygpu_shader_uniform_vector_impl(PyObject *args,
int elem_size,
int *r_location,
int *r_length,
int *r_count,
Py_buffer *r_pybuffer)
{
PyObject *buffer;
@@ -197,7 +205,7 @@ static bool py_shader_uniform_vector_impl(PyObject *args,
return true;
}
PyDoc_STRVAR(py_shader_uniform_vector_float_doc,
PyDoc_STRVAR(pygpu_shader_uniform_vector_float_doc,
".. method:: uniform_vector_float(location, buffer, length, count)\n"
"\n"
" Set the buffer to fill the uniform.\n"
@@ -217,13 +225,14 @@ PyDoc_STRVAR(py_shader_uniform_vector_float_doc,
" :param count: Specifies the number of elements, vector or matrices that are to "
"be modified.\n"
" :type count: int\n");
static PyObject *py_shader_uniform_vector_float(BPyGPUShader *self, PyObject *args)
static PyObject *pygpu_shader_uniform_vector_float(BPyGPUShader *self, PyObject *args)
{
int location, length, count;
Py_buffer pybuffer;
if (!py_shader_uniform_vector_impl(args, sizeof(float), &location, &length, &count, &pybuffer)) {
if (!pygpu_shader_uniform_vector_impl(
args, sizeof(float), &location, &length, &count, &pybuffer)) {
return NULL;
}
@@ -234,17 +243,18 @@ static PyObject *py_shader_uniform_vector_float(BPyGPUShader *self, PyObject *ar
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_shader_uniform_vector_int_doc,
PyDoc_STRVAR(pygpu_shader_uniform_vector_int_doc,
".. method:: uniform_vector_int(location, buffer, length, count)\n"
"\n"
" See GPUShader.uniform_vector_float(...) description.\n");
static PyObject *py_shader_uniform_vector_int(BPyGPUShader *self, PyObject *args)
static PyObject *pygpu_shader_uniform_vector_int(BPyGPUShader *self, PyObject *args)
{
int location, length, count;
Py_buffer pybuffer;
if (!py_shader_uniform_vector_impl(args, sizeof(int), &location, &length, &count, &pybuffer)) {
if (!pygpu_shader_uniform_vector_impl(
args, sizeof(int), &location, &length, &count, &pybuffer)) {
return NULL;
}
@@ -255,7 +265,7 @@ static PyObject *py_shader_uniform_vector_int(BPyGPUShader *self, PyObject *args
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_shader_uniform_bool_doc,
PyDoc_STRVAR(pygpu_shader_uniform_bool_doc,
".. method:: uniform_bool(name, seq)\n"
"\n"
" Specify the value of a uniform variable for the current program object.\n"
@@ -264,7 +274,7 @@ PyDoc_STRVAR(py_shader_uniform_bool_doc,
" :type name: str\n"
" :param seq: Value that will be used to update the specified uniform variable.\n"
" :type seq: sequence of bools\n");
static PyObject *py_shader_uniform_bool(BPyGPUShader *self, PyObject *args)
static PyObject *pygpu_shader_uniform_bool(BPyGPUShader *self, PyObject *args)
{
const char *error_prefix = "GPUShader.uniform_bool";
@@ -308,7 +318,7 @@ static PyObject *py_shader_uniform_bool(BPyGPUShader *self, PyObject *args)
return NULL;
}
const int location = py_uniform_location_get(self->shader, params.id, error_prefix);
const int location = pygpu_shader_uniform_location_get(self->shader, params.id, error_prefix);
if (location == -1) {
return NULL;
@@ -319,7 +329,7 @@ static PyObject *py_shader_uniform_bool(BPyGPUShader *self, PyObject *args)
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_shader_uniform_float_doc,
PyDoc_STRVAR(pygpu_shader_uniform_float_doc,
".. method:: uniform_float(name, value)\n"
"\n"
" Specify the value of a uniform variable for the current program object.\n"
@@ -328,7 +338,7 @@ PyDoc_STRVAR(py_shader_uniform_float_doc,
" :type name: str\n"
" :param value: Value that will be used to update the specified uniform variable.\n"
" :type value: single number or sequence of numbers\n");
static PyObject *py_shader_uniform_float(BPyGPUShader *self, PyObject *args)
static PyObject *pygpu_shader_uniform_float(BPyGPUShader *self, PyObject *args)
{
const char *error_prefix = "GPUShader.uniform_float";
@@ -377,7 +387,7 @@ static PyObject *py_shader_uniform_float(BPyGPUShader *self, PyObject *args)
return NULL;
}
const int location = py_uniform_location_get(self->shader, params.id, error_prefix);
const int location = pygpu_shader_uniform_location_get(self->shader, params.id, error_prefix);
if (location == -1) {
return NULL;
@@ -388,7 +398,7 @@ static PyObject *py_shader_uniform_float(BPyGPUShader *self, PyObject *args)
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_shader_uniform_int_doc,
PyDoc_STRVAR(pygpu_shader_uniform_int_doc,
".. method:: uniform_int(name, seq)\n"
"\n"
" Specify the value of a uniform variable for the current program object.\n"
@@ -397,7 +407,7 @@ PyDoc_STRVAR(py_shader_uniform_int_doc,
" :type name: str\n"
" :param seq: Value that will be used to update the specified uniform variable.\n"
" :type seq: sequence of numbers\n");
static PyObject *py_shader_uniform_int(BPyGPUShader *self, PyObject *args)
static PyObject *pygpu_shader_uniform_int(BPyGPUShader *self, PyObject *args)
{
const char *error_prefix = "GPUShader.uniform_int";
@@ -447,7 +457,7 @@ static PyObject *py_shader_uniform_int(BPyGPUShader *self, PyObject *args)
return NULL;
}
const int location = py_uniform_location_get(self->shader, params.id, error_prefix);
const int location = pygpu_shader_uniform_location_get(self->shader, params.id, error_prefix);
if (location == -1) {
return NULL;
@@ -458,8 +468,66 @@ static PyObject *py_shader_uniform_int(BPyGPUShader *self, PyObject *args)
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_shader_uniform_sampler_doc,
             ".. method:: uniform_sampler(name, texture)\n"
             "\n"
             "   Specify the value of a texture uniform variable for the current GPUShader.\n"
             "\n"
             "   :param name: name of the uniform variable whose texture is to be specified.\n"
             "   :type name: str\n"
             "   :param texture: Texture to attach.\n"
             "   :type texture: :class:`gpu.types.GPUTexture`\n");
/**
 * `GPUShader.uniform_sampler(name, texture)`: bind `texture` to the sampler
 * binding point of uniform `name` and point the uniform at that slot.
 *
 * Fix: consistent with `uniform_block` below, a lookup failure (`-1`) is now
 * reported instead of silently binding the texture to an invalid slot.
 */
static PyObject *pygpu_shader_uniform_sampler(BPyGPUShader *self, PyObject *args)
{
  const char *name;
  BPyGPUTexture *py_texture;
  if (!PyArg_ParseTuple(
          args, "sO!:GPUShader.uniform_sampler", &name, &BPyGPUTexture_Type, &py_texture)) {
    return NULL;
  }

  const int slot = GPU_shader_get_texture_binding(self->shader, name);
  if (slot == -1) {
    PyErr_SetString(
        PyExc_BufferError,
        "GPUShader.uniform_sampler: texture sampler not found, make sure the name is correct");
    return NULL;
  }

  GPU_texture_bind(py_texture->tex, slot);
  GPU_shader_uniform_1i(self->shader, name, slot);
  Py_RETURN_NONE;
}
PyDoc_STRVAR(
py_shader_attr_from_name_doc,
pygpu_shader_uniform_block_doc,
".. method:: uniform_block(name, ubo)\n"
"\n"
" Specify the value of an uniform buffer object variable for the current GPUShader.\n"
"\n"
" :param name: name of the uniform variable whose UBO is to be specified.\n"
" :type name: str\n"
" :param ubo: Uniform Buffer to attach.\n"
" :type texture: :class:`gpu.types.GPUUniformBuf`\n");
/**
 * `GPUShader.uniform_block(name, ubo)`: bind the uniform buffer `ubo` to the
 * binding point of the uniform block `name` and point the block at that slot.
 *
 * Fix: the "not found" error message said "GPUShader.uniform_buffer" even
 * though the method (and the parse prefix above) is `uniform_block`.
 */
static PyObject *pygpu_shader_uniform_block(BPyGPUShader *self, PyObject *args)
{
  const char *name;
  BPyGPUUniformBuf *py_ubo;
  if (!PyArg_ParseTuple(
          args, "sO!:GPUShader.uniform_block", &name, &BPyGPUUniformBuf_Type, &py_ubo)) {
    return NULL;
  }

  /* -1 means the block name does not exist in this shader. */
  const int slot = GPU_shader_get_uniform_block(self->shader, name);
  if (slot == -1) {
    PyErr_SetString(
        PyExc_BufferError,
        "GPUShader.uniform_block: uniform block not found, make sure the name is correct");
    return NULL;
  }

  GPU_uniformbuf_bind(py_ubo->ubo, slot);
  GPU_shader_uniform_1i(self->shader, name, slot);

  Py_RETURN_NONE;
}
PyDoc_STRVAR(
pygpu_shader_attr_from_name_doc,
".. method:: attr_from_name(name)\n"
"\n"
" Get attribute location by name.\n"
@@ -468,7 +536,7 @@ PyDoc_STRVAR(
" :type name: str\n"
" :return: The location of an attribute variable.\n"
" :rtype: int\n");
static PyObject *py_shader_attr_from_name(BPyGPUShader *self, PyObject *arg)
static PyObject *pygpu_shader_attr_from_name(BPyGPUShader *self, PyObject *arg)
{
const char *name = PyUnicode_AsUTF8(arg);
if (name == NULL) {
@@ -485,69 +553,83 @@ static PyObject *py_shader_attr_from_name(BPyGPUShader *self, PyObject *arg)
return PyLong_FromLong(attr);
}
PyDoc_STRVAR(py_shader_calc_format_doc,
PyDoc_STRVAR(pygpu_shader_calc_format_doc,
".. method:: calc_format()\n"
"\n"
" Build a new format based on the attributes of the shader.\n"
"\n"
" :return: vertex attribute format for the shader\n"
" :rtype: GPUVertFormat\n");
/* Build and return a new `GPUVertFormat` populated from this shader's
 * vertex attributes. */
static PyObject *py_shader_calc_format(BPyGPUShader *self, PyObject *UNUSED(arg))
{
  BPyGPUVertFormat *format = (BPyGPUVertFormat *)BPyGPUVertFormat_CreatePyObject(NULL);
  GPU_vertformat_from_shader(&format->fmt, self->shader);
  return (PyObject *)format;
}
}
static struct PyMethodDef py_shader_methods[] = {
{"bind", (PyCFunction)py_shader_bind, METH_NOARGS, py_shader_bind_doc},
static struct PyMethodDef pygpu_shader__tp_methods[] = {
{"bind", (PyCFunction)pygpu_shader_bind, METH_NOARGS, pygpu_shader_bind_doc},
{"uniform_from_name",
(PyCFunction)py_shader_uniform_from_name,
(PyCFunction)pygpu_shader_uniform_from_name,
METH_O,
py_shader_uniform_from_name_doc},
pygpu_shader_uniform_from_name_doc},
{"uniform_block_from_name",
(PyCFunction)py_shader_uniform_block_from_name,
(PyCFunction)pygpu_shader_uniform_block_from_name,
METH_O,
py_shader_uniform_block_from_name_doc},
pygpu_shader_uniform_block_from_name_doc},
{"uniform_vector_float",
(PyCFunction)py_shader_uniform_vector_float,
(PyCFunction)pygpu_shader_uniform_vector_float,
METH_VARARGS,
py_shader_uniform_vector_float_doc},
pygpu_shader_uniform_vector_float_doc},
{"uniform_vector_int",
(PyCFunction)py_shader_uniform_vector_int,
(PyCFunction)pygpu_shader_uniform_vector_int,
METH_VARARGS,
py_shader_uniform_vector_int_doc},
pygpu_shader_uniform_vector_int_doc},
{"uniform_bool",
(PyCFunction)py_shader_uniform_bool,
(PyCFunction)pygpu_shader_uniform_bool,
METH_VARARGS,
py_shader_uniform_bool_doc},
pygpu_shader_uniform_bool_doc},
{"uniform_float",
(PyCFunction)py_shader_uniform_float,
(PyCFunction)pygpu_shader_uniform_float,
METH_VARARGS,
py_shader_uniform_float_doc},
{"uniform_int", (PyCFunction)py_shader_uniform_int, METH_VARARGS, py_shader_uniform_int_doc},
pygpu_shader_uniform_float_doc},
{"uniform_int",
(PyCFunction)pygpu_shader_uniform_int,
METH_VARARGS,
pygpu_shader_uniform_int_doc},
{"uniform_sampler",
(PyCFunction)pygpu_shader_uniform_sampler,
METH_VARARGS,
pygpu_shader_uniform_sampler_doc},
{"uniform_block",
(PyCFunction)pygpu_shader_uniform_block,
METH_VARARGS,
pygpu_shader_uniform_block_doc},
{"attr_from_name",
(PyCFunction)py_shader_attr_from_name,
(PyCFunction)pygpu_shader_attr_from_name,
METH_O,
py_shader_attr_from_name_doc},
{"format_calc", (PyCFunction)py_shader_calc_format, METH_NOARGS, py_shader_calc_format_doc},
pygpu_shader_attr_from_name_doc},
{"format_calc",
(PyCFunction)pygpu_shader_calc_format,
METH_NOARGS,
pygpu_shader_calc_format_doc},
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(
py_shader_program_doc,
pygpu_shader_program_doc,
"The name of the program object for use by the OpenGL API (read-only).\n\n:type: int");
static PyObject *py_shader_program_get(BPyGPUShader *self, void *UNUSED(closure))
static PyObject *pygpu_shader_program_get(BPyGPUShader *self, void *UNUSED(closure))
{
return PyLong_FromLong(GPU_shader_get_program(self->shader));
}
static PyGetSetDef py_shader_getseters[] = {
{"program", (getter)py_shader_program_get, (setter)NULL, py_shader_program_doc, NULL},
static PyGetSetDef pygpu_shader__tp_getseters[] = {
{"program", (getter)pygpu_shader_program_get, (setter)NULL, pygpu_shader_program_doc, NULL},
{NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};
static void py_shader_dealloc(BPyGPUShader *self)
static void pygpu_shader__tp_dealloc(BPyGPUShader *self)
{
if (self->is_builtin == false) {
GPU_shader_free(self->shader);
@@ -556,7 +638,7 @@ static void py_shader_dealloc(BPyGPUShader *self)
}
PyDoc_STRVAR(
py_shader_doc,
pygpu_shader__tp_doc,
".. class:: GPUShader(vertexcode, fragcode, geocode=None, libcode=None, defines=None)\n"
"\n"
" GPUShader combines multiple GLSL shaders into a program used for drawing.\n"
@@ -587,12 +669,12 @@ PyDoc_STRVAR(
PyTypeObject BPyGPUShader_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUShader",
.tp_basicsize = sizeof(BPyGPUShader),
.tp_dealloc = (destructor)py_shader_dealloc,
.tp_dealloc = (destructor)pygpu_shader__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = py_shader_doc,
.tp_methods = py_shader_methods,
.tp_getset = py_shader_getseters,
.tp_new = py_shader_new,
.tp_doc = pygpu_shader__tp_doc,
.tp_methods = pygpu_shader__tp_methods,
.tp_getset = pygpu_shader__tp_getseters,
.tp_new = pygpu_shader__tp_new,
};
/** \} */
@@ -601,18 +683,18 @@ PyTypeObject BPyGPUShader_Type = {
/** \name gpu.shader Module API
* \{ */
PyDoc_STRVAR(py_shader_unbind_doc,
PyDoc_STRVAR(pygpu_shader_unbind_doc,
".. function:: unbind()\n"
"\n"
" Unbind the bound shader object.\n");
static PyObject *py_shader_unbind(BPyGPUShader *UNUSED(self))
static PyObject *pygpu_shader_unbind(BPyGPUShader *UNUSED(self))
{
GPU_shader_unbind();
Py_RETURN_NONE;
}
PyDoc_STRVAR(py_shader_from_builtin_doc,
".. function:: from_builtin(shader_name)\n"
PyDoc_STRVAR(pygpu_shader_from_builtin_doc,
".. function:: from_builtin(pygpu_shader_name)\n"
"\n"
" Shaders that are embedded in the blender internal code.\n"
" They all read the uniform ``mat4 ModelViewProjectionMatrix``,\n"
@@ -620,7 +702,7 @@ PyDoc_STRVAR(py_shader_from_builtin_doc,
" For more details, you can check the shader code with the\n"
" :func:`gpu.shader.code_from_builtin` function.\n"
"\n"
" :param shader_name: One of these builtin shader names:\n\n"
" :param pygpu_shader_name: One of these builtin shader names:\n\n"
" - ``2D_UNIFORM_COLOR``\n"
" - ``2D_FLAT_COLOR``\n"
" - ``2D_SMOOTH_COLOR``\n"
@@ -628,14 +710,14 @@ PyDoc_STRVAR(py_shader_from_builtin_doc,
" - ``3D_UNIFORM_COLOR``\n"
" - ``3D_FLAT_COLOR``\n"
" - ``3D_SMOOTH_COLOR``\n"
" :type shader_name: str\n"
" :type pygpu_shader_name: str\n"
" :return: Shader object corresponding to the given name.\n"
" :rtype: :class:`bpy.types.GPUShader`\n");
static PyObject *py_shader_from_builtin(PyObject *UNUSED(self), PyObject *arg)
static PyObject *pygpu_shader_from_builtin(PyObject *UNUSED(self), PyObject *arg)
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
struct PyC_StringEnum pygpu_bultinshader = {pygpu_bultinshader_items};
struct PyC_StringEnum pygpu_bultinshader = {pygpu_shader_builtin_items};
if (!PyC_ParseStringEnum(arg, &pygpu_bultinshader)) {
return NULL;
}
@@ -645,12 +727,12 @@ static PyObject *py_shader_from_builtin(PyObject *UNUSED(self), PyObject *arg)
return BPyGPUShader_CreatePyObject(shader, true);
}
PyDoc_STRVAR(py_shader_code_from_builtin_doc,
".. function:: code_from_builtin(shader_name)\n"
PyDoc_STRVAR(pygpu_shader_code_from_builtin_doc,
".. function:: code_from_builtin(pygpu_shader_name)\n"
"\n"
" Exposes the internal shader code for query.\n"
"\n"
" :param shader_name: One of these builtin shader names:\n\n"
" :param pygpu_shader_name: One of these builtin shader names:\n\n"
" - ``2D_UNIFORM_COLOR``\n"
" - ``2D_FLAT_COLOR``\n"
" - ``2D_SMOOTH_COLOR``\n"
@@ -658,10 +740,10 @@ PyDoc_STRVAR(py_shader_code_from_builtin_doc,
" - ``3D_UNIFORM_COLOR``\n"
" - ``3D_FLAT_COLOR``\n"
" - ``3D_SMOOTH_COLOR``\n"
" :type shader_name: str\n"
" :type pygpu_shader_name: str\n"
" :return: Vertex, fragment and geometry shader codes.\n"
" :rtype: dict\n");
static PyObject *py_shader_code_from_builtin(BPyGPUShader *UNUSED(self), PyObject *arg)
static PyObject *pygpu_shader_code_from_builtin(BPyGPUShader *UNUSED(self), PyObject *arg)
{
const char *vert;
const char *frag;
@@ -670,7 +752,7 @@ static PyObject *py_shader_code_from_builtin(BPyGPUShader *UNUSED(self), PyObjec
PyObject *item, *r_dict;
struct PyC_StringEnum pygpu_bultinshader = {pygpu_bultinshader_items};
struct PyC_StringEnum pygpu_bultinshader = {pygpu_shader_builtin_items};
if (!PyC_ParseStringEnum(arg, &pygpu_bultinshader)) {
return NULL;
}
@@ -697,17 +779,20 @@ static PyObject *py_shader_code_from_builtin(BPyGPUShader *UNUSED(self), PyObjec
return r_dict;
}
static struct PyMethodDef py_shader_module_methods[] = {
{"unbind", (PyCFunction)py_shader_unbind, METH_NOARGS, py_shader_unbind_doc},
{"from_builtin", (PyCFunction)py_shader_from_builtin, METH_O, py_shader_from_builtin_doc},
{"code_from_builtin",
(PyCFunction)py_shader_code_from_builtin,
static struct PyMethodDef pygpu_shader_module__tp_methods[] = {
{"unbind", (PyCFunction)pygpu_shader_unbind, METH_NOARGS, pygpu_shader_unbind_doc},
{"from_builtin",
(PyCFunction)pygpu_shader_from_builtin,
METH_O,
py_shader_code_from_builtin_doc},
pygpu_shader_from_builtin_doc},
{"code_from_builtin",
(PyCFunction)pygpu_shader_code_from_builtin,
METH_O,
pygpu_shader_code_from_builtin_doc},
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(py_shader_module_doc,
PyDoc_STRVAR(pygpu_shader_module__tp_doc,
"This module provides access to GPUShader internal functions.\n"
"\n"
".. rubric:: Built-in shaders\n"
@@ -736,11 +821,11 @@ PyDoc_STRVAR(py_shader_module_doc,
"3D_SMOOTH_COLOR\n"
" :Attributes: vec3 pos, vec4 color\n"
" :Uniforms: none\n");
static PyModuleDef BPyGPU_shader_module_def = {
static PyModuleDef pygpu_shader_module_def = {
PyModuleDef_HEAD_INIT,
.m_name = "gpu.shader",
.m_doc = py_shader_module_doc,
.m_methods = py_shader_module_methods,
.m_doc = pygpu_shader_module__tp_doc,
.m_methods = pygpu_shader_module__tp_methods,
};
/** \} */
@@ -760,11 +845,11 @@ PyObject *BPyGPUShader_CreatePyObject(GPUShader *shader, bool is_builtin)
return (PyObject *)self;
}
PyObject *BPyInit_gpu_shader(void)
PyObject *bpygpu_shader_init(void)
{
PyObject *submodule;
submodule = PyModule_Create(&BPyGPU_shader_module_def);
submodule = PyModule_Create(&pygpu_shader_module_def);
return submodule;
}

View File

@@ -30,4 +30,4 @@ typedef struct BPyGPUShader {
} BPyGPUShader;
PyObject *BPyGPUShader_CreatePyObject(struct GPUShader *shader, bool is_builtin);
PyObject *BPyInit_gpu_shader(void);
PyObject *bpygpu_shader_init(void);

View File

@@ -0,0 +1,423 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* This file defines the gpu.state API.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "GPU_state.h"
#include "../generic/py_capi_utils.h"
#include "../generic/python_utildefines.h"
#include "gpu_py_state.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name Helper Functions
* \{ */
/* Maps the Python identifiers accepted by `gpu.state.blend_set` to GPU_BLEND_* values.
 * NULL-name entry terminates the table (required by #PyC_ParseStringEnum). */
static const struct PyC_StringEnumItems pygpu_state_blend_items[] = {
    {GPU_BLEND_NONE, "NONE"},
    {GPU_BLEND_ALPHA, "ALPHA"},
    {GPU_BLEND_ALPHA_PREMULT, "ALPHA_PREMULT"},
    {GPU_BLEND_ADDITIVE, "ADDITIVE"},
    {GPU_BLEND_ADDITIVE_PREMULT, "ADDITIVE_PREMULT"},
    {GPU_BLEND_MULTIPLY, "MULTIPLY"},
    {GPU_BLEND_SUBTRACT, "SUBTRACT"},
    {GPU_BLEND_INVERT, "INVERT"},
    /**
     * These are quite special cases used inside the draw manager.
     * {GPU_BLEND_OIT, "OIT"},
     * {GPU_BLEND_BACKGROUND, "BACKGROUND"},
     * {GPU_BLEND_CUSTOM, "CUSTOM"},
     */
    {0, NULL}, /* Sentinel. */
};

/* Maps the Python identifiers accepted by `gpu.state.depth_test_set` to GPU_DEPTH_* values. */
static const struct PyC_StringEnumItems pygpu_state_depthtest_items[] = {
    {GPU_DEPTH_NONE, "NONE"},
    {GPU_DEPTH_ALWAYS, "ALWAYS"},
    {GPU_DEPTH_LESS, "LESS"},
    {GPU_DEPTH_LESS_EQUAL, "LESS_EQUAL"},
    {GPU_DEPTH_EQUAL, "EQUAL"},
    {GPU_DEPTH_GREATER, "GREATER"},
    {GPU_DEPTH_GREATER_EQUAL, "GREATER_EQUAL"},
    {0, NULL}, /* Sentinel. */
};

/* Maps the Python identifiers accepted by `gpu.state.face_culling_set` to GPU_CULL_* values. */
static const struct PyC_StringEnumItems pygpu_state_faceculling_items[] = {
    {GPU_CULL_NONE, "NONE"},
    {GPU_CULL_FRONT, "FRONT"},
    {GPU_CULL_BACK, "BACK"},
    {0, NULL}, /* Sentinel. */
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Manage Stack
* \{ */
/* Python: `gpu.state.blend_set(mode)` — set the fixed-pipeline blend equation. */
PyDoc_STRVAR(pygpu_state_blend_set_doc,
             ".. function:: blend_set(mode)\n"
             "\n"
             "   Defines the fixed pipeline blending equation.\n"
             "\n"
             "   :param mode: One of these modes: {\n"
             "      `NONE`,\n"
             "      `ALPHA`,\n"
             "      `ALPHA_PREMULT`,\n"
             "      `ADDITIVE`,\n"
             "      `ADDITIVE_PREMULT`,\n"
             "      `MULTIPLY`,\n"
             "      `SUBTRACT`,\n"
             "      `INVERT`,\n"
             //"      `OIT`,\n"
             //"      `BACKGROUND`,\n"
             //"      `CUSTOM`,\n"
             "   :type mode: `str`\n");
static PyObject *pygpu_state_blend_set(PyObject *UNUSED(self), PyObject *value)
{
  /* Translate the Python string into a GPU_BLEND_* value, then apply it. */
  struct PyC_StringEnum mode_enum = {pygpu_state_blend_items};
  if (PyC_ParseStringEnum(value, &mode_enum) == 0) {
    return NULL; /* Exception already set by the parser. */
  }
  GPU_blend(mode_enum.value_found);
  Py_RETURN_NONE;
}
/* Python: `gpu.state.blend_get()` — name of the active blend equation. */
PyDoc_STRVAR(pygpu_state_blend_get_doc,
             ".. function:: blend_get()\n"
             "\n"
             "   Current blending equation.\n"
             "\n");
static PyObject *pygpu_state_blend_get(PyObject *UNUSED(self))
{
  const eGPUBlend current = GPU_blend_get();
  const char *id = PyC_StringEnum_FindIDFromValue(pygpu_state_blend_items, current);
  return PyUnicode_FromString(id);
}
/* Python: `gpu.state.depth_test_set(mode)` — set the depth-test function. */
PyDoc_STRVAR(pygpu_state_depth_test_set_doc,
             ".. function:: depth_test_set(mode)\n"
             "\n"
             "   Defines the depth_test equation.\n"
             "\n"
             "   :param mode: One of these modes: {\n"
             "      `NONE`,\n"
             "      `ALWAYS`,\n"
             "      `LESS`,\n"
             "      `LESS_EQUAL`,\n"
             "      `EQUAL`,\n"
             "      `GREATER`,\n"
             "      `GREATER_EQUAL`,\n"
             "   :type mode: `str`\n");
static PyObject *pygpu_state_depth_test_set(PyObject *UNUSED(self), PyObject *value)
{
  /* Map the Python string onto a GPU_DEPTH_* value and install it. */
  struct PyC_StringEnum test_enum = {pygpu_state_depthtest_items};
  if (PyC_ParseStringEnum(value, &test_enum) == 0) {
    return NULL; /* Exception already set by the parser. */
  }
  GPU_depth_test(test_enum.value_found);
  Py_RETURN_NONE;
}
/* Python: `gpu.state.depth_test_get()` — name of the active depth-test function.
 * Fix: the reST signature previously read `blend_depth_test_get()`, which does not
 * match the method name registered in `pygpu_state__tp_methods` ("depth_test_get"). */
PyDoc_STRVAR(pygpu_state_depth_test_get_doc,
             ".. function:: depth_test_get()\n"
             "\n"
             "   Current depth_test equation.\n"
             "\n");
static PyObject *pygpu_state_depth_test_get(PyObject *UNUSED(self))
{
  eGPUDepthTest test = GPU_depth_test_get();
  return PyUnicode_FromString(PyC_StringEnum_FindIDFromValue(pygpu_state_depthtest_items, test));
}
/* Python: `gpu.state.depth_mask_set(value)` — enable/disable depth writes.
 * Fix: the `:type` field previously documented a nonexistent parameter `near`;
 * the parameter is named `value` in the signature line above it. */
PyDoc_STRVAR(pygpu_state_depth_mask_set_doc,
             ".. function:: depth_mask_set(value)\n"
             "\n"
             "   Write to depth component.\n"
             "\n"
             "   :param value: True for writing to the depth component.\n"
             "   :type value: `bool`\n");
static PyObject *pygpu_state_depth_mask_set(PyObject *UNUSED(self), PyObject *value)
{
  bool write_to_depth;
  if (!PyC_ParseBool(value, &write_to_depth)) {
    return NULL; /* Exception set by the parser. */
  }
  GPU_depth_mask(write_to_depth);
  Py_RETURN_NONE;
}
/* Python: `gpu.state.depth_mask_get()` — whether depth writes are enabled.
 * Fix: the reST signature previously read `depth_mask_set_get()`, which does not
 * match the method name registered in `pygpu_state__tp_methods` ("depth_mask_get"). */
PyDoc_STRVAR(pygpu_state_depth_mask_get_doc,
             ".. function:: depth_mask_get()\n"
             "\n"
             "   Writing status in the depth component.\n");
static PyObject *pygpu_state_depth_mask_get(PyObject *UNUSED(self))
{
  return PyBool_FromLong(GPU_depth_mask_get());
}
/* Python: `gpu.state.viewport_set(x, y, xsize, ysize)` — set the active framebuffer viewport. */
PyDoc_STRVAR(pygpu_state_viewport_set_doc,
             ".. function:: viewport_set(x, y, xsize, ysize)\n"
             "\n"
             "   Specifies the viewport of the active framebuffer.\n"
             "   Note: The viewport state is not saved upon framebuffer rebind.\n"
             "\n"
             "   :param x, y: lower left corner of the viewport_set rectangle, in pixels.\n"
             "   :param width, height: width and height of the viewport_set.\n"
             "   :type x, y, xsize, ysize: `int`\n");
static PyObject *pygpu_state_viewport_set(PyObject *UNUSED(self), PyObject *args)
{
  /* Parse (x, y, width, height) as four ints directly into a rectangle. */
  int rect[4];
  if (!PyArg_ParseTuple(args, "iiii:viewport_set", &rect[0], &rect[1], &rect[2], &rect[3])) {
    return NULL;
  }
  GPU_viewport(rect[0], rect[1], rect[2], rect[3]);
  Py_RETURN_NONE;
}
/* Python: `gpu.state.viewport_get()` — viewport rectangle as a 4-tuple of ints. */
PyDoc_STRVAR(pygpu_state_viewport_get_doc,
             ".. function:: viewport_get()\n"
             "\n"
             "   Viewport of the active framebuffer.\n");
static PyObject *pygpu_state_viewport_get(PyObject *UNUSED(self), PyObject *UNUSED(args))
{
  int rect[4];
  GPU_viewport_size_get_i(rect);

  PyObject *result = PyTuple_New(4);
  PyTuple_SET_ITEMS(result,
                    PyLong_FromLong(rect[0]),
                    PyLong_FromLong(rect[1]),
                    PyLong_FromLong(rect[2]),
                    PyLong_FromLong(rect[3]));
  return result;
}
/* Python: `gpu.state.line_width_set(width)` — set rasterized line width.
 * Fix: the doc fields previously referenced `size` and `mode`, neither of which
 * matches the documented parameter `width` in the signature line. */
PyDoc_STRVAR(pygpu_state_line_width_set_doc,
             ".. function:: line_width_set(width)\n"
             "\n"
             "   Specify the width of rasterized lines.\n"
             "\n"
             "   :param width: New width.\n"
             "   :type width: `float`\n");
static PyObject *pygpu_state_line_width_set(PyObject *UNUSED(self), PyObject *value)
{
  float width = (float)PyFloat_AsDouble(value);
  /* PyFloat_AsDouble returns -1.0 on error; the error flag disambiguates. */
  if (PyErr_Occurred()) {
    return NULL;
  }
  GPU_line_width(width);
  Py_RETURN_NONE;
}
/* Python: `gpu.state.line_width_get()` — current rasterized line width. */
PyDoc_STRVAR(pygpu_state_line_width_get_doc,
             ".. function:: line_width_get()\n"
             "\n"
             "   Current width of rasterized lines.\n");
static PyObject *pygpu_state_line_width_get(PyObject *UNUSED(self))
{
  return PyFloat_FromDouble((double)GPU_line_width_get());
}
/* Python: `gpu.state.point_size_set(size)` — set rasterized point diameter.
 * Fix: the `:type` field previously referenced `mode`; the parameter is `size`. */
PyDoc_STRVAR(pygpu_state_point_size_set_doc,
             ".. function:: point_size_set(size)\n"
             "\n"
             "   Specify the diameter of rasterized points.\n"
             "\n"
             "   :param size: New diameter.\n"
             "   :type size: `float`\n");
static PyObject *pygpu_state_point_size_set(PyObject *UNUSED(self), PyObject *value)
{
  float size = (float)PyFloat_AsDouble(value);
  /* PyFloat_AsDouble returns -1.0 on error; the error flag disambiguates. */
  if (PyErr_Occurred()) {
    return NULL;
  }
  GPU_point_size(size);
  Py_RETURN_NONE;
}
/* Python: `gpu.state.color_mask_set(r, g, b, a)` — per-channel color write mask. */
PyDoc_STRVAR(pygpu_state_color_mask_set_doc,
             ".. function:: color_mask_set(r, g, b, a)\n"
             "\n"
             "   Enable or disable writing of frame buffer color components.\n"
             "\n"
             "   :param r, g, b, a: components red, green, blue, and alpha.\n"
             "   :type r, g, b, a: `bool`\n");
static PyObject *pygpu_state_color_mask_set(PyObject *UNUSED(self), PyObject *args)
{
  /* "p" parses any Python object via truth-testing into an int (0/1). */
  int mask[4];
  if (!PyArg_ParseTuple(args, "pppp:color_mask_set", &mask[0], &mask[1], &mask[2], &mask[3])) {
    return NULL;
  }
  GPU_color_mask((bool)mask[0], (bool)mask[1], (bool)mask[2], (bool)mask[3]);
  Py_RETURN_NONE;
}
/* Python: `gpu.state.face_culling_set(culling)` — front/back face culling.
 * Fix: the doc fields previously referenced `mode`, which does not match the
 * parameter name `culling` in the signature line. */
PyDoc_STRVAR(pygpu_state_face_culling_set_doc,
             ".. function:: face_culling_set(culling)\n"
             "\n"
             "   Specify whether none, front-facing or back-facing facets can be culled.\n"
             "\n"
             "   :param culling: One of these modes: {\n"
             "      `NONE`,\n"
             "      `FRONT`,\n"
             "      `BACK`,\n"
             "   :type culling: `str`\n");
static PyObject *pygpu_state_face_culling_set(PyObject *UNUSED(self), PyObject *value)
{
  /* Translate the Python string into a GPU_CULL_* value. */
  struct PyC_StringEnum pygpu_faceculling = {pygpu_state_faceculling_items};
  if (!PyC_ParseStringEnum(value, &pygpu_faceculling)) {
    return NULL; /* Exception set by the parser. */
  }
  GPU_face_culling(pygpu_faceculling.value_found);
  Py_RETURN_NONE;
}
/* Python: `gpu.state.front_facing_set(invert)` — winding order of front faces.
 * Fix: the `:type` field previously referenced `mode`; the parameter is `invert`. */
PyDoc_STRVAR(pygpu_state_front_facing_set_doc,
             ".. function:: front_facing_set(invert)\n"
             "\n"
             "   Specifies the orientation of front-facing polygons.\n"
             "\n"
             "   :param invert: True for clockwise polygons as front-facing.\n"
             "   :type invert: `bool`\n");
static PyObject *pygpu_state_front_facing_set(PyObject *UNUSED(self), PyObject *value)
{
  bool invert;
  if (!PyC_ParseBool(value, &invert)) {
    return NULL; /* Exception set by the parser. */
  }
  GPU_front_facing(invert);
  Py_RETURN_NONE;
}
/* Python: `gpu.state.program_point_size_set(enable)` — toggle shader-driven point size.
 * Fix: the reST signature previously read `use_program_point_size(enable)`, which does
 * not match the method name registered in `pygpu_state__tp_methods`
 * ("program_point_size_set"). */
PyDoc_STRVAR(pygpu_state_program_point_size_set_doc,
             ".. function:: program_point_size_set(enable)\n"
             "\n"
             "   If enabled, the derived point size is taken from the (potentially clipped) "
             "shader builtin gl_PointSize.\n"
             "\n"
             "   :param enable: True for shader builtin gl_PointSize.\n"
             "   :type enable: `bool`\n");
static PyObject *pygpu_state_program_point_size_set(PyObject *UNUSED(self), PyObject *value)
{
  bool enable;
  if (!PyC_ParseBool(value, &enable)) {
    return NULL; /* Exception set by the parser. */
  }
  GPU_program_point_size(enable);
  Py_RETURN_NONE;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Module
* \{ */
/* Method table for the `gpu.state` module: each entry binds a Python-visible name
 * to the C callback, its calling convention (METH_O / METH_NOARGS / METH_VARARGS)
 * and its doc-string. */
static struct PyMethodDef pygpu_state__tp_methods[] = {
    /* Manage Stack */
    {"blend_set", (PyCFunction)pygpu_state_blend_set, METH_O, pygpu_state_blend_set_doc},
    {"blend_get", (PyCFunction)pygpu_state_blend_get, METH_NOARGS, pygpu_state_blend_get_doc},
    {"depth_test_set",
     (PyCFunction)pygpu_state_depth_test_set,
     METH_O,
     pygpu_state_depth_test_set_doc},
    {"depth_test_get",
     (PyCFunction)pygpu_state_depth_test_get,
     METH_NOARGS,
     pygpu_state_depth_test_get_doc},
    {"depth_mask_set",
     (PyCFunction)pygpu_state_depth_mask_set,
     METH_O,
     pygpu_state_depth_mask_set_doc},
    {"depth_mask_get",
     (PyCFunction)pygpu_state_depth_mask_get,
     METH_NOARGS,
     pygpu_state_depth_mask_get_doc},
    {"viewport_set",
     (PyCFunction)pygpu_state_viewport_set,
     METH_VARARGS,
     pygpu_state_viewport_set_doc},
    {"viewport_get",
     (PyCFunction)pygpu_state_viewport_get,
     METH_NOARGS,
     pygpu_state_viewport_get_doc},
    {"line_width_set",
     (PyCFunction)pygpu_state_line_width_set,
     METH_O,
     pygpu_state_line_width_set_doc},
    {"line_width_get",
     (PyCFunction)pygpu_state_line_width_get,
     METH_NOARGS,
     pygpu_state_line_width_get_doc},
    {"point_size_set",
     (PyCFunction)pygpu_state_point_size_set,
     METH_O,
     pygpu_state_point_size_set_doc},
    {"color_mask_set",
     (PyCFunction)pygpu_state_color_mask_set,
     METH_VARARGS,
     pygpu_state_color_mask_set_doc},
    {"face_culling_set",
     (PyCFunction)pygpu_state_face_culling_set,
     METH_O,
     pygpu_state_face_culling_set_doc},
    {"front_facing_set",
     (PyCFunction)pygpu_state_front_facing_set,
     METH_O,
     pygpu_state_front_facing_set_doc},
    {"program_point_size_set",
     (PyCFunction)pygpu_state_program_point_size_set,
     METH_O,
     pygpu_state_program_point_size_set_doc},
    {NULL, NULL, 0, NULL}, /* Sentinel. */
};
PyDoc_STRVAR(pygpu_state__tp_doc, "This module provides access to the gpu state.");
/* Definition of the `gpu.state` submodule. */
static PyModuleDef pygpu_state_module_def = {
    PyModuleDef_HEAD_INIT,
    .m_name = "gpu.state",
    .m_doc = pygpu_state__tp_doc,
    .m_methods = pygpu_state__tp_methods,
};

/* Create the `gpu.state` submodule object (returns NULL with an exception set on failure,
 * per the CPython module-creation convention). */
PyObject *bpygpu_state_init(void)
{
  PyObject *submodule;
  submodule = PyModule_Create(&pygpu_state_module_def);

  return submodule;
}
/** \} */

View File

@@ -0,0 +1,23 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once

/* Create the `gpu.state` submodule; called from the parent `gpu` module init. */
PyObject *bpygpu_state_init(void);

View File

@@ -0,0 +1,555 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* This file defines the texture functionalities of the 'gpu' module
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "BLI_string.h"
#include "GPU_context.h"
#include "GPU_texture.h"
#include "../generic/py_capi_utils.h"
#include "gpu_py.h"
#include "gpu_py_buffer.h"
#include "gpu_py_texture.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name GPUTexture Common Utilities
* \{ */
/* Maps the Python `format` identifiers accepted by `GPUTexture.__new__` to
 * GPU_* texture-format values. NULL-name entry terminates the table. */
static const struct PyC_StringEnumItems pygpu_textureformat_items[] = {
    {GPU_RGBA8UI, "RGBA8UI"},
    {GPU_RGBA8I, "RGBA8I"},
    {GPU_RGBA8, "RGBA8"},
    {GPU_RGBA32UI, "RGBA32UI"},
    {GPU_RGBA32I, "RGBA32I"},
    {GPU_RGBA32F, "RGBA32F"},
    {GPU_RGBA16UI, "RGBA16UI"},
    {GPU_RGBA16I, "RGBA16I"},
    {GPU_RGBA16F, "RGBA16F"},
    {GPU_RGBA16, "RGBA16"},
    {GPU_RG8UI, "RG8UI"},
    {GPU_RG8I, "RG8I"},
    {GPU_RG8, "RG8"},
    {GPU_RG32UI, "RG32UI"},
    {GPU_RG32I, "RG32I"},
    {GPU_RG32F, "RG32F"},
    {GPU_RG16UI, "RG16UI"},
    {GPU_RG16I, "RG16I"},
    {GPU_RG16F, "RG16F"},
    {GPU_RG16, "RG16"},
    {GPU_R8UI, "R8UI"},
    {GPU_R8I, "R8I"},
    {GPU_R8, "R8"},
    {GPU_R32UI, "R32UI"},
    {GPU_R32I, "R32I"},
    {GPU_R32F, "R32F"},
    {GPU_R16UI, "R16UI"},
    {GPU_R16I, "R16I"},
    {GPU_R16F, "R16F"},
    {GPU_R16, "R16"},
    {GPU_R11F_G11F_B10F, "R11F_G11F_B10F"},
    {GPU_DEPTH32F_STENCIL8, "DEPTH32F_STENCIL8"},
    {GPU_DEPTH24_STENCIL8, "DEPTH24_STENCIL8"},
    {GPU_SRGB8_A8, "SRGB8_A8"},
    {GPU_RGB16F, "RGB16F"},
    {GPU_SRGB8_A8_DXT1, "SRGB8_A8_DXT1"},
    {GPU_SRGB8_A8_DXT3, "SRGB8_A8_DXT3"},
    {GPU_SRGB8_A8_DXT5, "SRGB8_A8_DXT5"},
    {GPU_RGBA8_DXT1, "RGBA8_DXT1"},
    {GPU_RGBA8_DXT3, "RGBA8_DXT3"},
    {GPU_RGBA8_DXT5, "RGBA8_DXT5"},
    {GPU_DEPTH_COMPONENT32F, "DEPTH_COMPONENT32F"},
    {GPU_DEPTH_COMPONENT24, "DEPTH_COMPONENT24"},
    {GPU_DEPTH_COMPONENT16, "DEPTH_COMPONENT16"},
    {0, NULL}, /* Sentinel. */
};
/* Return 0 when the wrapped #GPUTexture is still alive, -1 (with a Python
 * ReferenceError set) when it was already freed via `GPUTexture.free()`.
 * Note the string literal chosen at compile time is the sole argument continuation
 * of the PyErr_SetString call across the #ifdef branches. */
static int pygpu_texture_valid_check(BPyGPUTexture *bpygpu_tex)
{
  if (UNLIKELY(bpygpu_tex->tex == NULL)) {
    PyErr_SetString(PyExc_ReferenceError,
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
                    "GPU texture was freed, no further access is valid"
#else
                    "GPU texture: internal error"
#endif
    );
    return -1;
  }
  return 0;
}

/* Guard macro for Python callbacks: bail out (returning NULL, i.e. raising the
 * ReferenceError set above) when the texture was already freed. */
#define BPYGPU_TEXTURE_CHECK_OBJ(bpygpu) \
  { \
    if (UNLIKELY(pygpu_texture_valid_check(bpygpu) == -1)) { \
      return NULL; \
    } \
  } \
  ((void)0)
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUTexture Type
* \{ */
/* `GPUTexture(size, layers=0, is_cubemap=False, format='RGBA8', data=None)`:
 * create a 1D/2D/3D, array or cubemap texture depending on the shape of `size`
 * and the `layers`/`is_cubemap` flags. Returns a new BPyGPUTexture or NULL. */
static PyObject *pygpu_texture__tp_new(PyTypeObject *UNUSED(self), PyObject *args, PyObject *kwds)
{
  BPYGPU_IS_INIT_OR_ERROR_OBJ;

  PyObject *py_size;
  int size[3] = {1, 1, 1};
  int layers = 0;
  int is_cubemap = false;
  struct PyC_StringEnum pygpu_textureformat = {pygpu_textureformat_items, GPU_RGBA8};
  BPyGPUBuffer *pybuffer_obj = NULL;
  char err_out[256] = "unknown error. See console";

  static const char *_keywords[] = {"size", "layers", "is_cubemap", "format", "data", NULL};
  static _PyArg_Parser _parser = {"O|$ipO&O!:GPUTexture.__new__", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(args,
                                        kwds,
                                        &_parser,
                                        &py_size,
                                        &layers,
                                        &is_cubemap,
                                        PyC_ParseStringEnum,
                                        &pygpu_textureformat,
                                        &BPyGPU_BufferType,
                                        &pybuffer_obj)) {
    return NULL;
  }

  /* `size` may be an int (1D / cubemap) or a sequence of up to 3 ints;
   * `len` records the dimensionality used for dispatch below. */
  int len = 1;
  if (PySequence_Check(py_size)) {
    len = PySequence_Size(py_size);
    if (PyC_AsArray(size, py_size, len, &PyLong_Type, false, "GPUTexture.__new__") == -1) {
      return NULL;
    }
  }
  else if (PyLong_Check(py_size)) {
    size[0] = PyLong_AsLong(py_size);
  }
  else {
    PyErr_SetString(PyExc_ValueError, "GPUTexture.__new__: Expected an int or tuple as first arg");
    return NULL;
  }

  void *data = NULL;
  if (pybuffer_obj) {
    if (pybuffer_obj->format != GPU_DATA_FLOAT) {
      PyErr_SetString(PyExc_ValueError,
                      "GPUTexture.__new__: Only Buffer of format `FLOAT` is currently supported");
      return NULL;
    }

    /* Validate that the provided buffer is large enough for the requested
     * dimensions, layer count and per-texel component count. */
    int component_len = GPU_texture_component_len(pygpu_textureformat.value_found);
    int component_size_expected = sizeof(float);
    size_t data_space_expected = (size_t)size[0] * size[1] * size[2] * max_ii(1, layers) *
                                 component_len * component_size_expected;
    if (is_cubemap) {
      /* Cubemaps are square (`len == 1`, so the base product only counted one
       * `size[0]`): multiply by the second dimension and the 6 faces. */
      data_space_expected *= 6 * size[0];
    }

    if (bpygpu_Buffer_size(pybuffer_obj) < data_space_expected) {
      PyErr_SetString(PyExc_ValueError, "GPUTexture.__new__: Buffer size smaller than requested");
      return NULL;
    }
    data = pybuffer_obj->buf.as_void;
  }

  /* Dispatch on (is_cubemap, layers, len) to the matching GPU texture constructor.
   * On invalid combinations, `tex` stays NULL and `err_out` carries the reason. */
  GPUTexture *tex = NULL;
  if (is_cubemap && len != 1) {
    STRNCPY(err_out,
            "In cubemaps the same dimension represents height, width and depth. No tuple needed");
  }
  else if (size[0] < 1 || size[1] < 1 || size[2] < 1) {
    STRNCPY(err_out, "Values less than 1 are not allowed in dimensions");
  }
  else if (layers && len == 3) {
    STRNCPY(err_out, "3D textures have no layers");
  }
  else if (!GPU_context_active_get()) {
    STRNCPY(err_out, "No active GPU context found");
  }
  else {
    const char *name = "python_texture";
    if (is_cubemap) {
      if (layers) {
        tex = GPU_texture_create_cube_array(
            name, size[0], layers, 1, pygpu_textureformat.value_found, data);
      }
      else {
        tex = GPU_texture_create_cube(name, size[0], 1, pygpu_textureformat.value_found, data);
      }
    }
    else if (layers) {
      if (len == 2) {
        tex = GPU_texture_create_2d_array(
            name, size[0], size[1], layers, 1, pygpu_textureformat.value_found, data);
      }
      else {
        tex = GPU_texture_create_1d_array(
            name, size[0], layers, 1, pygpu_textureformat.value_found, data);
      }
    }
    else if (len == 3) {
      /* NOTE(review): the 3D path passes NULL instead of `data`, so any provided
       * buffer is silently ignored for 3D textures — confirm whether intentional. */
      tex = GPU_texture_create_3d(name,
                                  size[0],
                                  size[1],
                                  size[2],
                                  1,
                                  pygpu_textureformat.value_found,
                                  GPU_DATA_FLOAT,
                                  NULL);
    }
    else if (len == 2) {
      tex = GPU_texture_create_2d(
          name, size[0], size[1], 1, pygpu_textureformat.value_found, data);
    }
    else {
      tex = GPU_texture_create_1d(name, size[0], 1, pygpu_textureformat.value_found, data);
    }
  }

  if (tex == NULL) {
    PyErr_Format(PyExc_RuntimeError, "gpu.texture.new(...) failed with '%s'", err_out);
    return NULL;
  }

  return BPyGPUTexture_CreatePyObject(tex);
}
/* Read-only `GPUTexture.width` attribute. */
PyDoc_STRVAR(pygpu_texture_width_doc, "Width of the texture.\n\n:type: `int`");
static PyObject *pygpu_texture_width_get(BPyGPUTexture *self, void *UNUSED(type))
{
  BPYGPU_TEXTURE_CHECK_OBJ(self);
  return PyLong_FromLong(GPU_texture_width(self->tex));
}

/* Read-only `GPUTexture.height` attribute. */
PyDoc_STRVAR(pygpu_texture_height_doc, "Height of the texture.\n\n:type: `int`");
static PyObject *pygpu_texture_height_get(BPyGPUTexture *self, void *UNUSED(type))
{
  BPYGPU_TEXTURE_CHECK_OBJ(self);
  return PyLong_FromLong(GPU_texture_height(self->tex));
}

/* Read-only `GPUTexture.format` attribute (the string identifier, e.g. "RGBA8"). */
PyDoc_STRVAR(pygpu_texture_format_doc, "Format of the texture.\n\n:type: `str`");
static PyObject *pygpu_texture_format_get(BPyGPUTexture *self, void *UNUSED(type))
{
  BPYGPU_TEXTURE_CHECK_OBJ(self);
  eGPUTextureFormat format = GPU_texture_format(self->tex);
  return PyUnicode_FromString(PyC_StringEnum_FindIDFromValue(pygpu_textureformat_items, format));
}
/* `GPUTexture.clear(format=..., value=...)`: fill the whole texture with one value. */
PyDoc_STRVAR(pygpu_texture_clear_doc,
             ".. method:: clear(format='FLOAT', value=(0.0, 0.0, 0.0, 1.0))\n"
             "\n"
             "   Fill texture with specific value.\n"
             "\n"
             "   :param format: One of these primitive types: {\n"
             "      `FLOAT`,\n"
             "      `INT`,\n"
             "      `UINT`,\n"
             "      `UBYTE`,\n"
             "      `UINT_24_8`,\n"
             "      `10_11_11_REV`,\n"
             "   :type type: `str`\n"
             "   :arg value: sequence each representing the value to fill.\n"
             "   :type value: sequence of 1, 2, 3 or 4 values\n");
static PyObject *pygpu_texture_clear(BPyGPUTexture *self, PyObject *args, PyObject *kwds)
{
  BPYGPU_TEXTURE_CHECK_OBJ(self);
  /* `bpygpu_dataformat_items` is declared elsewhere (shared with the Buffer code). */
  struct PyC_StringEnum pygpu_dataformat = {bpygpu_dataformat_items};
  /* One storage area reinterpreted per requested data format. */
  union {
    int i[4];
    float f[4];
    char c[4];
  } values;

  PyObject *py_values;

  static const char *_keywords[] = {"format", "value", NULL};
  static _PyArg_Parser _parser = {"$O&O:clear", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(
          args, kwds, &_parser, PyC_ParseStringEnum, &pygpu_dataformat, &py_values)) {
    return NULL;
  }

  int shape = PySequence_Size(py_values);
  if (shape == -1) {
    return NULL;
  }

  if (shape > 4) {
    PyErr_SetString(PyExc_AttributeError, "too many dimensions, max is 4");
    return NULL;
  }

  /* Packed formats encode multiple channels in a single value. */
  if (shape != 1 &&
      ELEM(pygpu_dataformat.value_found, GPU_DATA_UINT_24_8, GPU_DATA_10_11_11_REV)) {
    PyErr_SetString(PyExc_AttributeError,
                    "`UINT_24_8` and `10_11_11_REV` only support single values");
    return NULL;
  }

  memset(&values, 0, sizeof(values));
  /* Non-float formats are first parsed as ints; UBYTE is narrowed below. */
  if (PyC_AsArray(&values,
                  py_values,
                  shape,
                  pygpu_dataformat.value_found == GPU_DATA_FLOAT ? &PyFloat_Type : &PyLong_Type,
                  false,
                  "clear") == -1) {
    return NULL;
  }

  if (pygpu_dataformat.value_found == GPU_DATA_UBYTE) {
    /* Convert to byte. */
    values.c[0] = values.i[0];
    values.c[1] = values.i[1];
    values.c[2] = values.i[2];
    values.c[3] = values.i[3];
  }

  GPU_texture_clear(self->tex, pygpu_dataformat.value_found, &values);
  Py_RETURN_NONE;
}
/* `GPUTexture.read()`: download all pixels into a new gpu Buffer object. */
PyDoc_STRVAR(pygpu_texture_read_doc,
             ".. method:: read()\n"
             "\n"
             "   Creates a buffer with the value of all pixels.\n"
             "\n");
static PyObject *pygpu_texture_read(BPyGPUTexture *self)
{
  BPYGPU_TEXTURE_CHECK_OBJ(self);

  /* #GPU_texture_read is restricted in combining 'data_format' with 'tex_format'.
   * So choose data_format here. */
  eGPUDataFormat best_data_format;
  switch (GPU_texture_format(self->tex)) {
    case GPU_DEPTH_COMPONENT24:
    case GPU_DEPTH_COMPONENT16:
    case GPU_DEPTH_COMPONENT32F:
      best_data_format = GPU_DATA_FLOAT;
      break;
    case GPU_DEPTH24_STENCIL8:
    case GPU_DEPTH32F_STENCIL8:
      best_data_format = GPU_DATA_UINT_24_8;
      break;
    case GPU_R8UI:
    case GPU_R16UI:
    case GPU_RG16UI:
    case GPU_R32UI:
      best_data_format = GPU_DATA_UINT;
      break;
    case GPU_RG16I:
    case GPU_R16I:
      best_data_format = GPU_DATA_INT;
      break;
    case GPU_R8:
    case GPU_RG8:
    case GPU_RGBA8:
    case GPU_RGBA8UI:
    case GPU_SRGB8_A8:
      best_data_format = GPU_DATA_UBYTE;
      break;
    case GPU_R11F_G11F_B10F:
      best_data_format = GPU_DATA_10_11_11_REV;
      break;
    default:
      best_data_format = GPU_DATA_FLOAT;
      break;
  }

  /* NOTE(review): `buf` ownership appears to transfer to the returned Buffer
   * object (mip level 0 only) — confirm against BPyGPU_Buffer_CreatePyObject. */
  void *buf = GPU_texture_read(self->tex, best_data_format, 0);
  const Py_ssize_t shape[2] = {GPU_texture_height(self->tex), GPU_texture_width(self->tex)};
  return (PyObject *)BPyGPU_Buffer_CreatePyObject(best_data_format, shape, ARRAY_SIZE(shape), buf);
}
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
/* `GPUTexture.free()`: explicitly release the GPU resource; the wrapper stays
 * alive but any further use raises ReferenceError (see the CHECK_OBJ guard). */
PyDoc_STRVAR(pygpu_texture_free_doc,
             ".. method:: free()\n"
             "\n"
             "   Free the texture object.\n"
             "   The texture object will no longer be accessible.\n");
static PyObject *pygpu_texture_free(BPyGPUTexture *self)
{
  BPYGPU_TEXTURE_CHECK_OBJ(self);

  GPU_texture_free(self->tex);
  self->tex = NULL; /* Marks the wrapper as dead for the validity check. */
  Py_RETURN_NONE;
}
#endif

/* Destructor: release the GPU texture (if not already freed) then the Python object. */
static void BPyGPUTexture__tp_dealloc(BPyGPUTexture *self)
{
  if (self->tex) {
    GPU_texture_free(self->tex);
  }
  Py_TYPE(self)->tp_free((PyObject *)self);
}
/* Attribute table for GPUTexture (all read-only: setter is NULL). */
static PyGetSetDef pygpu_texture__tp_getseters[] = {
    {"width", (getter)pygpu_texture_width_get, (setter)NULL, pygpu_texture_width_doc, NULL},
    {"height", (getter)pygpu_texture_height_get, (setter)NULL, pygpu_texture_height_doc, NULL},
    {"format", (getter)pygpu_texture_format_get, (setter)NULL, pygpu_texture_format_doc, NULL},
    {NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};

/* Method table for GPUTexture. */
static struct PyMethodDef pygpu_texture__tp_methods[] = {
    {"clear",
     (PyCFunction)pygpu_texture_clear,
     METH_VARARGS | METH_KEYWORDS,
     pygpu_texture_clear_doc},
    {"read", (PyCFunction)pygpu_texture_read, METH_NOARGS, pygpu_texture_read_doc},
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
    {"free", (PyCFunction)pygpu_texture_free, METH_NOARGS, pygpu_texture_free_doc},
#endif
    {NULL, NULL, 0, NULL}, /* Sentinel. */
};
PyDoc_STRVAR(
pygpu_texture__tp_doc,
".. class:: GPUTexture(size, layers=0, is_cubemap=False, format='RGBA8', data=None)\n"
"\n"
" This object gives access to off GPU textures.\n"
"\n"
" :arg size: Dimensions of the texture 1D, 2D, 3D or cubemap.\n"
" :type size: `tuple` or `int`\n"
" :arg layers: Number of layers in texture array or number of cubemaps in cubemap array\n"
" :type layers: `int`\n"
" :arg is_cubemap: Indicates the creation of a cubemap texture.\n"
" :type is_cubemap: `int`\n"
" :arg format: One of these primitive types: {\n"
" `RGBA8UI`,\n"
" `RGBA8I`,\n"
" `RGBA8`,\n"
" `RGBA32UI`,\n"
" `RGBA32I`,\n"
" `RGBA32F`,\n"
" `RGBA16UI`,\n"
" `RGBA16I`,\n"
" `RGBA16F`,\n"
" `RGBA16`,\n"
" `RG8UI`,\n"
" `RG8I`,\n"
" `RG8`,\n"
" `RG32UI`,\n"
" `RG32I`,\n"
" `RG32F`,\n"
" `RG16UI`,\n"
" `RG16I`,\n"
" `RG16F`,\n"
" `RG16`,\n"
" `R8UI`,\n"
" `R8I`,\n"
" `R8`,\n"
" `R32UI`,\n"
" `R32I`,\n"
" `R32F`,\n"
" `R16UI`,\n"
" `R16I`,\n"
" `R16F`,\n"
" `R16`,\n"
" `R11F_G11F_B10F`,\n"
" `DEPTH32F_STENCIL8`,\n"
" `DEPTH24_STENCIL8`,\n"
" `SRGB8_A8`,\n"
" `RGB16F`,\n"
" `SRGB8_A8_DXT1`,\n"
" `SRGB8_A8_DXT3`,\n"
" `SRGB8_A8_DXT5`,\n"
" `RGBA8_DXT1`,\n"
" `RGBA8_DXT3`,\n"
" `RGBA8_DXT5`,\n"
" `DEPTH_COMPONENT32F`,\n"
" `DEPTH_COMPONENT24`,\n"
" `DEPTH_COMPONENT16`,\n"
" :type format: `str`\n"
" :arg data: Buffer object to fill the texture.\n"
" :type data: `Buffer`\n");
/* Python type object for `gpu.types.GPUTexture` (instances created via tp_new above). */
PyTypeObject BPyGPUTexture_Type = {
    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUTexture",
    .tp_basicsize = sizeof(BPyGPUTexture),
    .tp_dealloc = (destructor)BPyGPUTexture__tp_dealloc,
    .tp_flags = Py_TPFLAGS_DEFAULT,
    .tp_doc = pygpu_texture__tp_doc,
    .tp_methods = pygpu_texture__tp_methods,
    .tp_getset = pygpu_texture__tp_getseters,
    .tp_new = pygpu_texture__tp_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Local API
* \{ */
/* "O&" converter for PyArg parsing: accepts a GPUTexture or None and stores the
 * wrapped GPUTexture pointer (or NULL) into `p`. Returns 1 on success, 0 on
 * failure with an exception set. */
int bpygpu_ParseTexture(PyObject *o, void *p)
{
  GPUTexture **tex_p = (GPUTexture **)p;

  if (o == Py_None) {
    *tex_p = NULL;
    return 1;
  }

  if (!BPyGPUTexture_Check(o)) {
    PyErr_Format(
        PyExc_ValueError, "expected a texture or None object, got %s", Py_TYPE(o)->tp_name);
    return 0;
  }

  BPyGPUTexture *py_tex = (BPyGPUTexture *)o;
  if (UNLIKELY(pygpu_texture_valid_check(py_tex) == -1)) {
    return 0; /* ReferenceError already set. */
  }

  *tex_p = py_tex->tex;
  return 1;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
/* Wrap an existing #GPUTexture in a new Python object; ownership of `tex`
 * transfers to the wrapper (freed in tp_dealloc). */
PyObject *BPyGPUTexture_CreatePyObject(GPUTexture *tex)
{
  BPyGPUTexture *self = PyObject_New(BPyGPUTexture, &BPyGPUTexture_Type);
  self->tex = tex;
  return (PyObject *)self;
}
/** \} */
#undef BPYGPU_TEXTURE_CHECK_OBJ

View File

@@ -0,0 +1,34 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once
#include "BLI_compiler_attrs.h"
/* Python type object for `gpu.types.GPUTexture` (defined in gpu_py_texture.c). */
extern PyTypeObject BPyGPUTexture_Type;

/* Exact-type check (subclassing is not supported). */
#define BPyGPUTexture_Check(v) (Py_TYPE(v) == &BPyGPUTexture_Type)

/* Python wrapper owning a #GPUTexture (NULL after an explicit `free()`). */
typedef struct BPyGPUTexture {
  PyObject_HEAD struct GPUTexture *tex;
} BPyGPUTexture;

/* "O&" argument converter: GPUTexture-or-None → GPUTexture pointer. */
int bpygpu_ParseTexture(PyObject *o, void *p);
/* Wrap `tex`, transferring ownership to the returned Python object. */
PyObject *BPyGPUTexture_CreatePyObject(struct GPUTexture *tex) ATTR_NONNULL(1);

View File

@@ -32,17 +32,20 @@
/** \name GPU Types Module
* \{ */
static struct PyModuleDef BPyGPU_types_module_def = {
static struct PyModuleDef pygpu_types_module_def = {
PyModuleDef_HEAD_INIT,
.m_name = "gpu.types",
};
PyObject *BPyInit_gpu_types(void)
PyObject *bpygpu_types_init(void)
{
PyObject *submodule;
submodule = PyModule_Create(&BPyGPU_types_module_def);
submodule = PyModule_Create(&pygpu_types_module_def);
if (PyType_Ready(&BPyGPU_BufferType) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUVertFormat_Type) < 0) {
return NULL;
}
@@ -61,19 +64,26 @@ PyObject *BPyInit_gpu_types(void)
if (PyType_Ready(&BPyGPUShader_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUTexture_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUFrameBuffer_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUUniformBuf_Type) < 0) {
return NULL;
}
#define MODULE_TYPE_ADD(s, t) \
PyModule_AddObject(s, t.tp_name, (PyObject *)&t); \
Py_INCREF((PyObject *)&t)
MODULE_TYPE_ADD(submodule, BPyGPUVertFormat_Type);
MODULE_TYPE_ADD(submodule, BPyGPUVertBuf_Type);
MODULE_TYPE_ADD(submodule, BPyGPUIndexBuf_Type);
MODULE_TYPE_ADD(submodule, BPyGPUBatch_Type);
MODULE_TYPE_ADD(submodule, BPyGPUOffScreen_Type);
MODULE_TYPE_ADD(submodule, BPyGPUShader_Type);
#undef MODULE_TYPE_ADD
PyModule_AddType(submodule, &BPyGPU_BufferType);
PyModule_AddType(submodule, &BPyGPUVertFormat_Type);
PyModule_AddType(submodule, &BPyGPUVertBuf_Type);
PyModule_AddType(submodule, &BPyGPUIndexBuf_Type);
PyModule_AddType(submodule, &BPyGPUBatch_Type);
PyModule_AddType(submodule, &BPyGPUOffScreen_Type);
PyModule_AddType(submodule, &BPyGPUShader_Type);
PyModule_AddType(submodule, &BPyGPUTexture_Type);
PyModule_AddType(submodule, &BPyGPUFrameBuffer_Type);
PyModule_AddType(submodule, &BPyGPUUniformBuf_Type);
return submodule;
}

View File

@@ -20,11 +20,16 @@
#pragma once
#include "gpu_py_buffer.h"
#include "gpu_py_batch.h"
#include "gpu_py_element.h"
#include "gpu_py_framebuffer.h"
#include "gpu_py_offscreen.h"
#include "gpu_py_shader.h"
#include "gpu_py_texture.h"
#include "gpu_py_uniformbuffer.h"
#include "gpu_py_vertex_buffer.h"
#include "gpu_py_vertex_format.h"
PyObject *BPyInit_gpu_types(void);
PyObject *bpygpu_types_init(void);

View File

@@ -0,0 +1,194 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* This file defines the uniform buffer functionalities of the 'gpu' module
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "BLI_string.h"
#include "GPU_context.h"
#include "GPU_texture.h"
#include "GPU_uniform_buffer.h"
#include "../generic/py_capi_utils.h"
#include "gpu_py.h"
#include "gpu_py_buffer.h"
#include "gpu_py_uniformbuffer.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name GPUUniformBuf Common Utilities
* \{ */
/* Return 0 when the wrapped #GPUUniformBuf is still alive, -1 (with a Python
 * ReferenceError set) when it was already freed via `free()`. Note each #ifdef
 * branch carries its own closing `);` of the PyErr_SetString call. */
static int pygpu_uniformbuffer_valid_check(BPyGPUUniformBuf *bpygpu_ub)
{
  if (UNLIKELY(bpygpu_ub->ubo == NULL)) {
    PyErr_SetString(PyExc_ReferenceError,
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
                    "GPU uniform buffer was freed, no further access is valid");
#else
                    "GPU uniform buffer: internal error");
#endif
    return -1;
  }
  return 0;
}

/* Guard macro for Python callbacks: bail out (returning NULL, i.e. raising the
 * ReferenceError set above) when the uniform buffer was already freed. */
#define BPYGPU_UNIFORMBUF_CHECK_OBJ(bpygpu) \
  { \
    if (UNLIKELY(pygpu_uniformbuffer_valid_check(bpygpu) == -1)) { \
      return NULL; \
    } \
  } \
  ((void)0)
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUUniformBuf Type
* \{ */
/* `GPUUniformBuf(data)`: create a uniform buffer object from a gpu Buffer. */
static PyObject *pygpu_uniformbuffer__tp_new(PyTypeObject *UNUSED(self),
                                             PyObject *args,
                                             PyObject *kwds)
{
  BPYGPU_IS_INIT_OR_ERROR_OBJ;

  BPyGPUBuffer *py_buffer;
  static const char *_keywords[] = {"data", NULL};
  static _PyArg_Parser _parser = {"O!:GPUUniformBuf.__new__", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &BPyGPU_BufferType, &py_buffer)) {
    return NULL;
  }

  char err_out[256] = "unknown error. See console";
  GPUUniformBuf *ubo = NULL;
  if (!GPU_context_active_get()) {
    STRNCPY(err_out, "No active GPU context found");
  }
  else {
    ubo = GPU_uniformbuf_create_ex(
        bpygpu_Buffer_size(py_buffer), py_buffer->buf.as_void, "python_uniformbuffer");
  }

  if (ubo == NULL) {
    PyErr_Format(PyExc_RuntimeError, "GPUUniformBuf.__new__(...) failed with '%s'", err_out);
    return NULL;
  }

  return BPyGPUUniformBuf_CreatePyObject(ubo);
}
/* `GPUUniformBuf.update(data)`: copy the Buffer contents into the UBO. */
PyDoc_STRVAR(pygpu_uniformbuffer_update_doc,
             ".. method:: update(data)\n"
             "\n"
             "   Update the data of the uniform buffer object.\n");
static PyObject *pygpu_uniformbuffer_update(BPyGPUUniformBuf *self, PyObject *obj)
{
  BPYGPU_UNIFORMBUF_CHECK_OBJ(self);

  if (!BPyGPU_Buffer_Check(obj)) {
    /* Bug fix: previously this returned NULL without setting an exception, which
     * makes CPython raise SystemError ("error return without exception set")
     * instead of a meaningful error. */
    PyErr_Format(PyExc_TypeError,
                 "`data` expected a gpu.types.Buffer, got %s",
                 Py_TYPE(obj)->tp_name);
    return NULL;
  }

  GPU_uniformbuf_update(self->ubo, ((BPyGPUBuffer *)obj)->buf.as_void);
  Py_RETURN_NONE;
}
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
/* NOTE: a space is required after `::` for the Sphinx directive to parse. */
PyDoc_STRVAR(pygpu_uniformbuffer_free_doc,
             ".. method:: free()\n"
             "\n"
             "   Free the uniform buffer object.\n"
             "   The uniform buffer object will no longer be accessible.\n");
/**
 * `GPUUniformBuf.free()`: explicitly release the GPU resource.
 * Clearing `self->ubo` makes any further access raise ReferenceError
 * (see #pygpu_uniformbuffer_valid_check) and prevents a double free
 * in the destructor.
 */
static PyObject *pygpu_uniformbuffer_free(BPyGPUUniformBuf *self)
{
  BPYGPU_UNIFORMBUF_CHECK_OBJ(self);

  GPU_uniformbuf_free(self->ubo);
  self->ubo = NULL;
  Py_RETURN_NONE;
}
#endif
/**
 * Destructor: release the owned #GPUUniformBuf (when it was not already freed
 * via an explicit `free()` call) and the Python object itself.
 */
static void BPyGPUUniformBuf__tp_dealloc(BPyGPUUniformBuf *self)
{
  GPUUniformBuf *ubo = self->ubo;
  if (ubo != NULL) {
    GPU_uniformbuf_free(ubo);
  }
  Py_TYPE(self)->tp_free((PyObject *)self);
}
/* No attributes are exposed yet; table holds the sentinel only. */
static PyGetSetDef pygpu_uniformbuffer__tp_getseters[] = {
    {NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};

/* Method table for `gpu.types.GPUUniformBuf`. */
static struct PyMethodDef pygpu_uniformbuffer__tp_methods[] = {
    {"update", (PyCFunction)pygpu_uniformbuffer_update, METH_O, pygpu_uniformbuffer_update_doc},
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
    /* Explicit `free()` is only exposed when built with this option. */
    {"free", (PyCFunction)pygpu_uniformbuffer_free, METH_NOARGS, pygpu_uniformbuffer_free_doc},
#endif
    {NULL, NULL, 0, NULL},
};
/* Class docstring (Sphinx reST).
 * Fix: "access to off uniform buffers" contained a stray "off"
 * (apparent copy-paste from the off-screen buffer documentation). */
PyDoc_STRVAR(pygpu_uniformbuffer__tp_doc,
             ".. class:: GPUUniformBuf(data)\n"
             "\n"
             "   This object gives access to uniform buffers.\n"
             "\n"
             "   :arg data: Buffer object.\n"
             "   :type data: `Buffer`\n");
/* Python type wrapping a #GPUUniformBuf, exposed as `gpu.types.GPUUniformBuf`.
 * Only created through `__new__`; instances own their GPU buffer (freed in dealloc). */
PyTypeObject BPyGPUUniformBuf_Type = {
    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUUniformBuf",
    .tp_basicsize = sizeof(BPyGPUUniformBuf),
    .tp_dealloc = (destructor)BPyGPUUniformBuf__tp_dealloc,
    .tp_flags = Py_TPFLAGS_DEFAULT,
    .tp_doc = pygpu_uniformbuffer__tp_doc,
    .tp_methods = pygpu_uniformbuffer__tp_methods,
    .tp_getset = pygpu_uniformbuffer__tp_getseters,
    .tp_new = pygpu_uniformbuffer__tp_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
/**
 * Wrap \a ubo in a new `gpu.types.GPUUniformBuf` object.
 *
 * Ownership of \a ubo is transferred to the returned object
 * (freed in #BPyGPUUniformBuf__tp_dealloc).
 *
 * \return the new object, or NULL with MemoryError set on allocation failure.
 */
PyObject *BPyGPUUniformBuf_CreatePyObject(GPUUniformBuf *ubo)
{
  BPyGPUUniformBuf *self = PyObject_New(BPyGPUUniformBuf, &BPyGPUUniformBuf_Type);
  if (UNLIKELY(self == NULL)) {
    /* Fix: previously `self->ubo` was written without a NULL check,
     * crashing on allocation failure. Free the buffer here since this
     * function takes ownership; PyObject_New already set MemoryError. */
    GPU_uniformbuf_free(ubo);
    return NULL;
  }
  self->ubo = ubo;
  return (PyObject *)self;
}
/** \} */
#undef BPYGPU_UNIFORMBUF_CHECK_OBJ

View File

@@ -0,0 +1,33 @@
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/** \file
 * \ingroup bpygpu
 *
 * Public interface of the `gpu.types.GPUUniformBuf` Python wrapper.
 */

#pragma once

#include "BLI_compiler_attrs.h"

extern PyTypeObject BPyGPUUniformBuf_Type;

/* Exact type check (subclasses are not recognized). */
#define BPyGPUUniformBuf_Check(v) (Py_TYPE(v) == &BPyGPUUniformBuf_Type)

typedef struct BPyGPUUniformBuf {
  /* Owned GPU buffer; NOTE(review): may be NULL after a script-level `free()`
   * when built with that method enabled — always validate before use. */
  PyObject_HEAD struct GPUUniformBuf *ubo;
} BPyGPUUniformBuf;

/* Takes ownership of `ubo` (freed when the Python object is destroyed). */
PyObject *BPyGPUUniformBuf_CreatePyObject(struct GPUUniformBuf *ubo) ATTR_NONNULL(1);

View File

@@ -39,7 +39,7 @@
/** \name Utility Functions
* \{ */
#define PY_AS_NATIVE_SWITCH(attr) \
#define PYGPU_AS_NATIVE_SWITCH(attr) \
switch (attr->comp_type) { \
case GPU_COMP_I8: { \
PY_AS_NATIVE(int8_t, PyC_Long_AsI8); \
@@ -75,7 +75,7 @@
((void)0)
/* No error checking, callers must run PyErr_Occurred */
static void fill_format_elem(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
static void pygpu_fill_format_elem(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
{
#define PY_AS_NATIVE(ty_dst, py_as_native) \
{ \
@@ -84,15 +84,15 @@ static void fill_format_elem(void *data_dst_void, PyObject *py_src, const GPUVer
} \
((void)0)
PY_AS_NATIVE_SWITCH(attr);
PYGPU_AS_NATIVE_SWITCH(attr);
#undef PY_AS_NATIVE
}
/* No error checking, callers must run PyErr_Occurred */
static void fill_format_sequence(void *data_dst_void,
PyObject *py_seq_fast,
const GPUVertAttr *attr)
static void pygpu_fill_format_sequence(void *data_dst_void,
PyObject *py_seq_fast,
const GPUVertAttr *attr)
{
const uint len = attr->comp_len;
PyObject **value_fast_items = PySequence_Fast_ITEMS(py_seq_fast);
@@ -107,19 +107,19 @@ static void fill_format_sequence(void *data_dst_void,
} \
((void)0)
PY_AS_NATIVE_SWITCH(attr);
PYGPU_AS_NATIVE_SWITCH(attr);
#undef PY_AS_NATIVE
}
#undef PY_AS_NATIVE_SWITCH
#undef PYGPU_AS_NATIVE_SWITCH
#undef WARN_TYPE_LIMIT_PUSH
#undef WARN_TYPE_LIMIT_POP
static bool py_vertbuf_fill_impl(GPUVertBuf *vbo,
uint data_id,
PyObject *seq,
const char *error_prefix)
static bool pygpu_vertbuf_fill_impl(GPUVertBuf *vbo,
uint data_id,
PyObject *seq,
const char *error_prefix)
{
const char *exc_str_size_mismatch = "Expected a %s of size %d, got %u";
@@ -173,7 +173,7 @@ static bool py_vertbuf_fill_impl(GPUVertBuf *vbo,
for (uint i = 0; i < seq_len; i++) {
uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
PyObject *item = seq_items[i];
fill_format_elem(data, item, attr);
pygpu_fill_format_elem(data, item, attr);
}
}
else {
@@ -197,7 +197,7 @@ static bool py_vertbuf_fill_impl(GPUVertBuf *vbo,
}
/* May trigger error, check below */
fill_format_sequence(data, seq_fast_item, attr);
pygpu_fill_format_sequence(data, seq_fast_item, attr);
Py_DECREF(seq_fast_item);
}
}
@@ -213,7 +213,10 @@ static bool py_vertbuf_fill_impl(GPUVertBuf *vbo,
return ok;
}
static int py_attr_fill(GPUVertBuf *buf, int id, PyObject *py_seq_data, const char *error_prefix)
static int pygpu_vertbuf_fill(GPUVertBuf *buf,
int id,
PyObject *py_seq_data,
const char *error_prefix)
{
if (id < 0 || id >= GPU_vertbuf_get_format(buf)->attr_len) {
PyErr_Format(PyExc_ValueError, "Format id %d out of range", id);
@@ -225,7 +228,7 @@ static int py_attr_fill(GPUVertBuf *buf, int id, PyObject *py_seq_data, const ch
return 0;
}
if (!py_vertbuf_fill_impl(buf, (uint)id, py_seq_data, error_prefix)) {
if (!pygpu_vertbuf_fill_impl(buf, (uint)id, py_seq_data, error_prefix)) {
return 0;
}
@@ -238,7 +241,7 @@ static int py_attr_fill(GPUVertBuf *buf, int id, PyObject *py_seq_data, const ch
/** \name VertBuf Type
* \{ */
static PyObject *py_VertBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
static PyObject *pygpu_vertbuf__tp_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
struct {
PyObject *py_fmt;
@@ -260,7 +263,7 @@ static PyObject *py_VertBuf_new(PyTypeObject *UNUSED(type), PyObject *args, PyOb
return BPyGPUVertBuf_CreatePyObject(vbo);
}
PyDoc_STRVAR(py_VertBuf_attr_fill_doc,
PyDoc_STRVAR(pygpu_vertbuf_attr_fill_doc,
".. method:: attr_fill(id, data)\n"
"\n"
" Insert data into the buffer for a single attribute.\n"
@@ -269,7 +272,7 @@ PyDoc_STRVAR(py_VertBuf_attr_fill_doc,
" :type id: int or str\n"
" :param data: Sequence of data that should be stored in the buffer\n"
" :type data: sequence of values or tuples\n");
static PyObject *py_VertBuf_attr_fill(BPyGPUVertBuf *self, PyObject *args, PyObject *kwds)
static PyObject *pygpu_vertbuf_attr_fill(BPyGPUVertBuf *self, PyObject *args, PyObject *kwds)
{
PyObject *data;
PyObject *identifier;
@@ -299,28 +302,28 @@ static PyObject *py_VertBuf_attr_fill(BPyGPUVertBuf *self, PyObject *args, PyObj
return NULL;
}
if (!py_attr_fill(self->buf, id, data, "GPUVertBuf.attr_fill")) {
if (!pygpu_vertbuf_fill(self->buf, id, data, "GPUVertBuf.attr_fill")) {
return NULL;
}
Py_RETURN_NONE;
}
static struct PyMethodDef py_VertBuf_methods[] = {
static struct PyMethodDef pygpu_vertbuf__tp_methods[] = {
{"attr_fill",
(PyCFunction)py_VertBuf_attr_fill,
(PyCFunction)pygpu_vertbuf_attr_fill,
METH_VARARGS | METH_KEYWORDS,
py_VertBuf_attr_fill_doc},
pygpu_vertbuf_attr_fill_doc},
{NULL, NULL, 0, NULL},
};
static void py_VertBuf_dealloc(BPyGPUVertBuf *self)
static void pygpu_vertbuf__tp_dealloc(BPyGPUVertBuf *self)
{
GPU_vertbuf_discard(self->buf);
Py_TYPE(self)->tp_free(self);
}
PyDoc_STRVAR(py_gpu_vertex_buffer_doc,
PyDoc_STRVAR(pygpu_vertbuf__tp_doc,
".. class:: GPUVertBuf(len, format)\n"
"\n"
" Contains a VBO.\n"
@@ -332,11 +335,11 @@ PyDoc_STRVAR(py_gpu_vertex_buffer_doc,
PyTypeObject BPyGPUVertBuf_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUVertBuf",
.tp_basicsize = sizeof(BPyGPUVertBuf),
.tp_dealloc = (destructor)py_VertBuf_dealloc,
.tp_dealloc = (destructor)pygpu_vertbuf__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = py_gpu_vertex_buffer_doc,
.tp_methods = py_VertBuf_methods,
.tp_new = py_VertBuf_new,
.tp_doc = pygpu_vertbuf__tp_doc,
.tp_methods = pygpu_vertbuf__tp_methods,
.tp_new = pygpu_vertbuf__tp_new,
};
/** \} */

View File

@@ -32,115 +32,31 @@
#include "gpu_py_vertex_format.h" /* own include */
#ifdef __BIG_ENDIAN__
/* big endian */
# define MAKE_ID2(c, d) ((c) << 8 | (d))
# define MAKE_ID3(a, b, c) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8)
# define MAKE_ID4(a, b, c, d) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d))
#else
/* little endian */
# define MAKE_ID2(c, d) ((d) << 8 | (c))
# define MAKE_ID3(a, b, c) ((int)(c) << 16 | (b) << 8 | (a))
# define MAKE_ID4(a, b, c, d) ((int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a))
#endif
/* -------------------------------------------------------------------- */
/** \name Enum Conversion
*
* Use with PyArg_ParseTuple's "O&" formatting.
* \{ */
static int py_parse_component_type(const char *str, int length)
{
if (length == 2) {
switch (*((ushort *)str)) {
case MAKE_ID2('I', '8'):
return GPU_COMP_I8;
case MAKE_ID2('U', '8'):
return GPU_COMP_U8;
default:
break;
}
}
else if (length == 3) {
switch (*((uint *)str)) {
case MAKE_ID3('I', '1', '6'):
return GPU_COMP_I16;
case MAKE_ID3('U', '1', '6'):
return GPU_COMP_U16;
case MAKE_ID3('I', '3', '2'):
return GPU_COMP_I32;
case MAKE_ID3('U', '3', '2'):
return GPU_COMP_U32;
case MAKE_ID3('F', '3', '2'):
return GPU_COMP_F32;
case MAKE_ID3('I', '1', '0'):
return GPU_COMP_I10;
default:
break;
}
}
return -1;
}
static struct PyC_StringEnumItems pygpu_vertcomptype_items[] = {
{GPU_COMP_I8, "I8"},
{GPU_COMP_U8, "U8"},
{GPU_COMP_I16, "I16"},
{GPU_COMP_U16, "U16"},
{GPU_COMP_I32, "I32"},
{GPU_COMP_U32, "U32"},
{GPU_COMP_F32, "F32"},
{GPU_COMP_I10, "I10"},
{0, NULL},
};
static int py_parse_fetch_mode(const char *str, int length)
{
#define MATCH_ID(id) \
if (length == strlen(STRINGIFY(id))) { \
if (STREQ(str, STRINGIFY(id))) { \
return GPU_FETCH_##id; \
} \
} \
((void)0)
MATCH_ID(FLOAT);
MATCH_ID(INT);
MATCH_ID(INT_TO_FLOAT_UNIT);
MATCH_ID(INT_TO_FLOAT);
#undef MATCH_ID
return -1;
}
static int py_ParseVertCompType(PyObject *o, void *p)
{
Py_ssize_t length;
const char *str = _PyUnicode_AsStringAndSize(o, &length);
if (str == NULL) {
PyErr_Format(PyExc_ValueError, "expected a string, got %s", Py_TYPE(o)->tp_name);
return 0;
}
const int comp_type = py_parse_component_type(str, length);
if (comp_type == -1) {
PyErr_Format(PyExc_ValueError, "unknown component type: '%s", str);
return 0;
}
*((GPUVertCompType *)p) = comp_type;
return 1;
}
static int py_ParseVertFetchMode(PyObject *o, void *p)
{
Py_ssize_t length;
const char *str = _PyUnicode_AsStringAndSize(o, &length);
if (str == NULL) {
PyErr_Format(PyExc_ValueError, "expected a string, got %s", Py_TYPE(o)->tp_name);
return 0;
}
const int fetch_mode = py_parse_fetch_mode(str, length);
if (fetch_mode == -1) {
PyErr_Format(PyExc_ValueError, "unknown type literal: '%s'", str);
return 0;
}
(*(GPUVertFetchMode *)p) = fetch_mode;
return 1;
}
static struct PyC_StringEnumItems pygpu_vertfetchmode_items[] = {
{GPU_FETCH_FLOAT, "FLOAT"},
{GPU_FETCH_INT, "INT"},
{GPU_FETCH_INT_TO_FLOAT_UNIT, "INT_TO_FLOAT_UNIT"},
{GPU_FETCH_INT_TO_FLOAT, "INT_TO_FLOAT"},
{0, NULL},
};
/** \} */
@@ -148,7 +64,9 @@ static int py_ParseVertFetchMode(PyObject *o, void *p)
/** \name VertFormat Type
* \{ */
static PyObject *py_VertFormat_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
static PyObject *pygpu_vertformat__tp_new(PyTypeObject *UNUSED(type),
PyObject *args,
PyObject *kwds)
{
if (PyTuple_GET_SIZE(args) || (kwds && PyDict_Size(kwds))) {
PyErr_SetString(PyExc_ValueError, "This function takes no arguments");
@@ -158,7 +76,7 @@ static PyObject *py_VertFormat_new(PyTypeObject *UNUSED(type), PyObject *args, P
}
PyDoc_STRVAR(
py_VertFormat_attr_add_doc,
pygpu_vertformat_attr_add_doc,
".. method:: attr_add(id, comp_type, len, fetch_mode)\n"
"\n"
" Add a new attribute to the format.\n"
@@ -177,14 +95,12 @@ PyDoc_STRVAR(
" converted to a normal 4 byte float when used.\n"
" Possible values are `FLOAT`, `INT`, `INT_TO_FLOAT_UNIT` and `INT_TO_FLOAT`.\n"
" :type fetch_mode: `str`\n");
static PyObject *py_VertFormat_attr_add(BPyGPUVertFormat *self, PyObject *args, PyObject *kwds)
static PyObject *pygpu_vertformat_attr_add(BPyGPUVertFormat *self, PyObject *args, PyObject *kwds)
{
struct {
const char *id;
GPUVertCompType comp_type;
uint len;
GPUVertFetchMode fetch_mode;
} params;
const char *id;
uint len;
struct PyC_StringEnum comp_type = {pygpu_vertcomptype_items, GPU_COMP_I8};
struct PyC_StringEnum fetch_mode = {pygpu_vertfetchmode_items, GPU_FETCH_FLOAT};
if (self->fmt.attr_len == GPU_VERT_ATTR_MAX_LEN) {
PyErr_SetString(PyExc_ValueError, "Maximum attr reached " STRINGIFY(GPU_VERT_ATTR_MAX_LEN));
@@ -196,45 +112,45 @@ static PyObject *py_VertFormat_attr_add(BPyGPUVertFormat *self, PyObject *args,
if (!_PyArg_ParseTupleAndKeywordsFast(args,
kwds,
&_parser,
&params.id,
py_ParseVertCompType,
&params.comp_type,
&params.len,
py_ParseVertFetchMode,
&params.fetch_mode)) {
&id,
PyC_ParseStringEnum,
&comp_type,
&len,
PyC_ParseStringEnum,
&fetch_mode)) {
return NULL;
}
uint attr_id = GPU_vertformat_attr_add(
&self->fmt, params.id, params.comp_type, params.len, params.fetch_mode);
&self->fmt, id, comp_type.value_found, len, fetch_mode.value_found);
return PyLong_FromLong(attr_id);
}
static struct PyMethodDef py_VertFormat_methods[] = {
static struct PyMethodDef pygpu_vertformat__tp_methods[] = {
{"attr_add",
(PyCFunction)py_VertFormat_attr_add,
(PyCFunction)pygpu_vertformat_attr_add,
METH_VARARGS | METH_KEYWORDS,
py_VertFormat_attr_add_doc},
pygpu_vertformat_attr_add_doc},
{NULL, NULL, 0, NULL},
};
static void py_VertFormat_dealloc(BPyGPUVertFormat *self)
static void pygpu_vertformat__tp_dealloc(BPyGPUVertFormat *self)
{
Py_TYPE(self)->tp_free(self);
}
PyDoc_STRVAR(py_VertFormat_doc,
PyDoc_STRVAR(pygpu_vertformat__tp_doc,
".. class:: GPUVertFormat()\n"
"\n"
" This object contains information about the structure of a vertex buffer.\n");
PyTypeObject BPyGPUVertFormat_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUVertFormat",
.tp_basicsize = sizeof(BPyGPUVertFormat),
.tp_dealloc = (destructor)py_VertFormat_dealloc,
.tp_dealloc = (destructor)pygpu_vertformat__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = py_VertFormat_doc,
.tp_methods = py_VertFormat_methods,
.tp_new = py_VertFormat_new,
.tp_doc = pygpu_vertformat__tp_doc,
.tp_methods = pygpu_vertformat__tp_methods,
.tp_new = pygpu_vertformat__tp_new,
};
/** \} */

View File

@@ -261,7 +261,7 @@ PyDoc_STRVAR(bpy_escape_identifier_doc,
static PyObject *bpy_escape_identifier(PyObject *UNUSED(self), PyObject *value)
{
Py_ssize_t value_str_len;
const char *value_str = _PyUnicode_AsStringAndSize(value, &value_str_len);
const char *value_str = PyUnicode_AsUTF8AndSize(value, &value_str_len);
if (value_str == NULL) {
PyErr_SetString(PyExc_TypeError, "expected a string");
@@ -299,7 +299,7 @@ PyDoc_STRVAR(bpy_unescape_identifier_doc,
static PyObject *bpy_unescape_identifier(PyObject *UNUSED(self), PyObject *value)
{
Py_ssize_t value_str_len;
const char *value_str = _PyUnicode_AsStringAndSize(value, &value_str_len);
const char *value_str = PyUnicode_AsUTF8AndSize(value, &value_str_len);
if (value_str == NULL) {
PyErr_SetString(PyExc_TypeError, "expected a string");

View File

@@ -211,7 +211,7 @@ static void _build_translations_cache(PyObject *py_messages, const char *locale)
msgctxt = BLT_I18NCONTEXT_DEFAULT_BPYRNA;
}
else if (PyUnicode_Check(tmp)) {
msgctxt = _PyUnicode_AsString(tmp);
msgctxt = PyUnicode_AsUTF8(tmp);
}
else {
invalid_key = true;
@@ -219,7 +219,7 @@ static void _build_translations_cache(PyObject *py_messages, const char *locale)
tmp = PyTuple_GET_ITEM(pykey, 1);
if (PyUnicode_Check(tmp)) {
msgid = _PyUnicode_AsString(tmp);
msgid = PyUnicode_AsUTF8(tmp);
}
else {
invalid_key = true;
@@ -250,7 +250,7 @@ static void _build_translations_cache(PyObject *py_messages, const char *locale)
/* Do not overwrite existing keys! */
if (BPY_app_translations_py_pgettext(msgctxt, msgid) == msgid) {
GHashKey *key = _ghashutil_keyalloc(msgctxt, msgid);
BLI_ghash_insert(_translations_cache, key, BLI_strdup(_PyUnicode_AsString(trans)));
BLI_ghash_insert(_translations_cache, key, BLI_strdup(PyUnicode_AsUTF8(trans)));
}
}
}
@@ -341,7 +341,7 @@ static PyObject *app_translations_py_messages_register(BlenderAppTranslations *s
PyExc_ValueError,
"bpy.app.translations.register: translations message cache already contains some data for "
"addon '%s'",
(const char *)_PyUnicode_AsString(module_name));
(const char *)PyUnicode_AsUTF8(module_name));
return NULL;
}
@@ -795,11 +795,7 @@ static PyTypeObject BlenderAppTranslationsType = {
/* methods */
/* No destructor, this is a singleton! */
NULL, /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,

View File

@@ -136,7 +136,7 @@ bool BPy_errors_to_report_ex(ReportList *reports,
RPT_ERROR,
TIP_("%s: %s\nlocation: %s:%d\n"),
error_prefix,
_PyUnicode_AsString(pystring),
PyUnicode_AsUTF8(pystring),
filename,
lineno);
@@ -144,12 +144,12 @@ bool BPy_errors_to_report_ex(ReportList *reports,
fprintf(stderr,
TIP_("%s: %s\nlocation: %s:%d\n"),
error_prefix,
_PyUnicode_AsString(pystring),
PyUnicode_AsUTF8(pystring),
filename,
lineno);
}
else {
BKE_reportf(reports, RPT_ERROR, "%s: %s", error_prefix, _PyUnicode_AsString(pystring));
BKE_reportf(reports, RPT_ERROR, "%s: %s", error_prefix, PyUnicode_AsUTF8(pystring));
}
Py_DECREF(pystring);

View File

@@ -20,8 +20,8 @@
#pragma once
#if PY_VERSION_HEX < 0x03070000
# error "Python 3.7 or greater is required, you'll need to update your Python."
#if PY_VERSION_HEX < 0x03090000
# error "Python 3.9 or greater is required, you'll need to update your Python."
#endif
#ifdef __cplusplus

View File

@@ -163,7 +163,7 @@ int bpy_pydriver_create_dict(void)
PyObject *arg_key, *arg_value;
Py_ssize_t arg_pos = 0;
while (PyDict_Next(mod_math_dict, &arg_pos, &arg_key, &arg_value)) {
const char *arg_str = _PyUnicode_AsString(arg_key);
const char *arg_str = PyUnicode_AsUTF8(arg_key);
if (arg_str[0] && arg_str[1] != '_') {
PyDict_SetItem(bpy_pydriver_Dict__whitelist, arg_key, Py_None);
}
@@ -363,7 +363,7 @@ static bool bpy_driver_secure_bytecode_validate(PyObject *expr_code, PyObject *d
fprintf(stderr,
"\tBPY_driver_eval() - restricted access disallows name '%s', "
"enable auto-execution to support\n",
_PyUnicode_AsString(name));
PyUnicode_AsUTF8(name));
return false;
}
}

View File

@@ -304,100 +304,153 @@ static struct _inittab bpy_internal_modules[] = {
{NULL, NULL},
};
/**
* Convenience function for #BPY_python_start.
*
* These should happen so rarely that having comprehensive errors isn't needed.
* For example if `sys.argv` fails to allocate memory.
*
* Show an error just to avoid silent failure in the unlikely event something goes wrong,
* in this case a developer will need to track down the root cause.
*/
static void pystatus_exit_on_error(PyStatus status)
{
if (UNLIKELY(PyStatus_Exception(status))) {
fputs("Internal error initializing Python!\n", stderr);
/* This calls `exit`. */
Py_ExitStatusException(status);
}
}
/* call BPY_context_set first */
void BPY_python_start(bContext *C, int argc, const char **argv)
{
#ifndef WITH_PYTHON_MODULE
PyThreadState *py_tstate = NULL;
/* Needed for Python's initialization for portable Python installations.
* We could use #Py_SetPath, but this overrides Python's internal logic
* for calculating it's own module search paths.
*
* `sys.executable` is overwritten after initialization to the Python binary. */
/* #PyPreConfig (early-configuration). */
{
const char *program_path = BKE_appdir_program_path();
wchar_t program_path_wchar[FILE_MAX];
BLI_strncpy_wchar_from_utf8(program_path_wchar, program_path, ARRAY_SIZE(program_path_wchar));
Py_SetProgramName(program_path_wchar);
PyPreConfig preconfig;
PyStatus status;
if (py_use_system_env) {
PyPreConfig_InitPythonConfig(&preconfig);
}
else {
/* Only use the systems environment variables and site when explicitly requested.
* Since an incorrect 'PYTHONPATH' causes difficult to debug errors, see: T72807.
* An alternative to setting `preconfig.use_environment = 0` */
PyPreConfig_InitIsolatedConfig(&preconfig);
}
/* Force `utf-8` on all platforms, since this is what's used for Blender's internal strings,
* providing consistent encoding behavior across all Blender installations.
*
* This also uses the `surrogateescape` error handler ensures any unexpected bytes are escaped
* instead of raising an error.
*
* Without this `sys.getfilesystemencoding()` and `sys.stdout` for example may be set to ASCII
* or some other encoding - where printing some `utf-8` values will raise an error.
*
* This can cause scripts to fail entirely on some systems.
*
* This assignment is the equivalent of enabling the `PYTHONUTF8` environment variable.
* See `PEP-540` for details on exactly what this changes. */
preconfig.utf8_mode = true;
/* Note that there is no reason to call #Py_PreInitializeFromBytesArgs here
* as this is only used so that command line arguments can be handled by Python itself,
* not for setting `sys.argv` (handled below). */
status = Py_PreInitialize(&preconfig);
pystatus_exit_on_error(status);
}
/* must run before python initializes */
/* Must run before python initializes, but after #PyPreConfig. */
PyImport_ExtendInittab(bpy_internal_modules);
/* Allow to use our own included Python. `py_path_bundle` may be NULL. */
/* #PyConfig (initialize Python). */
{
const char *py_path_bundle = BKE_appdir_folder_id(BLENDER_SYSTEM_PYTHON, NULL);
if (py_path_bundle != NULL) {
PyC_SetHomePath(py_path_bundle);
PyConfig config;
PyStatus status;
bool has_python_executable = false;
PyConfig_InitPythonConfig(&config);
/* Suppress error messages when calculating the module search path.
* While harmless, it's noisy. */
config.pathconfig_warnings = 0;
/* When using the system's Python, allow the site-directory as well. */
config.user_site_directory = py_use_system_env;
/* While `sys.argv` is set, we don't want Python to interpret it. */
config.parse_argv = 0;
status = PyConfig_SetBytesArgv(&config, argc, (char *const *)argv);
pystatus_exit_on_error(status);
/* Needed for Python's initialization for portable Python installations.
* We could use #Py_SetPath, but this overrides Python's internal logic
* for calculating it's own module search paths.
*
* `sys.executable` is overwritten after initialization to the Python binary. */
{
const char *program_path = BKE_appdir_program_path();
status = PyConfig_SetBytesString(&config, &config.program_name, program_path);
pystatus_exit_on_error(status);
}
else {
/* Common enough to use the system Python on Linux/Unix, warn on other systems. */
/* Setting the program name is important so the 'multiprocessing' module
* can launch new Python instances. */
{
char program_path[FILE_MAX];
if (BKE_appdir_program_python_search(
program_path, sizeof(program_path), PY_MAJOR_VERSION, PY_MINOR_VERSION)) {
status = PyConfig_SetBytesString(&config, &config.executable, program_path);
pystatus_exit_on_error(status);
has_python_executable = true;
}
else {
/* Set to `sys.executable = None` below (we can't do before Python is initialized). */
fprintf(stderr,
"Unable to find the python binary, "
"the multiprocessing module may not be functional!\n");
}
}
/* Allow to use our own included Python. `py_path_bundle` may be NULL. */
{
const char *py_path_bundle = BKE_appdir_folder_id(BLENDER_SYSTEM_PYTHON, NULL);
if (py_path_bundle != NULL) {
# ifdef __APPLE__
/* Mac-OS allows file/directory names to contain `:` character
* (represented as `/` in the Finder) but current Python lib (as of release 3.1.1)
* doesn't handle these correctly. */
if (strchr(py_path_bundle, ':')) {
fprintf(stderr,
"Warning! Blender application is located in a path containing ':' or '/' chars\n"
"This may make python import function fail\n");
}
# endif /* __APPLE__ */
status = PyConfig_SetBytesString(&config, &config.home, py_path_bundle);
pystatus_exit_on_error(status);
}
else {
/* Common enough to use the system Python on Linux/Unix, warn on other systems. */
# if defined(__APPLE__) || defined(_WIN32)
fprintf(stderr,
"Bundled Python not found and is expected on this platform "
"(the 'install' target may have not been built)\n");
fprintf(stderr,
"Bundled Python not found and is expected on this platform "
"(the 'install' target may have not been built)\n");
# endif
}
}
}
/* Force `utf-8` on all platforms, since this is what's used for Blender's internal strings,
* providing consistent encoding behavior across all Blender installations.
*
* This also uses the `surrogateescape` error handler ensures any unexpected bytes are escaped
* instead of raising an error.
*
* Without this `sys.getfilesystemencoding()` and `sys.stdout` for example may be set to ASCII
* or some other encoding - where printing some `utf-8` values will raise an error.
*
* This can cause scripts to fail entirely on some systems.
*
* This assignment is the equivalent of enabling the `PYTHONUTF8` environment variable.
* See `PEP-540` for details on exactly what this changes. */
Py_UTF8Mode = 1;
/* Initialize Python (also acquires lock). */
status = Py_InitializeFromConfig(&config);
pystatus_exit_on_error(status);
/* Suppress error messages when calculating the module search path.
* While harmless, it's noisy. */
Py_FrozenFlag = 1;
/* Only use the systems environment variables and site when explicitly requested.
* Since an incorrect 'PYTHONPATH' causes difficult to debug errors, see: T72807. */
Py_IgnoreEnvironmentFlag = !py_use_system_env;
Py_NoUserSiteDirectory = !py_use_system_env;
/* Initialize Python (also acquires lock). */
Py_Initialize();
/* We could convert to #wchar_t then pass to #PySys_SetArgv (or use #PyConfig in Python 3.8+).
* However this risks introducing subtle changes in encoding that are hard to track down.
*
* So rely on #PyC_UnicodeFromByte since it's a tried & true way of getting paths
* that include non `utf-8` compatible characters, see: T20021. */
{
PyObject *py_argv = PyList_New(argc);
for (int i = 0; i < argc; i++) {
PyList_SET_ITEM(py_argv, i, PyC_UnicodeFromByte(argv[i]));
}
PySys_SetObject("argv", py_argv);
Py_DECREF(py_argv);
}
/* Setting the program name is important so the 'multiprocessing' module
* can launch new Python instances. */
{
const char *sys_variable = "executable";
char program_path[FILE_MAX];
if (BKE_appdir_program_python_search(
program_path, sizeof(program_path), PY_MAJOR_VERSION, PY_MINOR_VERSION)) {
PyObject *py_program_path = PyC_UnicodeFromByte(program_path);
PySys_SetObject(sys_variable, py_program_path);
Py_DECREF(py_program_path);
}
else {
fprintf(stderr,
"Unable to find the python binary, "
"the multiprocessing module may not be functional!\n");
PySys_SetObject(sys_variable, Py_None);
if (!has_python_executable) {
PySys_SetObject("executable", Py_None);
}
}
@@ -447,8 +500,9 @@ void BPY_python_start(bContext *C, int argc, const char **argv)
/* py module runs atexit when bpy is freed */
BPY_atexit_register(); /* this can init any time */
py_tstate = PyGILState_GetThisThreadState();
PyEval_ReleaseThread(py_tstate);
/* Free the lock acquired (implicitly) when Python is initialized. */
PyEval_ReleaseThread(PyGILState_GetThisThreadState());
#endif
#ifdef WITH_PYTHON_MODULE
@@ -534,8 +588,8 @@ void BPY_python_backtrace(FILE *fp)
PyFrameObject *frame = tstate->frame;
do {
const int line = PyCode_Addr2Line(frame->f_code, frame->f_lasti);
const char *filename = _PyUnicode_AsString(frame->f_code->co_filename);
const char *funcname = _PyUnicode_AsString(frame->f_code->co_name);
const char *filename = PyUnicode_AsUTF8(frame->f_code->co_filename);
const char *funcname = PyUnicode_AsUTF8(frame->f_code->co_name);
fprintf(fp, " File \"%s\", line %d in %s\n", filename, line, funcname);
} while ((frame = frame->f_back));
}
@@ -727,7 +781,7 @@ static void bpy_module_delay_init(PyObject *bpy_proxy)
/* updating the module dict below will lose the reference to __file__ */
PyObject *filename_obj = PyModule_GetFilenameObject(bpy_proxy);
const char *filename_rel = _PyUnicode_AsString(filename_obj); /* can be relative */
const char *filename_rel = PyUnicode_AsUTF8(filename_obj); /* can be relative */
char filename_abs[1024];
BLI_strncpy(filename_abs, filename_rel, sizeof(filename_abs));

View File

@@ -94,13 +94,9 @@ static PyTypeObject bpy_lib_Type = {
0, /* tp_itemsize */
/* methods */
(destructor)bpy_lib_dealloc, /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,
/* tp_compare */ /* DEPRECATED in python 3.0! */
NULL, /* tp_repr */
@@ -365,7 +361,7 @@ static PyObject *bpy_lib_exit(BPy_Library *self, PyObject *UNUSED(args))
for (i = 0; i < size; i++) {
PyObject *item_src = PyList_GET_ITEM(ls, i);
PyObject *item_dst; /* must be set below */
const char *item_idname = _PyUnicode_AsString(item_src);
const char *item_idname = PyUnicode_AsUTF8(item_src);
// printf(" %s\n", item_idname);

View File

@@ -118,7 +118,7 @@ static int py_msgbus_rna_key_from_py(PyObject *py_sub,
PointerRNA data_type_ptr = {
.type = data_type,
};
const char *data_prop_str = _PyUnicode_AsString(data_prop_py);
const char *data_prop_str = PyUnicode_AsUTF8(data_prop_py);
PropertyRNA *data_prop = RNA_struct_find_property(&data_type_ptr, data_prop_str);
if (data_prop == NULL) {

View File

@@ -58,7 +58,7 @@
static wmOperatorType *ot_lookup_from_py_string(PyObject *value, const char *py_fn_id)
{
const char *opname = _PyUnicode_AsString(value);
const char *opname = PyUnicode_AsUTF8(value);
if (opname == NULL) {
PyErr_Format(PyExc_TypeError, "%s() expects a string argument", py_fn_id);
return NULL;

View File

@@ -68,7 +68,7 @@ static void operator_properties_init(wmOperatorType *ot)
if (bl_property) {
if (PyUnicode_Check(bl_property)) {
/* since the property is explicitly given, raise an error if its not found */
prop_id = _PyUnicode_AsString(bl_property);
prop_id = PyUnicode_AsUTF8(bl_property);
prop_raise_error = true;
}
else {

View File

@@ -193,6 +193,126 @@ static const EnumPropertyItem property_subtype_array_items[] = {
"'XYZ', 'XYZ_LENGTH', 'COLOR_GAMMA', 'COORDINATES', 'LAYER', 'LAYER_MEMBER', 'NONE'].\n" \
" :type subtype: string\n"
/* -------------------------------------------------------------------- */
/** \name Deferred Property Type
*
* Operators and classes use this so it can store the arguments given but defer
* running it until the operator runs where these values are used to setup
* the default arguments for that operator instance.
* \{ */
static void bpy_prop_deferred_dealloc(BPy_PropDeferred *self)
{
if (self->kw) {
PyObject_GC_UnTrack(self);
Py_CLEAR(self->kw);
}
PyObject_GC_Del(self);
}
static int bpy_prop_deferred_traverse(BPy_PropDeferred *self, visitproc visit, void *arg)
{
Py_VISIT(self->kw);
return 0;
}
static int bpy_prop_deferred_clear(BPy_PropDeferred *self)
{
Py_CLEAR(self->kw);
return 0;
}
static PyObject *bpy_prop_deferred_repr(BPy_PropDeferred *self)
{
return PyUnicode_FromFormat("<%.200s, %R, %R>", Py_TYPE(self)->tp_name, self->fn, self->kw);
}
/**
* HACK: needed by `typing.get_type_hints`
* with `from __future__ import annotations` enabled or when using Python 3.10 or newer.
*
* When callable this object type passes the test for being an acceptable annotation.
*/
static PyObject *bpy_prop_deferred_call(BPy_PropDeferred *UNUSED(self),
PyObject *UNUSED(args),
PyObject *UNUSED(kw))
{
/* Dummy value. */
Py_RETURN_NONE;
}
/* Get/Set Items. */
/**
 * Getter for ``function``: the wrapped `bpy.props` function.
 * Exposed so scripts can introspect deferred properties
 * (not currently used by Blender itself).
 */
static PyObject *bpy_prop_deferred_function_get(BPy_PropDeferred *self, void *UNUSED(closure))
{
  PyObject *fn_ob = self->fn;
  Py_IncRef(fn_ob);
  return fn_ob;
}
/**
 * Getter for ``keywords``: the keyword arguments captured when the property
 * was declared. Exposed so scripts can introspect deferred properties
 * (not currently used by Blender itself).
 */
static PyObject *bpy_prop_deferred_keywords_get(BPy_PropDeferred *self, void *UNUSED(closure))
{
  PyObject *kw_ob = self->kw;
  Py_IncRef(kw_ob);
  return kw_ob;
}
/** Attribute table: both entries are read-only introspection accessors
 * (`_PropertyDeferred.function` / `_PropertyDeferred.keywords`). */
static PyGetSetDef bpy_prop_deferred_getset[] = {
    {"function", (getter)bpy_prop_deferred_function_get, (setter)NULL, NULL, NULL},
    {"keywords", (getter)bpy_prop_deferred_keywords_get, (setter)NULL, NULL, NULL},
    {NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};
PyDoc_STRVAR(bpy_prop_deferred_doc,
"Intermediate storage for properties before registration.\n"
"\n"
".. note::\n"
"\n"
" This is not part of the stable API and may change between releases.");
/* The `_PropertyDeferred` type: participates in cyclic GC
 * (`Py_TPFLAGS_HAVE_GC` with traverse/clear below) and is callable,
 * see `bpy_prop_deferred_call` for why the dummy call is needed. */
PyTypeObject bpy_prop_deferred_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "_PropertyDeferred",
    .tp_basicsize = sizeof(BPy_PropDeferred),
    .tp_dealloc = (destructor)bpy_prop_deferred_dealloc,
    .tp_repr = (reprfunc)bpy_prop_deferred_repr,
    .tp_call = (ternaryfunc)bpy_prop_deferred_call,
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
    .tp_doc = bpy_prop_deferred_doc,
    .tp_traverse = (traverseproc)bpy_prop_deferred_traverse,
    .tp_clear = (inquiry)bpy_prop_deferred_clear,
    .tp_getset = bpy_prop_deferred_getset,
};
/**
 * Create a `_PropertyDeferred` instance wrapping a `bpy.props` function
 * and the keyword arguments it was called with.
 *
 * \param fn: The property function; stored as a borrowed pointer, not
 * reference counted (see the note on `BPy_PropDeferred.fn`).
 * \param kw: Optional keyword dict (a reference is added), or NULL in which
 * case a fresh empty dict is created so `keywords` is always a dict.
 * \return New reference, or NULL with an exception set on allocation failure.
 */
static PyObject *bpy_prop_deferred_data_CreatePyObject(PyObject *fn, PyObject *kw)
{
  BPy_PropDeferred *self = PyObject_GC_New(BPy_PropDeferred, &bpy_prop_deferred_Type);
  if (self == NULL) {
    /* Allocation failed, `MemoryError` is already set. */
    return NULL;
  }
  self->fn = fn;
  if (kw == NULL) {
    kw = PyDict_New();
    if (kw == NULL) {
      /* `self` isn't GC tracked yet, free it directly. */
      PyObject_GC_Del(self);
      return NULL;
    }
  }
  else {
    Py_INCREF(kw);
  }
  self->kw = kw;
  PyObject_GC_Track(self);
  return (PyObject *)self;
}
/** \} */
/* PyObject's */
static PyObject *pymeth_BoolProperty = NULL;
static PyObject *pymeth_BoolVectorProperty = NULL;
@@ -248,27 +368,6 @@ static void bpy_prop_assign_flag_override(PropertyRNA *prop, const int flag_over
RNA_def_property_override_flag(prop, flag_override);
}
/* Operators and classes use this so it can store the args given but defer
 * running it until the operator runs, where these values are used to setup
 * the default args for that operator instance.
 *
 * Returns a new `(func, kw)` tuple holding references to both items;
 * when `kw` is NULL a fresh empty dict is created so item 1 is always a dict. */
static PyObject *bpy_prop_deferred_return(PyObject *func, PyObject *kw)
{
  PyObject *ret = PyTuple_New(2);
  /* `PyTuple_SET_ITEM` steals a reference; the INCREF below balances it. */
  PyTuple_SET_ITEM(ret, 0, func);
  Py_INCREF(func);
  if (kw == NULL) {
    kw = PyDict_New();
  }
  else {
    Py_INCREF(kw);
  }
  /* The new (or incremented) `kw` reference is consumed by the tuple. */
  PyTuple_SET_ITEM(ret, 1, kw);
  return ret;
}
/* callbacks */
static void bpy_prop_update_cb(struct bContext *C,
struct PointerRNA *ptr,
@@ -1152,7 +1251,7 @@ static void bpy_prop_string_get_cb(struct PointerRNA *ptr, struct PropertyRNA *p
}
else {
Py_ssize_t length;
const char *buffer = _PyUnicode_AsStringAndSize(ret, &length);
const char *buffer = PyUnicode_AsUTF8AndSize(ret, &length);
memcpy(value, buffer, length + 1);
Py_DECREF(ret);
}
@@ -1213,7 +1312,7 @@ static int bpy_prop_string_length_cb(struct PointerRNA *ptr, struct PropertyRNA
}
else {
Py_ssize_t length_ssize_t = 0;
_PyUnicode_AsStringAndSize(ret, &length_ssize_t);
PyUnicode_AsUTF8AndSize(ret, &length_ssize_t);
length = length_ssize_t;
Py_DECREF(ret);
}
@@ -1488,7 +1587,7 @@ static const EnumPropertyItem *enum_items_from_py(PyObject *seq_fast,
else {
if (def) {
if (!py_long_as_int(def, &def_int_cmp)) {
def_string_cmp = _PyUnicode_AsString(def);
def_string_cmp = PyUnicode_AsUTF8(def);
if (def_string_cmp == NULL) {
PyErr_Format(PyExc_TypeError,
"EnumProperty(...): default option must be a 'str' or 'int' "
@@ -1517,14 +1616,13 @@ static const EnumPropertyItem *enum_items_from_py(PyObject *seq_fast,
if ((PyTuple_CheckExact(item)) && (item_size = PyTuple_GET_SIZE(item)) &&
(item_size >= 3 && item_size <= 5) &&
(tmp.identifier = _PyUnicode_AsStringAndSize(PyTuple_GET_ITEM(item, 0), &id_str_size)) &&
(tmp.name = _PyUnicode_AsStringAndSize(PyTuple_GET_ITEM(item, 1), &name_str_size)) &&
(tmp.description = _PyUnicode_AsStringAndSize(PyTuple_GET_ITEM(item, 2),
&desc_str_size)) &&
(tmp.identifier = PyUnicode_AsUTF8AndSize(PyTuple_GET_ITEM(item, 0), &id_str_size)) &&
(tmp.name = PyUnicode_AsUTF8AndSize(PyTuple_GET_ITEM(item, 1), &name_str_size)) &&
(tmp.description = PyUnicode_AsUTF8AndSize(PyTuple_GET_ITEM(item, 2), &desc_str_size)) &&
/* TODO, number isn't ensured to be unique from the script author */
(item_size != 4 || py_long_as_int(PyTuple_GET_ITEM(item, 3), &tmp.value)) &&
(item_size != 5 || ((py_long_as_int(PyTuple_GET_ITEM(item, 3), &tmp.icon) ||
(tmp_icon = _PyUnicode_AsString(PyTuple_GET_ITEM(item, 3)))) &&
(tmp_icon = PyUnicode_AsUTF8(PyTuple_GET_ITEM(item, 3)))) &&
py_long_as_int(PyTuple_GET_ITEM(item, 4), &tmp.value)))) {
if (is_enum_flag) {
if (item_size < 4) {
@@ -1998,7 +2096,7 @@ static void bpy_prop_callback_assign_enum(struct PropertyRNA *prop,
if (PyErr_Occurred()) { \
return NULL; \
} \
return bpy_prop_deferred_return(pymeth_##_func, kw); \
return bpy_prop_deferred_data_CreatePyObject(pymeth_##_func, kw); \
} \
(void)0
@@ -3301,7 +3399,7 @@ StructRNA *pointer_type_from_py(PyObject *value, const char *error_prefix)
if (!srna) {
if (PyErr_Occurred()) {
PyObject *msg = PyC_ExceptionBuffer();
const char *msg_char = _PyUnicode_AsString(msg);
const char *msg_char = PyUnicode_AsUTF8(msg);
PyErr_Format(
PyExc_TypeError, "%.200s expected an RNA type, failed with: %s", error_prefix, msg_char);
Py_DECREF(msg);
@@ -3669,5 +3767,10 @@ PyObject *BPY_rna_props(void)
ASSIGN_STATIC(CollectionProperty);
ASSIGN_STATIC(RemoveProperty);
if (PyType_Ready(&bpy_prop_deferred_Type) < 0) {
return NULL;
}
PyModule_AddType(submodule, &bpy_prop_deferred_Type);
return submodule;
}

View File

@@ -30,6 +30,16 @@ PyObject *BPy_PointerProperty(PyObject *self, PyObject *args, PyObject *kw);
PyObject *BPy_CollectionProperty(PyObject *self, PyObject *args, PyObject *kw);
StructRNA *pointer_type_from_py(PyObject *value, const char *error_prefix);
/** Deferred property storage: keeps the `bpy.props` function and the keyword
 * arguments it was declared with, so registration can invoke it later. */
typedef struct {
  PyObject_HEAD
  /* This isn't GC tracked, it's a function from `bpy.props` so it's not going away. */
  void *fn;
  /* Keyword arguments (owned reference). */
  PyObject *kw;
} BPy_PropDeferred;
/* Defined in `bpy_props.c`. */
extern PyTypeObject bpy_prop_deferred_Type;
/* Exact type check; subclasses are intentionally excluded. */
#define BPy_PropDeferred_CheckTypeExact(v) (Py_TYPE(v) == &bpy_prop_deferred_Type)
#define PYRNA_STACK_ARRAY RNA_STACK_ARRAY
#ifdef __cplusplus

View File

@@ -80,6 +80,15 @@
#define USE_MATHUTILS
#define USE_STRING_COERCE
/**
* This _must_ be enabled to support Python 3.10's postponed annotations,
* `from __future__ import annotations`.
*
* This has the disadvantage of evaluating strings at run-time, in the future we might be able to
* reinstate the older, more efficient logic using descriptors, see: pep-0649
*/
#define USE_POSTPONED_ANNOTATIONS
/* Unfortunately Python needs to hold a global reference to the context.
* If we remove this is means `bpy.context` won't be usable from some parts of the code:
* `bpy.app.handler` callbacks for example.
@@ -90,6 +99,9 @@ BPy_StructRNA *bpy_context_module = NULL; /* for fast access */
static PyObject *pyrna_struct_Subtype(PointerRNA *ptr);
static PyObject *pyrna_prop_collection_values(BPy_PropertyRNA *self);
static PyObject *pyrna_register_class(PyObject *self, PyObject *py_class);
static PyObject *pyrna_unregister_class(PyObject *self, PyObject *py_class);
#define BPY_DOC_ID_PROP_TYPE_NOTE \
" .. note::\n" \
"\n" \
@@ -323,7 +335,7 @@ static bool rna_id_write_error(PointerRNA *ptr, PyObject *key)
const char *idtype = BKE_idtype_idcode_to_name(idcode);
const char *pyname;
if (key && PyUnicode_Check(key)) {
pyname = _PyUnicode_AsString(key);
pyname = PyUnicode_AsUTF8(key);
}
else {
pyname = "<UNKNOWN>";
@@ -1252,7 +1264,7 @@ static const char *pyrna_enum_as_string(PointerRNA *ptr, PropertyRNA *prop)
static int pyrna_string_to_enum(
PyObject *item, PointerRNA *ptr, PropertyRNA *prop, int *r_value, const char *error_prefix)
{
const char *param = _PyUnicode_AsString(item);
const char *param = PyUnicode_AsUTF8(item);
if (param == NULL) {
PyErr_Format(PyExc_TypeError,
@@ -1299,7 +1311,7 @@ BLI_bitmap *pyrna_set_to_enum_bitmap(const EnumPropertyItem *items,
BLI_bitmap *bitmap = BLI_BITMAP_NEW(bitmap_size, __func__);
while (_PySet_NextEntry(value, &pos, &key, &hash)) {
const char *param = _PyUnicode_AsString(key);
const char *param = PyUnicode_AsUTF8(key);
if (param == NULL) {
PyErr_Format(PyExc_TypeError,
"%.200s expected a string, not %.200s",
@@ -1364,7 +1376,7 @@ int pyrna_set_to_enum_bitfield(const EnumPropertyItem *items,
*r_value = 0;
while (_PySet_NextEntry(value, &pos, &key, &hash)) {
const char *param = _PyUnicode_AsString(key);
const char *param = PyUnicode_AsUTF8(key);
if (param == NULL) {
PyErr_Format(PyExc_TypeError,
@@ -1662,7 +1674,7 @@ int pyrna_pydict_to_props(PointerRNA *ptr,
Py_ssize_t pos = 0;
while (PyDict_Next(kw, &pos, &key, &value)) {
arg_name = _PyUnicode_AsString(key);
arg_name = PyUnicode_AsUTF8(key);
if (RNA_struct_find_property(ptr, arg_name) == NULL) {
break;
}
@@ -1871,10 +1883,10 @@ static int pyrna_py_to_prop(
param = PyC_UnicodeAsByte(value, &value_coerce);
}
else {
param = _PyUnicode_AsString(value);
param = PyUnicode_AsUTF8(value);
}
#else /* USE_STRING_COERCE */
param = _PyUnicode_AsString(value);
param = PyUnicode_AsUTF8(value);
#endif /* USE_STRING_COERCE */
if (param == NULL) {
@@ -2186,7 +2198,7 @@ static int pyrna_py_to_prop(
if (pyrna_pydict_to_props(
&itemptr, item, true, "Converting a Python list to an RNA collection") == -1) {
PyObject *msg = PyC_ExceptionBuffer();
const char *msg_char = _PyUnicode_AsString(msg);
const char *msg_char = PyUnicode_AsUTF8(msg);
PyErr_Format(PyExc_TypeError,
"%.200s %.200s.%.200s error converting a member of a collection "
@@ -2490,7 +2502,7 @@ static int pyrna_prop_collection_subscript_str_lib_pair_ptr(BPy_PropertyRNA *sel
RNA_struct_identifier(self->ptr.type));
return -1;
}
if ((keyname = _PyUnicode_AsString(PyTuple_GET_ITEM(key, 0))) == NULL) {
if ((keyname = PyUnicode_AsUTF8(PyTuple_GET_ITEM(key, 0))) == NULL) {
PyErr_Format(PyExc_KeyError,
"%s: id must be a string, not %.200s",
err_prefix,
@@ -2507,7 +2519,7 @@ static int pyrna_prop_collection_subscript_str_lib_pair_ptr(BPy_PropertyRNA *sel
}
else if (PyUnicode_Check(keylib)) {
Main *bmain = self->ptr.data;
const char *keylib_str = _PyUnicode_AsString(keylib);
const char *keylib_str = PyUnicode_AsUTF8(keylib);
lib = BLI_findstring(&bmain->libraries, keylib_str, offsetof(Library, filepath));
if (lib == NULL) {
if (err_not_found) {
@@ -2711,7 +2723,7 @@ static PyObject *pyrna_prop_collection_subscript(BPy_PropertyRNA *self, PyObject
PYRNA_PROP_CHECK_OBJ(self);
if (PyUnicode_Check(key)) {
return pyrna_prop_collection_subscript_str(self, _PyUnicode_AsString(key));
return pyrna_prop_collection_subscript_str(self, PyUnicode_AsUTF8(key));
}
if (PyIndex_Check(key)) {
const Py_ssize_t i = PyNumber_AsSsize_t(key, PyExc_IndexError);
@@ -2838,7 +2850,7 @@ static int pyrna_prop_collection_ass_subscript(BPy_PropertyRNA *self,
#if 0
if (PyUnicode_Check(key)) {
return pyrna_prop_collection_subscript_str(self, _PyUnicode_AsString(key));
return pyrna_prop_collection_subscript_str(self, PyUnicode_AsUTF8(key));
}
else
#endif
@@ -2910,7 +2922,7 @@ static PyObject *pyrna_prop_array_subscript(BPy_PropertyArrayRNA *self, PyObject
#if 0
if (PyUnicode_Check(key)) {
return pyrna_prop_array_subscript_str(self, _PyUnicode_AsString(key));
return pyrna_prop_array_subscript_str(self, PyUnicode_AsUTF8(key));
}
else
#endif
@@ -3359,7 +3371,7 @@ static int pyrna_prop_collection_contains(BPy_PropertyRNA *self, PyObject *key)
}
/* Key in dict style check. */
const char *keyname = _PyUnicode_AsString(key);
const char *keyname = PyUnicode_AsUTF8(key);
if (keyname == NULL) {
PyErr_SetString(PyExc_TypeError,
@@ -3377,7 +3389,7 @@ static int pyrna_prop_collection_contains(BPy_PropertyRNA *self, PyObject *key)
static int pyrna_struct_contains(BPy_StructRNA *self, PyObject *value)
{
IDProperty *group;
const char *name = _PyUnicode_AsString(value);
const char *name = PyUnicode_AsUTF8(value);
PYRNA_STRUCT_CHECK_INT(self);
@@ -3447,7 +3459,7 @@ static PyObject *pyrna_struct_subscript(BPy_StructRNA *self, PyObject *key)
{
/* Mostly copied from BPy_IDGroup_Map_GetItem. */
IDProperty *group, *idprop;
const char *name = _PyUnicode_AsString(key);
const char *name = PyUnicode_AsUTF8(key);
PYRNA_STRUCT_CHECK_OBJ(self);
@@ -4231,7 +4243,7 @@ static PyObject *pyrna_struct_dir(BPy_StructRNA *self)
/* ---------------getattr-------------------------------------------- */
static PyObject *pyrna_struct_getattro(BPy_StructRNA *self, PyObject *pyname)
{
const char *name = _PyUnicode_AsString(pyname);
const char *name = PyUnicode_AsUTF8(pyname);
PyObject *ret;
PropertyRNA *prop;
FunctionRNA *func;
@@ -4353,12 +4365,6 @@ static int pyrna_struct_pydict_contains(PyObject *self, PyObject *pyname)
#endif
/* --------------- setattr------------------------------------------- */
static bool pyrna_is_deferred_prop(const PyObject *value)
{
return PyTuple_CheckExact(value) && PyTuple_GET_SIZE(value) == 2 &&
PyCFunction_Check(PyTuple_GET_ITEM(value, 0)) &&
PyDict_CheckExact(PyTuple_GET_ITEM(value, 1));
}
#if 0
static PyObject *pyrna_struct_meta_idprop_getattro(PyObject *cls, PyObject *attr)
@@ -4370,15 +4376,15 @@ static PyObject *pyrna_struct_meta_idprop_getattro(PyObject *cls, PyObject *attr
* >>> bpy.types.Scene.foo
* <bpy_struct, BoolProperty("foo")>
* ...rather than returning the deferred class register tuple
* as checked by pyrna_is_deferred_prop()
* as checked by BPy_PropDeferred_CheckTypeExact()
*
* Disable for now,
* this is faking internal behavior in a way that's too tricky to maintain well. */
# if 0
if ((ret == NULL) /* || pyrna_is_deferred_prop(ret) */ ) {
if ((ret == NULL) /* || BPy_PropDeferred_CheckTypeExact(ret) */ ) {
StructRNA *srna = srna_from_self(cls, "StructRNA.__getattr__");
if (srna) {
PropertyRNA *prop = RNA_struct_type_find_property(srna, _PyUnicode_AsString(attr));
PropertyRNA *prop = RNA_struct_type_find_property(srna, PyUnicode_AsUTF8(attr));
if (prop) {
PointerRNA tptr;
PyErr_Clear(); /* Clear error from tp_getattro. */
@@ -4396,8 +4402,8 @@ static PyObject *pyrna_struct_meta_idprop_getattro(PyObject *cls, PyObject *attr
static int pyrna_struct_meta_idprop_setattro(PyObject *cls, PyObject *attr, PyObject *value)
{
StructRNA *srna = srna_from_self(cls, "StructRNA.__setattr__");
const bool is_deferred_prop = (value && pyrna_is_deferred_prop(value));
const char *attr_str = _PyUnicode_AsString(attr);
const bool is_deferred_prop = (value && BPy_PropDeferred_CheckTypeExact(value));
const char *attr_str = PyUnicode_AsUTF8(attr);
if (srna && !pyrna_write_check() &&
(is_deferred_prop || RNA_struct_type_find_property(srna, attr_str))) {
@@ -4458,7 +4464,7 @@ static int pyrna_struct_meta_idprop_setattro(PyObject *cls, PyObject *attr, PyOb
static int pyrna_struct_setattro(BPy_StructRNA *self, PyObject *pyname, PyObject *value)
{
const char *name = _PyUnicode_AsString(pyname);
const char *name = PyUnicode_AsUTF8(pyname);
PropertyRNA *prop = NULL;
PYRNA_STRUCT_CHECK_INT(self);
@@ -4550,7 +4556,7 @@ static PyObject *pyrna_prop_array_getattro(BPy_PropertyRNA *self, PyObject *pyna
static PyObject *pyrna_prop_collection_getattro(BPy_PropertyRNA *self, PyObject *pyname)
{
const char *name = _PyUnicode_AsString(pyname);
const char *name = PyUnicode_AsUTF8(pyname);
if (name == NULL) {
PyErr_SetString(PyExc_AttributeError, "bpy_prop_collection: __getattr__ must be a string");
@@ -4618,7 +4624,7 @@ static PyObject *pyrna_prop_collection_getattro(BPy_PropertyRNA *self, PyObject
/* --------------- setattr------------------------------------------- */
static int pyrna_prop_collection_setattro(BPy_PropertyRNA *self, PyObject *pyname, PyObject *value)
{
const char *name = _PyUnicode_AsString(pyname);
const char *name = PyUnicode_AsUTF8(pyname);
PropertyRNA *prop;
PointerRNA r_ptr;
@@ -5015,7 +5021,7 @@ static PyObject *pyrna_prop_collection_get(BPy_PropertyRNA *self, PyObject *args
}
if (PyUnicode_Check(key_ob)) {
const char *key = _PyUnicode_AsString(key_ob);
const char *key = PyUnicode_AsUTF8(key_ob);
if (RNA_property_collection_lookup_string(&self->ptr, self->prop, key, &newptr)) {
return pyrna_struct_CreatePyObject(&newptr);
@@ -5050,7 +5056,7 @@ PyDoc_STRVAR(pyrna_prop_collection_find_doc,
static PyObject *pyrna_prop_collection_find(BPy_PropertyRNA *self, PyObject *key_ob)
{
Py_ssize_t key_len_ssize_t;
const char *key = _PyUnicode_AsStringAndSize(key_ob, &key_len_ssize_t);
const char *key = PyUnicode_AsUTF8AndSize(key_ob, &key_len_ssize_t);
const int key_len = (int)key_len_ssize_t; /* Compare with same type. */
char name[256], *nameptr;
@@ -6035,7 +6041,7 @@ static PyObject *small_dict_get_item_string(PyObject *dict, const char *key_look
while (PyDict_Next(dict, &pos, &key, &value)) {
if (PyUnicode_Check(key)) {
if (STREQ(key_lookup, _PyUnicode_AsString(key))) {
if (STREQ(key_lookup, PyUnicode_AsUTF8(key))) {
return value;
}
}
@@ -6189,7 +6195,7 @@ static PyObject *pyrna_func_call(BPy_FunctionRNA *self, PyObject *args, PyObject
#ifdef DEBUG_STRING_FREE
if (item) {
if (PyUnicode_Check(item)) {
PyList_APPEND(string_free_ls, PyUnicode_FromString(_PyUnicode_AsString(item)));
PyList_APPEND(string_free_ls, PyUnicode_FromString(PyUnicode_AsUTF8(item)));
}
}
#endif
@@ -6245,7 +6251,7 @@ static PyObject *pyrna_func_call(BPy_FunctionRNA *self, PyObject *args, PyObject
while (PyDict_Next(kw, &pos, &key, &value)) {
arg_name = _PyUnicode_AsString(key);
arg_name = PyUnicode_AsUTF8(key);
found = false;
if (arg_name == NULL) { /* Unlikely the argname is not a string, but ignore if it is. */
@@ -6404,11 +6410,7 @@ PyTypeObject pyrna_struct_meta_idprop_Type = {
0, /* tp_itemsize */
/* methods */
NULL, /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,
@@ -6460,7 +6462,7 @@ PyTypeObject pyrna_struct_meta_idprop_Type = {
#if defined(_MSC_VER)
NULL, /* defer assignment */
#else
&PyType_Type, /* struct _typeobject *tp_base; */
&PyType_Type, /* struct _typeobject *tp_base; */
#endif
NULL, /* PyObject *tp_dict; */
NULL, /* descrgetfunc tp_descr_get; */
@@ -6489,13 +6491,9 @@ PyTypeObject pyrna_struct_Type = {
0, /* tp_itemsize */
/* methods */
(destructor)pyrna_struct_dealloc, /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,
/* tp_compare */ /* DEPRECATED in Python 3.0! */
(reprfunc)pyrna_struct_repr, /* tp_repr */
@@ -6529,7 +6527,7 @@ PyTypeObject pyrna_struct_Type = {
/* delete references to contained objects */
(inquiry)pyrna_struct_clear, /* inquiry tp_clear; */
#else
NULL, /* traverseproc tp_traverse; */
NULL, /* traverseproc tp_traverse; */
/* delete references to contained objects */
NULL, /* inquiry tp_clear; */
@@ -6582,13 +6580,9 @@ PyTypeObject pyrna_prop_Type = {
0, /* tp_itemsize */
/* methods */
(destructor)pyrna_prop_dealloc, /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,
/* tp_compare */ /* DEPRECATED in Python 3.0! */
(reprfunc)pyrna_prop_repr, /* tp_repr */
@@ -6670,13 +6664,9 @@ PyTypeObject pyrna_prop_array_Type = {
0, /* tp_itemsize */
/* methods */
(destructor)pyrna_prop_array_dealloc, /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,
/* tp_compare */ /* DEPRECATED in Python 3.0! */
(reprfunc)pyrna_prop_array_repr, /* tp_repr */
@@ -6757,13 +6747,9 @@ PyTypeObject pyrna_prop_collection_Type = {
0, /* tp_itemsize */
/* methods */
(destructor)pyrna_prop_dealloc, /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,
/* tp_compare */ /* DEPRECATED in Python 3.0! */
NULL,
@@ -6847,13 +6833,9 @@ static PyTypeObject pyrna_prop_collection_idprop_Type = {
0, /* tp_itemsize */
/* methods */
(destructor)pyrna_prop_dealloc, /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,
/* tp_compare */ /* DEPRECATED in Python 3.0! */
NULL,
@@ -6937,11 +6919,7 @@ PyTypeObject pyrna_func_Type = {
0, /* tp_itemsize */
/* methods */
NULL, /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,
@@ -7037,13 +7015,9 @@ static PyTypeObject pyrna_prop_collection_iter_Type = {
0, /* tp_itemsize */
/* methods */
(destructor)pyrna_prop_collection_iter_dealloc, /* tp_dealloc */
# if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
# else
(printfunc)NULL, /* printfunc tp_print */
# endif
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL,
/* tp_compare */ /* DEPRECATED in Python 3.0! */
NULL,
@@ -7689,32 +7663,47 @@ PyObject *BPY_rna_doc(void)
}
#endif
/* pyrna_basetype_* - BPy_BaseTypeRNA is just a BPy_PropertyRNA struct with a different type
* the self->ptr and self->prop are always set to the "structs" collection */
/* ---------------getattr-------------------------------------------- */
static PyObject *pyrna_basetype_getattro(BPy_BaseTypeRNA *self, PyObject *pyname)
/* -------------------------------------------------------------------- */
/** \name RNA Types Module `bpy.types`
* \{ */
/**
* This could be a static variable as we only have one `bpy.types` module,
* it just keeps the data isolated to store in the module it's self.
*
* This data doesn't change one initialized.
*/
struct BPy_TypesModule_State {
/** `RNA_BlenderRNA`. */
PointerRNA ptr;
/** `RNA_BlenderRNA.structs`, exposed as `bpy.types` */
PropertyRNA *prop;
};
static PyObject *bpy_types_module_getattro(PyObject *self, PyObject *pyname)
{
struct BPy_TypesModule_State *state = PyModule_GetState(self);
PointerRNA newptr;
PyObject *ret;
const char *name = _PyUnicode_AsString(pyname);
const char *name = PyUnicode_AsUTF8(pyname);
if (name == NULL) {
PyErr_SetString(PyExc_AttributeError, "bpy.types: __getattr__ must be a string");
ret = NULL;
}
else if (RNA_property_collection_lookup_string(&self->ptr, self->prop, name, &newptr)) {
else if (RNA_property_collection_lookup_string(&state->ptr, state->prop, name, &newptr)) {
ret = pyrna_struct_Subtype(&newptr);
if (ret == NULL) {
PyErr_Format(PyExc_RuntimeError,
"bpy.types.%.200s subtype could not be generated, this is a bug!",
_PyUnicode_AsString(pyname));
PyUnicode_AsUTF8(pyname));
}
}
else {
#if 0
PyErr_Format(PyExc_AttributeError,
"bpy.types.%.200s RNA_Struct does not exist",
_PyUnicode_AsString(pyname));
PyUnicode_AsUTF8(pyname));
return NULL;
#endif
/* The error raised here will be displayed. */
@@ -7724,79 +7713,60 @@ static PyObject *pyrna_basetype_getattro(BPy_BaseTypeRNA *self, PyObject *pyname
return ret;
}
static PyObject *pyrna_basetype_dir(BPy_BaseTypeRNA *self);
static PyObject *pyrna_register_class(PyObject *self, PyObject *py_class);
static PyObject *pyrna_unregister_class(PyObject *self, PyObject *py_class);
static struct PyMethodDef pyrna_basetype_methods[] = {
{"__dir__", (PyCFunction)pyrna_basetype_dir, METH_NOARGS, ""},
{NULL, NULL, 0, NULL},
};
/* Used to call ..._keys() direct, but we need to filter out operator subclasses. */
#if 0
static PyObject *pyrna_basetype_dir(BPy_BaseTypeRNA *self)
{
PyObject *list;
# if 0
PyMethodDef *meth;
# endif
list = pyrna_prop_collection_keys(self); /* Like calling structs.keys(), avoids looping here. */
# if 0 /* For now only contains __dir__. */
for (meth = pyrna_basetype_methods; meth->ml_name; meth++) {
PyList_APPEND(list, PyUnicode_FromString(meth->ml_name));
}
# endif
return list;
}
#else
static PyObject *pyrna_basetype_dir(BPy_BaseTypeRNA *self)
static PyObject *bpy_types_module_dir(PyObject *self)
{
struct BPy_TypesModule_State *state = PyModule_GetState(self);
PyObject *ret = PyList_New(0);
RNA_PROP_BEGIN (&self->ptr, itemptr, self->prop) {
RNA_PROP_BEGIN (&state->ptr, itemptr, state->prop) {
StructRNA *srna = itemptr.data;
PyList_APPEND(ret, PyUnicode_FromString(RNA_struct_identifier(srna)));
}
RNA_PROP_END;
/* Include the modules `__dict__` for Python only types. */
PyObject *submodule_dict = PyModule_GetDict(self);
PyObject *key, *value;
Py_ssize_t pos = 0;
while (PyDict_Next(submodule_dict, &pos, &key, &value)) {
PyList_Append(ret, key);
}
return ret;
}
#endif
static struct PyMethodDef bpy_types_module_methods[] = {
{"__getattr__", (PyCFunction)bpy_types_module_getattro, METH_O, NULL},
{"__dir__", (PyCFunction)bpy_types_module_dir, METH_NOARGS, NULL},
{NULL, NULL, 0, NULL},
};
static PyTypeObject pyrna_basetype_Type = BLANK_PYTHON_TYPE;
PyDoc_STRVAR(bpy_types_module_doc, "Access to internal Blender types");
static struct PyModuleDef bpy_types_module_def = {
PyModuleDef_HEAD_INIT,
"bpy.types", /* m_name */
bpy_types_module_doc, /* m_doc */
sizeof(struct BPy_TypesModule_State), /* m_size */
bpy_types_module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
/**
* Accessed from Python as 'bpy.types'
*/
PyObject *BPY_rna_types(void)
{
BPy_BaseTypeRNA *self;
PyObject *submodule = PyModule_Create(&bpy_types_module_def);
struct BPy_TypesModule_State *state = PyModule_GetState(submodule);
if ((pyrna_basetype_Type.tp_flags & Py_TPFLAGS_READY) == 0) {
pyrna_basetype_Type.tp_name = "RNA_Types";
pyrna_basetype_Type.tp_basicsize = sizeof(BPy_BaseTypeRNA);
pyrna_basetype_Type.tp_getattro = (getattrofunc)pyrna_basetype_getattro;
pyrna_basetype_Type.tp_flags = Py_TPFLAGS_DEFAULT;
pyrna_basetype_Type.tp_methods = pyrna_basetype_methods;
if (PyType_Ready(&pyrna_basetype_Type) < 0) {
return NULL;
}
}
/* Static members for the base class. */
/* Add __name__ since help() expects it. */
PyDict_SetItem(pyrna_basetype_Type.tp_dict, bpy_intern_str___name__, bpy_intern_str_bpy_types);
RNA_blender_rna_pointer_create(&state->ptr);
state->prop = RNA_struct_find_property(&state->ptr, "structs");
/* Internal base types we have no other accessors for. */
{
PyTypeObject *pyrna_types[] = {
static PyTypeObject *pyrna_types[] = {
&pyrna_struct_meta_idprop_Type,
&pyrna_struct_Type,
&pyrna_prop_Type,
@@ -7805,23 +7775,17 @@ PyObject *BPY_rna_types(void)
&pyrna_func_Type,
};
PyObject *submodule_dict = PyModule_GetDict(submodule);
for (int i = 0; i < ARRAY_SIZE(pyrna_types); i += 1) {
PyDict_SetItemString(
pyrna_basetype_Type.tp_dict, pyrna_types[i]->tp_name, (PyObject *)pyrna_types[i]);
PyDict_SetItemString(submodule_dict, pyrna_types[i]->tp_name, (PyObject *)pyrna_types[i]);
}
}
self = (BPy_BaseTypeRNA *)PyObject_NEW(BPy_BaseTypeRNA, &pyrna_basetype_Type);
/* Avoid doing this lookup for every getattr. */
RNA_blender_rna_pointer_create(&self->ptr);
self->prop = RNA_struct_find_property(&self->ptr, "structs");
#ifdef USE_WEAKREFS
self->in_weakreflist = NULL;
#endif
return (PyObject *)self;
return submodule;
}
/** \} */
StructRNA *pyrna_struct_as_srna(PyObject *self, const bool parent, const char *error_prefix)
{
BPy_StructRNA *py_srna = NULL;
@@ -7912,83 +7876,133 @@ StructRNA *srna_from_self(PyObject *self, const char *error_prefix)
static int deferred_register_prop(StructRNA *srna, PyObject *key, PyObject *item)
{
if (!BPy_PropDeferred_CheckTypeExact(item)) {
/* No error, ignoring. */
return 0;
}
/* We only care about results from C which
* are for sure types, save some time with error */
if (pyrna_is_deferred_prop(item)) {
PyObject *py_func = ((BPy_PropDeferred *)item)->fn;
PyObject *py_kw = ((BPy_PropDeferred *)item)->kw;
PyObject *py_srna_cobject, *py_ret;
PyObject *py_func, *py_kw, *py_srna_cobject, *py_ret;
PyObject *args_fake;
if (PyArg_ParseTuple(item, "OO!", &py_func, &PyDict_Type, &py_kw)) {
PyObject *args_fake;
if (*PyUnicode_AsUTF8(key) == '_') {
PyErr_Format(PyExc_ValueError,
"bpy_struct \"%.200s\" registration error: "
"%.200s could not register because the property starts with an '_'\n",
RNA_struct_identifier(srna),
PyUnicode_AsUTF8(key));
return -1;
}
py_srna_cobject = PyCapsule_New(srna, NULL, NULL);
if (*_PyUnicode_AsString(key) == '_') {
PyErr_Format(PyExc_ValueError,
"bpy_struct \"%.200s\" registration error: "
"%.200s could not register because the property starts with an '_'\n",
RNA_struct_identifier(srna),
_PyUnicode_AsString(key));
return -1;
}
py_srna_cobject = PyCapsule_New(srna, NULL, NULL);
/* Not 100% nice :/, modifies the dict passed, should be ok. */
PyDict_SetItem(py_kw, bpy_intern_str_attr, key);
/* Not 100% nice :/, modifies the dict passed, should be ok. */
PyDict_SetItem(py_kw, bpy_intern_str_attr, key);
args_fake = PyTuple_New(1);
PyTuple_SET_ITEM(args_fake, 0, py_srna_cobject);
args_fake = PyTuple_New(1);
PyTuple_SET_ITEM(args_fake, 0, py_srna_cobject);
PyObject *type = PyDict_GetItemString(py_kw, "type");
StructRNA *type_srna = srna_from_self(type, "");
if (type_srna) {
if (!RNA_struct_idprops_datablock_allowed(srna) &&
(*(PyCFunctionWithKeywords)PyCFunction_GET_FUNCTION(py_func) == BPy_PointerProperty ||
*(PyCFunctionWithKeywords)PyCFunction_GET_FUNCTION(py_func) ==
BPy_CollectionProperty) &&
RNA_struct_idprops_contains_datablock(type_srna)) {
PyErr_Format(PyExc_ValueError,
"bpy_struct \"%.200s\" doesn't support datablock properties\n",
RNA_struct_identifier(srna));
return -1;
}
}
py_ret = PyObject_Call(py_func, args_fake, py_kw);
if (py_ret) {
Py_DECREF(py_ret);
Py_DECREF(args_fake); /* Free's py_srna_cobject too. */
}
else {
/* _must_ print before decreffing args_fake. */
PyErr_Print();
PyErr_Clear();
Py_DECREF(args_fake); /* Free's py_srna_cobject too. */
// PyC_LineSpit();
PyErr_Format(PyExc_ValueError,
"bpy_struct \"%.200s\" registration error: "
"%.200s could not register\n",
RNA_struct_identifier(srna),
_PyUnicode_AsString(key));
return -1;
}
}
else {
/* Since this is a class dict, ignore args that can't be passed. */
/* For testing only. */
#if 0
PyC_ObSpit("Why doesn't this work??", item);
PyErr_Print();
#endif
PyErr_Clear();
PyObject *type = PyDict_GetItemString(py_kw, "type");
StructRNA *type_srna = srna_from_self(type, "");
if (type_srna) {
if (!RNA_struct_idprops_datablock_allowed(srna) &&
(*(PyCFunctionWithKeywords)PyCFunction_GET_FUNCTION(py_func) == BPy_PointerProperty ||
*(PyCFunctionWithKeywords)PyCFunction_GET_FUNCTION(py_func) == BPy_CollectionProperty) &&
RNA_struct_idprops_contains_datablock(type_srna)) {
PyErr_Format(PyExc_ValueError,
"bpy_struct \"%.200s\" doesn't support datablock properties\n",
RNA_struct_identifier(srna));
return -1;
}
}
py_ret = PyObject_Call(py_func, args_fake, py_kw);
if (py_ret) {
Py_DECREF(py_ret);
Py_DECREF(args_fake); /* Free's py_srna_cobject too. */
}
else {
/* _must_ print before decreffing args_fake. */
PyErr_Print();
PyErr_Clear();
Py_DECREF(args_fake); /* Free's py_srna_cobject too. */
// PyC_LineSpit();
PyErr_Format(PyExc_ValueError,
"bpy_struct \"%.200s\" registration error: "
"%.200s could not register\n",
RNA_struct_identifier(srna),
PyUnicode_AsUTF8(key));
return -1;
}
return 0;
}
/**
 * Extract `__annotations__` using `typing.get_type_hints` which handles the delayed evaluation.
 *
 * \return 0 on success, -1 on failure (with a Python error set here, or
 * raised/printed by `deferred_register_prop`).
 */
static int pyrna_deferred_register_class_from_type_hints(StructRNA *srna, PyTypeObject *py_class)
{
  PyObject *annotations_dict = NULL;
  /* `typing.get_type_hints(py_class)` */
  {
    PyObject *typing_mod = PyImport_ImportModuleLevel("typing", NULL, NULL, NULL, 0);
    if (typing_mod != NULL) {
      PyObject *get_type_hints_fn = PyObject_GetAttrString(typing_mod, "get_type_hints");
      if (get_type_hints_fn != NULL) {
        PyObject *args = PyTuple_New(1);
        /* `PyTuple_SET_ITEM` steals a reference; the INCREF keeps `py_class` alive. */
        PyTuple_SET_ITEM(args, 0, (PyObject *)py_class);
        Py_INCREF(py_class);
        annotations_dict = PyObject_CallObject(get_type_hints_fn, args);
        Py_DECREF(args);
        Py_DECREF(get_type_hints_fn);
      }
      Py_DECREF(typing_mod);
    }
  }
  int ret = 0;
  if (annotations_dict != NULL) {
    if (PyDict_CheckExact(annotations_dict)) {
      PyObject *item, *key;
      Py_ssize_t pos = 0;
      /* Register each annotated attribute, stopping at the first failure. */
      while (PyDict_Next(annotations_dict, &pos, &key, &item)) {
        ret = deferred_register_prop(srna, key, item);
        if (ret != 0) {
          break;
        }
      }
    }
    else {
      /* Should never happen, an error wont have been raised, so raise one. */
      PyErr_Format(PyExc_TypeError,
                   "typing.get_type_hints returned: %.200s, expected dict\n",
                   Py_TYPE(annotations_dict)->tp_name);
      ret = -1;
    }
    Py_DECREF(annotations_dict);
  }
  else {
    /* `get_type_hints` (or the import) failed; an error must already be set. */
    BLI_assert(PyErr_Occurred());
    fprintf(stderr, "typing.get_type_hints failed with: %.200s\n", py_class->tp_name);
    ret = -1;
  }
  return ret;
}
static int pyrna_deferred_register_props(StructRNA *srna, PyObject *class_dict)
{
PyObject *annotations_dict;
@@ -8009,31 +8023,6 @@ static int pyrna_deferred_register_props(StructRNA *srna, PyObject *class_dict)
}
}
if (ret == 0) {
/* This block can be removed once 2.8x is released and annotations are in use. */
bool has_warning = false;
while (PyDict_Next(class_dict, &pos, &key, &item)) {
if (pyrna_is_deferred_prop(item)) {
if (!has_warning) {
printf(
"Warning: class %.200s "
"contains a property which should be an annotation!\n",
RNA_struct_identifier(srna));
PyC_LineSpit();
has_warning = true;
}
printf(" assign as a type annotation: %.200s.%.200s\n",
RNA_struct_identifier(srna),
_PyUnicode_AsString(key));
}
ret = deferred_register_prop(srna, key, item);
if (ret != 0) {
break;
}
}
}
return ret;
}
@@ -8078,6 +8067,15 @@ int pyrna_deferred_register_class(StructRNA *srna, PyTypeObject *py_class)
return 0;
}
#ifdef USE_POSTPONED_ANNOTATIONS
const bool use_postponed_annotations = true;
#else
const bool use_postponed_annotations = false;
#endif
if (use_postponed_annotations) {
return pyrna_deferred_register_class_from_type_hints(srna, py_class);
}
return pyrna_deferred_register_class_recursive(srna, py_class);
}
@@ -9120,7 +9118,7 @@ static PyObject *pyrna_bl_owner_id_set(PyObject *UNUSED(self), PyObject *value)
name = NULL;
}
else if (PyUnicode_Check(value)) {
name = _PyUnicode_AsString(value);
name = PyUnicode_AsUTF8(value);
}
else {
PyErr_Format(PyExc_ValueError,

View File

@@ -177,9 +177,6 @@ typedef struct {
FunctionRNA *func;
} BPy_FunctionRNA;
/* cheap trick */
#define BPy_BaseTypeRNA BPy_PropertyRNA
StructRNA *srna_from_self(PyObject *self, const char *error_prefix);
StructRNA *pyrna_struct_as_srna(PyObject *self, const bool parent, const char *error_prefix);

View File

@@ -23,28 +23,24 @@
#include <Python.h>
#include "RNA_types.h"
#include "../generic/python_utildefines.h"
#include "BLI_utildefines.h"
#include "bpy_capi_utils.h"
#include "bpy_rna.h"
#include "bpy_rna_callback.h"
#include "DNA_screen_types.h"
#include "DNA_space_types.h"
#include "RNA_access.h"
#include "RNA_enum_types.h"
#include "BKE_context.h"
#include "BKE_screen.h"
#include "WM_api.h"
#include "ED_space_api.h"
#include "../generic/python_utildefines.h"
#include "BPY_extern.h" /* For public API. */
#include "bpy_capi_utils.h"
#include "bpy_rna.h"
#include "bpy_rna_callback.h" /* Own include. */
/* Use this to stop other capsules from being mis-used. */
static const char *rna_capsual_id = "RNA_HANDLE";
@@ -261,6 +257,12 @@ static eSpace_Type rna_Space_refine_reverse(StructRNA *srna)
return SPACE_EMPTY;
}
/**
 * Capsule destructor: release the reference to the argument tuple
 * that was stored as the capsule's context when the handler was added.
 */
static void cb_rna_capsule_destructor(PyObject *capsule)
{
  /* Hold the context in a local: #Py_DECREF may evaluate its argument more than once. */
  PyObject *capsule_context = PyCapsule_GetContext(capsule);
  Py_DECREF(capsule_context);
}
PyObject *pyrna_callback_classmethod_add(PyObject *UNUSED(self), PyObject *args)
{
void *handle;
@@ -378,10 +380,14 @@ PyObject *pyrna_callback_classmethod_add(PyObject *UNUSED(self), PyObject *args)
return NULL;
}
/* Keep the 'args' reference as long as the callback exists.
* This reference is decremented in #BPY_callback_screen_free and #BPY_callback_wm_free. */
Py_INCREF(args);
PyObject *ret = PyCapsule_New((void *)handle, rna_capsual_id, NULL);
/* Store 'args' in context as well as the handler custom-data,
* because the handle may be freed by Blender (new file, new window... etc) */
/* Store 'args' in context as well for simple access. */
PyCapsule_SetDestructor(ret, cb_rna_capsule_destructor);
PyCapsule_SetContext(ret, args);
Py_INCREF(args);
@@ -412,7 +418,6 @@ PyObject *pyrna_callback_classmethod_remove(PyObject *UNUSED(self), PyObject *ar
"callback_remove(handler): NULL handler given, invalid or already removed");
return NULL;
}
PyObject *handle_args = PyCapsule_GetContext(py_handle);
if (srna == &RNA_WindowManager) {
if (!PyArg_ParseTuple(
@@ -466,11 +471,52 @@ PyObject *pyrna_callback_classmethod_remove(PyObject *UNUSED(self), PyObject *ar
return NULL;
}
/* The handle has been removed, so decrement its customdata. */
PyObject *handle_args = PyCapsule_GetContext(py_handle);
Py_DECREF(handle_args);
/* don't allow reuse */
if (capsule_clear) {
Py_DECREF(handle_args);
PyCapsule_Destructor destructor_fn = PyCapsule_GetDestructor(py_handle);
if (destructor_fn) {
destructor_fn(py_handle);
PyCapsule_SetDestructor(py_handle, NULL);
}
PyCapsule_SetName(py_handle, rna_capsual_id_invalid);
}
Py_RETURN_NONE;
}
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
/**
 * Free a handler's Python custom-data (the argument tuple) from C.
 * The GIL is always acquired here; the original condition was
 * `!PyC_IsInterpreterActive()` (kept unconditional, see comment below).
 */
static void cb_customdata_free(void *customdata)
{
  PyObject *py_data = customdata;

  /* Unconditionally take the GIL, matching `use_gil = true` in the previous form.
   * (Candidate condition: `!PyC_IsInterpreterActive()`.) */
  const PyGILState_STATE gilstate = PyGILState_Ensure();
  Py_DECREF(py_data);
  PyGILState_Release(gilstate);
}
/* Public API: remove every region draw handler registered through Python
 * (those using #cb_region_draw) from this region type, freeing each
 * handler's argument tuple via #cb_customdata_free. */
void BPY_callback_screen_free(struct ARegionType *art)
{
  ED_region_draw_cb_remove_by_type(art, cb_region_draw, cb_customdata_free);
}
/* Public API: remove every paint-cursor handler registered through Python
 * (those using #cb_wm_cursor_draw) from this window-manager, freeing each
 * handler's argument tuple via #cb_customdata_free. */
void BPY_callback_wm_free(struct wmWindowManager *wm)
{
  WM_paint_cursor_remove_by_type(wm, cb_wm_cursor_draw, cb_customdata_free);
}
/** \} */

View File

@@ -304,7 +304,7 @@ static PyObject *bpy_batch_remove(PyObject *UNUSED(self), PyObject *args, PyObje
PyObject *ret = NULL;
static const char *_keywords[] = {"ids", NULL};
static _PyArg_Parser _parser = {"O:user_map", _keywords, 0};
static _PyArg_Parser _parser = {"O:batch_remove", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &ids)) {
return ret;
}
@@ -353,12 +353,15 @@ PyDoc_STRVAR(bpy_orphans_purge_doc,
"\n"
" Remove (delete) all IDs with no user.\n"
"\n"
" :return: The number of deleted IDs.\n"
"\n"
" WARNING: Considered experimental feature currently.\n");
static PyObject *bpy_orphans_purge(PyObject *UNUSED(self),
PyObject *UNUSED(args),
PyObject *UNUSED(kwds))
" :arg do_local_ids: Include unused local IDs in the deletion, defaults to True\n"
" :type do_local_ids: bool, optional\n"
" :arg do_linked_ids: Include unused linked IDs in the deletion, defaults to True\n"
" :type do_linked_ids: bool, optional\n"
" :arg do_recursive: Recursively check for unused IDs, ensuring no orphaned one "
"remain after a single run of that function, defaults to False\n"
" :type do_recursive: bool, optional\n"
" :return: The number of deleted IDs.\n");
static PyObject *bpy_orphans_purge(PyObject *UNUSED(self), PyObject *args, PyObject *kwds)
{
#if 0 /* If someone knows how to get a proper 'self' in that case... */
BPy_StructRNA *pyrna = (BPy_StructRNA *)self;
@@ -367,16 +370,26 @@ static PyObject *bpy_orphans_purge(PyObject *UNUSED(self),
Main *bmain = G_MAIN; /* XXX Ugly, but should work! */
#endif
ID *id;
FOREACH_MAIN_ID_BEGIN (bmain, id) {
if (id->us == 0) {
id->tag |= LIB_TAG_DOIT;
}
else {
id->tag &= ~LIB_TAG_DOIT;
}
int num_tagged[INDEX_ID_MAX] = {0};
bool do_local_ids = true;
bool do_linked_ids = true;
bool do_recursive_cleanup = false;
static const char *_keywords[] = {"do_local_ids", "do_linked_ids", "do_recursive", NULL};
static _PyArg_Parser _parser = {"|$ppp:orphans_purge", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser, &do_local_ids, &do_linked_ids, &do_recursive_cleanup)) {
return NULL;
}
/* Tag all IDs to delete. */
BKE_lib_query_unused_ids_tag(
bmain, LIB_TAG_DOIT, do_local_ids, do_linked_ids, do_recursive_cleanup, num_tagged);
if (num_tagged[INDEX_ID_NULL] == 0) {
return PyLong_FromSize_t(0);
}
FOREACH_MAIN_ID_END;
const size_t num_datablocks_deleted = BKE_id_multi_tagged_delete(bmain);
/* Force full redraw, mandatory to avoid crashes when running this from UI... */

View File

@@ -149,7 +149,7 @@ void python_script_error_jump(const char *filepath, int *lineno, int *offset)
PyObject *filename_py, *text_py;
if (parse_syntax_error(value, &message, &filename_py, lineno, offset, &text_py)) {
const char *filename = _PyUnicode_AsString(filename_py);
const char *filename = PyUnicode_AsUTF8(filename_py);
/* python adds a '/', prefix, so check for both */
if ((BLI_path_cmp(filename, filepath) == 0) ||
(ELEM(filename[0], '\\', '/') && BLI_path_cmp(filename + 1, filepath) == 0)) {

View File

@@ -784,11 +784,11 @@ PyMODINIT_FUNC PyInit_mathutils(void)
mod = PyModule_Create(&M_Mathutils_module_def);
/* each type has its own new() function */
PyModule_AddObject(mod, vector_Type.tp_name, (PyObject *)&vector_Type);
PyModule_AddObject(mod, matrix_Type.tp_name, (PyObject *)&matrix_Type);
PyModule_AddObject(mod, euler_Type.tp_name, (PyObject *)&euler_Type);
PyModule_AddObject(mod, quaternion_Type.tp_name, (PyObject *)&quaternion_Type);
PyModule_AddObject(mod, color_Type.tp_name, (PyObject *)&color_Type);
PyModule_AddType(mod, &vector_Type);
PyModule_AddType(mod, &matrix_Type);
PyModule_AddType(mod, &euler_Type);
PyModule_AddType(mod, &quaternion_Type);
PyModule_AddType(mod, &color_Type);
/* submodule */
PyModule_AddObject(mod, "geometry", (submodule = PyInit_mathutils_geometry()));

View File

@@ -670,7 +670,7 @@ static int Euler_order_set(EulerObject *self, PyObject *value, void *UNUSED(clos
return -1;
}
if (((order_str = _PyUnicode_AsString(value)) == NULL) ||
if (((order_str = PyUnicode_AsUTF8(value)) == NULL) ||
((order = euler_order_from_string(order_str, "euler.order")) == -1)) {
return -1;
}

View File

@@ -499,7 +499,7 @@ static PyObject *C_Matrix_Rotation(PyObject *cls, PyObject *args)
}
if (vec && PyUnicode_Check(vec)) {
axis = _PyUnicode_AsString((PyObject *)vec);
axis = PyUnicode_AsUTF8((PyObject *)vec);
if (axis == NULL || axis[0] == '\0' || axis[1] != '\0' || axis[0] < 'X' || axis[0] > 'Z') {
PyErr_SetString(PyExc_ValueError,
"Matrix.Rotation(): "
@@ -768,7 +768,7 @@ static PyObject *C_Matrix_OrthoProjection(PyObject *cls, PyObject *args)
if (PyUnicode_Check(axis)) { /* ortho projection onto cardinal plane */
Py_ssize_t plane_len;
const char *plane = _PyUnicode_AsStringAndSize(axis, &plane_len);
const char *plane = PyUnicode_AsUTF8AndSize(axis, &plane_len);
if (matSize == 2) {
if (plane_len == 1 && plane[0] == 'X') {
mat[0] = 1.0f;

View File

@@ -195,7 +195,7 @@ static PyObject *Quaternion_to_swing_twist(QuaternionObject *self, PyObject *axi
int axis;
if (axis_arg && PyUnicode_Check(axis_arg)) {
axis_str = _PyUnicode_AsString(axis_arg);
axis_str = PyUnicode_AsUTF8(axis_arg);
}
if (axis_str && axis_str[0] >= 'X' && axis_str[0] <= 'Z' && axis_str[1] == 0) {

View File

@@ -3042,15 +3042,11 @@ PyTypeObject vector_Type = {
/* Methods to implement standard operations */
(destructor)BaseMathObject_dealloc, /* destructor tp_dealloc; */
#if PY_VERSION_HEX >= 0x03080000
0, /* tp_vectorcall_offset */
#else
(printfunc)NULL, /* printfunc tp_print */
#endif
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL, /* cmpfunc tp_compare; */
(reprfunc)Vector_repr, /* reprfunc tp_repr; */
0, /* tp_vectorcall_offset */
NULL, /* getattrfunc tp_getattr; */
NULL, /* setattrfunc tp_setattr; */
NULL, /* cmpfunc tp_compare; */
(reprfunc)Vector_repr, /* reprfunc tp_repr; */
/* Method suites for standard classes */
@@ -3065,7 +3061,7 @@ PyTypeObject vector_Type = {
#ifndef MATH_STANDALONE
(reprfunc)Vector_str, /* reprfunc tp_str; */
#else
NULL, /* reprfunc tp_str; */
NULL, /* reprfunc tp_str; */
#endif
NULL, /* getattrofunc tp_getattro; */
NULL, /* setattrofunc tp_setattro; */

View File

@@ -333,8 +333,8 @@ PyDoc_STRVAR(py_bvhtree_ray_cast_doc,
"\n"
" Cast a ray onto the mesh.\n"
"\n"
" :arg co: Start location of the ray in object space.\n"
" :type co: :class:`Vector`\n"
" :arg origin: Start location of the ray in object space.\n"
" :type origin: :class:`Vector`\n"
" :arg direction: Direction of the ray in object space.\n"
" :type direction: :class:`Vector`\n" PYBVH_FIND_GENERIC_DISTANCE_DOC
PYBVH_FIND_GENERIC_RETURN_DOC);
@@ -1331,7 +1331,7 @@ PyMODINIT_FUNC PyInit_mathutils_bvhtree(void)
return NULL;
}
PyModule_AddObject(m, "BVHTree", (PyObject *)&PyBVHTree_Type);
PyModule_AddType(m, &PyBVHTree_Type);
return m;
}

View File

@@ -459,7 +459,7 @@ PyMODINIT_FUNC PyInit_mathutils_kdtree(void)
if (PyType_Ready(&PyKDTree_Type)) {
return NULL;
}
PyModule_AddObject(m, "KDTree", (PyObject *)&PyKDTree_Type);
PyModule_AddType(m, &PyKDTree_Type);
return m;
}