PLY: import/export custom vertex attributes (#108948) #114320
|
@ -80,6 +80,7 @@ static int wm_ply_export_exec(bContext *C, wmOperator *op)
|
|||
export_params.export_uv = RNA_boolean_get(op->ptr, "export_uv");
|
||||
export_params.export_normals = RNA_boolean_get(op->ptr, "export_normals");
|
||||
export_params.vertex_colors = ePLYVertexColorMode(RNA_enum_get(op->ptr, "export_colors"));
|
||||
export_params.export_attributes = RNA_boolean_get(op->ptr, "export_attributes");
|
||||
export_params.export_triangulated_mesh = RNA_boolean_get(op->ptr, "export_triangulated_mesh");
|
||||
export_params.ascii_format = RNA_boolean_get(op->ptr, "ascii_format");
|
||||
|
||||
|
@ -120,6 +121,7 @@ static void ui_ply_export_settings(uiLayout *layout, PointerRNA *imfptr)
|
|||
uiItemR(sub, imfptr, "export_uv", UI_ITEM_NONE, IFACE_("UV Coordinates"), ICON_NONE);
|
||||
uiItemR(sub, imfptr, "export_normals", UI_ITEM_NONE, IFACE_("Vertex Normals"), ICON_NONE);
|
||||
uiItemR(sub, imfptr, "export_colors", UI_ITEM_NONE, IFACE_("Vertex Colors"), ICON_NONE);
|
||||
uiItemR(sub, imfptr, "export_attributes", UI_ITEM_NONE, IFACE_("Vertex Attributes"), ICON_NONE);
|
||||
uiItemR(sub,
|
||||
imfptr,
|
||||
"export_triangulated_mesh",
|
||||
|
@ -211,7 +213,11 @@ void WM_OT_ply_export(wmOperatorType *ot)
|
|||
PLY_VERTEX_COLOR_SRGB,
|
||||
"Export Vertex Colors",
|
||||
"Export vertex color attributes");
|
||||
|
||||
RNA_def_boolean(ot->srna,
|
||||
"export_attributes",
|
||||
true,
|
||||
"Export Vertex Attributes",
|
||||
"Export custom vertex attributes");
|
||||
RNA_def_boolean(ot->srna,
|
||||
"export_triangulated_mesh",
|
||||
false,
|
||||
|
@ -243,6 +249,7 @@ static int wm_ply_import_exec(bContext *C, wmOperator *op)
|
|||
params.use_scene_unit = RNA_boolean_get(op->ptr, "use_scene_unit");
|
||||
params.global_scale = RNA_float_get(op->ptr, "global_scale");
|
||||
params.merge_verts = RNA_boolean_get(op->ptr, "merge_verts");
|
||||
params.import_attributes = RNA_boolean_get(op->ptr, "import_attributes");
|
||||
params.vertex_colors = ePLYVertexColorMode(RNA_enum_get(op->ptr, "import_colors"));
|
||||
|
||||
int files_len = RNA_collection_length(op->ptr, "files");
|
||||
|
@ -316,8 +323,10 @@ void WM_OT_ply_import(wmOperatorType *ot)
|
|||
"import_colors",
|
||||
ply_vertex_colors_mode,
|
||||
PLY_VERTEX_COLOR_SRGB,
|
||||
"Import Vertex Colors",
|
||||
"Vertex Colors",
|
||||
"Import vertex color attributes");
|
||||
RNA_def_boolean(
|
||||
ot->srna, "import_attributes", true, "Vertex Attributes", "Import custom vertex attributes");
|
||||
|
||||
/* Only show .ply files by default. */
|
||||
prop = RNA_def_string(ot->srna, "filter_glob", "*.ply", 0, "Extension Filter", "");
|
||||
|
|
|
@ -43,6 +43,7 @@ struct PLYExportParams {
|
|||
bool export_uv;
|
||||
bool export_normals;
|
||||
ePLYVertexColorMode vertex_colors;
|
||||
bool export_attributes;
|
||||
bool export_triangulated_mesh;
|
||||
};
|
||||
|
||||
|
@ -54,6 +55,7 @@ struct PLYImportParams {
|
|||
bool use_scene_unit;
|
||||
float global_scale;
|
||||
ePLYVertexColorMode vertex_colors;
|
||||
bool import_attributes;
|
||||
bool merge_verts;
|
||||
};
|
||||
|
||||
|
|
|
@ -34,6 +34,10 @@ void write_vertices(FileBuffer &buffer, const PlyData &ply_data)
|
|||
buffer.write_UV(ply_data.uv_coordinates[i].x, ply_data.uv_coordinates[i].y);
|
||||
}
|
||||
|
||||
for (const PlyCustomAttribute &attr : ply_data.vertex_custom_attr) {
|
||||
buffer.write_data(attr.data[i]);
|
||||
}
|
||||
|
||||
buffer.write_vertex_end();
|
||||
}
|
||||
buffer.write_to_file();
|
||||
|
|
|
@ -50,6 +50,10 @@ void write_header(FileBuffer &buffer,
|
|||
buffer.write_header_scalar_property("float", "t");
|
||||
}
|
||||
|
||||
for (const PlyCustomAttribute &attr : ply_data.vertex_custom_attr) {
|
||||
buffer.write_header_scalar_property("float", attr.name);
|
||||
}
|
||||
|
||||
if (!ply_data.face_sizes.is_empty()) {
|
||||
buffer.write_header_element("face", int(ply_data.face_sizes.size()));
|
||||
buffer.write_header_list_property("uchar", "uint", "vertex_indices");
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include "BLI_hash.hh"
|
||||
#include "BLI_math_color.hh"
|
||||
#include "BLI_math_matrix.h"
|
||||
#include "BLI_math_quaternion.hh"
|
||||
#include "BLI_math_rotation.h"
|
||||
#include "BLI_math_vector.h"
|
||||
#include "BLI_vector.hh"
|
||||
|
@ -147,6 +148,166 @@ static void generate_vertex_map(const Mesh *mesh,
|
|||
}
|
||||
}
|
||||
|
||||
static void load_custom_attributes(const Mesh *mesh, Vector<PlyCustomAttribute> &r_attributes)
|
||||
{
|
||||
const bke::AttributeAccessor attributes = mesh->attributes();
|
||||
const StringRef color_name = mesh->active_color_attribute;
|
||||
const StringRef uv_name = CustomData_get_active_layer_name(&mesh->loop_data, CD_PROP_FLOAT2);
|
||||
|
||||
attributes.for_all([&](const bke::AttributeIDRef &attribute_id,
|
||||
const bke::AttributeMetaData &meta_data) {
|
||||
/* Skip internal, standard and non-vertex domain attributes. */
|
||||
if (meta_data.domain != ATTR_DOMAIN_POINT || attribute_id.name()[0] == '.' ||
|
||||
attribute_id.is_anonymous() || ELEM(attribute_id.name(), "position", color_name, uv_name))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
const GVArraySpan attribute = *mesh->attributes().lookup(
|
||||
aras_p marked this conversation as resolved
|
||||
attribute_id, meta_data.domain, meta_data.data_type);
|
||||
const int64_t size = attribute.size();
|
||||
aras_p marked this conversation as resolved
Outdated
Hans Goudey
commented
This check could happen once at the beginning of the function This check could happen once at the beginning of the function
|
||||
if (size == 0) {
|
||||
return true;
|
||||
}
|
||||
switch (meta_data.data_type) {
|
||||
case CD_PROP_FLOAT: {
|
||||
PlyCustomAttribute attr(attribute_id.name(), size);
|
||||
auto typed = attribute.typed<float>();
|
||||
for (const int64_t i : typed.index_range()) {
|
||||
attr.data[i] = typed[i];
|
||||
}
|
||||
r_attributes.append(attr);
|
||||
break;
|
||||
}
|
||||
case CD_PROP_INT8: {
|
||||
PlyCustomAttribute attr(attribute_id.name(), size);
|
||||
auto typed = attribute.typed<int8_t>();
|
||||
for (const int64_t i : typed.index_range()) {
|
||||
attr.data[i] = typed[i];
|
||||
}
|
||||
r_attributes.append(attr);
|
||||
break;
|
||||
}
|
||||
case CD_PROP_INT32: {
|
||||
PlyCustomAttribute attr(attribute_id.name(), size);
|
||||
auto typed = attribute.typed<int32_t>();
|
||||
for (const int64_t i : typed.index_range()) {
|
||||
attr.data[i] = typed[i];
|
||||
}
|
||||
r_attributes.append(attr);
|
||||
break;
|
||||
}
|
||||
case CD_PROP_INT32_2D: {
|
||||
PlyCustomAttribute attr_x(attribute_id.name() + "_x", size);
|
||||
PlyCustomAttribute attr_y(attribute_id.name() + "_y", size);
|
||||
auto typed = attribute.typed<int2>();
|
||||
for (const int64_t i : typed.index_range()) {
|
||||
attr_x.data[i] = typed[i].x;
|
||||
attr_y.data[i] = typed[i].y;
|
||||
}
|
||||
aras_p marked this conversation as resolved
Hans Goudey
commented
Style guide mentions that Style guide mentions that `break` goes inside the braces for each case. Also worth double checking proper clang format is applied after too
Aras Pranckevicius
commented
That's the curious bit! I had them in separate lines, but That's the curious bit! I had them in separate lines, but `make format` put them in there. Maybe my clang is too old or smth? I'll check.
Hans Goudey
commented
You sure you didn't have them after (outside of) the braces? You sure you didn't have them after (outside of) the braces?
Aras Pranckevicius
commented
Aaaah! Ok. I should learn to read properly :) Aaaah! Ok. I should learn to read properly :)
|
||||
r_attributes.append(attr_x);
|
||||
r_attributes.append(attr_y);
|
||||
break;
|
||||
}
|
||||
case CD_PROP_FLOAT2: {
|
||||
PlyCustomAttribute attr_x(attribute_id.name() + "_x", size);
|
||||
PlyCustomAttribute attr_y(attribute_id.name() + "_y", size);
|
||||
auto typed = attribute.typed<float2>();
|
||||
for (const int64_t i : typed.index_range()) {
|
||||
attr_x.data[i] = typed[i].x;
|
||||
attr_y.data[i] = typed[i].y;
|
||||
}
|
||||
r_attributes.append(attr_x);
|
||||
r_attributes.append(attr_y);
|
||||
break;
|
||||
}
|
||||
case CD_PROP_FLOAT3: {
|
||||
PlyCustomAttribute attr_x(attribute_id.name() + "_x", size);
|
||||
PlyCustomAttribute attr_y(attribute_id.name() + "_y", size);
|
||||
PlyCustomAttribute attr_z(attribute_id.name() + "_z", size);
|
||||
aras_p marked this conversation as resolved
Outdated
Hans Goudey
commented
Giving Giving `PlyCustomAttribute` a name and size constructor (or just using the default constructor?) would remove some of these extra boilerplate lines
|
||||
auto typed = attribute.typed<float3>();
|
||||
for (const int64_t i : typed.index_range()) {
|
||||
attr_x.data[i] = typed[i].x;
|
||||
attr_y.data[i] = typed[i].y;
|
||||
attr_z.data[i] = typed[i].z;
|
||||
}
|
||||
r_attributes.append(attr_x);
|
||||
r_attributes.append(attr_y);
|
||||
r_attributes.append(attr_z);
|
||||
break;
|
||||
}
|
||||
case CD_PROP_BYTE_COLOR: {
|
||||
PlyCustomAttribute attr_r(attribute_id.name() + "_r", size);
|
||||
PlyCustomAttribute attr_g(attribute_id.name() + "_g", size);
|
||||
PlyCustomAttribute attr_b(attribute_id.name() + "_b", size);
|
||||
PlyCustomAttribute attr_a(attribute_id.name() + "_a", size);
|
||||
auto typed = attribute.typed<ColorGeometry4b>();
|
||||
for (const int64_t i : typed.index_range()) {
|
||||
ColorGeometry4f col = typed[i].decode();
|
||||
attr_r.data[i] = col.r;
|
||||
attr_g.data[i] = col.g;
|
||||
attr_b.data[i] = col.b;
|
||||
attr_a.data[i] = col.a;
|
||||
}
|
||||
r_attributes.append(attr_r);
|
||||
r_attributes.append(attr_g);
|
||||
r_attributes.append(attr_b);
|
||||
r_attributes.append(attr_a);
|
||||
break;
|
||||
}
|
||||
case CD_PROP_COLOR: {
|
||||
PlyCustomAttribute attr_r(attribute_id.name() + "_r", size);
|
||||
PlyCustomAttribute attr_g(attribute_id.name() + "_g", size);
|
||||
PlyCustomAttribute attr_b(attribute_id.name() + "_b", size);
|
||||
PlyCustomAttribute attr_a(attribute_id.name() + "_a", size);
|
||||
auto typed = attribute.typed<ColorGeometry4f>();
|
||||
for (const int64_t i : typed.index_range()) {
|
||||
ColorGeometry4f col = typed[i];
|
||||
attr_r.data[i] = col.r;
|
||||
attr_g.data[i] = col.g;
|
||||
attr_b.data[i] = col.b;
|
||||
attr_a.data[i] = col.a;
|
||||
}
|
||||
r_attributes.append(attr_r);
|
||||
r_attributes.append(attr_g);
|
||||
r_attributes.append(attr_b);
|
||||
r_attributes.append(attr_a);
|
||||
break;
|
||||
}
|
||||
case CD_PROP_BOOL: {
|
||||
PlyCustomAttribute attr(attribute_id.name(), size);
|
||||
auto typed = attribute.typed<bool>();
|
||||
for (const int64_t i : typed.index_range()) {
|
||||
attr.data[i] = typed[i] ? 1.0f : 0.0f;
|
||||
}
|
||||
r_attributes.append(attr);
|
||||
break;
|
||||
}
|
||||
case CD_PROP_QUATERNION: {
|
||||
PlyCustomAttribute attr_x(attribute_id.name() + "_x", size);
|
||||
PlyCustomAttribute attr_y(attribute_id.name() + "_y", size);
|
||||
PlyCustomAttribute attr_z(attribute_id.name() + "_z", size);
|
||||
PlyCustomAttribute attr_w(attribute_id.name() + "_w", size);
|
||||
auto typed = attribute.typed<math::Quaternion>();
|
||||
for (const int64_t i : typed.index_range()) {
|
||||
attr_x.data[i] = typed[i].x;
|
||||
attr_y.data[i] = typed[i].y;
|
||||
attr_z.data[i] = typed[i].z;
|
||||
attr_w.data[i] = typed[i].w;
|
||||
}
|
||||
r_attributes.append(attr_x);
|
||||
r_attributes.append(attr_y);
|
||||
r_attributes.append(attr_z);
|
||||
r_attributes.append(attr_w);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
BLI_assert_msg(0, "Unsupported attribute type for PLY export.");
|
||||
}
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
||||
void load_plydata(PlyData &plyData, Depsgraph *depsgraph, const PLYExportParams &export_params)
|
||||
{
|
||||
DEGObjectIterSettings deg_iter_settings{};
|
||||
|
@ -267,6 +428,11 @@ void load_plydata(PlyData &plyData, Depsgraph *depsgraph, const PLYExportParams
|
|||
}
|
||||
}
|
||||
|
||||
/* Custom attributes */
|
||||
if (export_params.export_attributes) {
|
||||
load_custom_attributes(mesh, plyData.vertex_custom_attr);
|
||||
}
|
||||
|
||||
/* Loose edges */
|
||||
const bke::LooseEdgeCache &loose_edges = mesh->loose_edges();
|
||||
if (loose_edges.count > 0) {
|
||||
|
|
|
@ -50,6 +50,8 @@ class FileBuffer : private NonMovable {
|
|||
|
||||
virtual void write_UV(float u, float v) = 0;
|
||||
|
||||
virtual void write_data(float v) = 0;
|
||||
|
||||
virtual void write_vertex_normal(float nx, float ny, float nz) = 0;
|
||||
|
||||
virtual void write_vertex_color(uchar r, uchar g, uchar b, uchar a) = 0;
|
||||
|
|
|
@ -20,6 +20,11 @@ void FileBufferAscii::write_UV(float u, float v)
|
|||
write_fstring(" {} {}", u, v);
|
||||
}
|
||||
|
||||
/* Writes one custom attribute value as text, preceded by a space separator. */
void FileBufferAscii::write_data(float v)
{
  write_fstring(" {}", v);
}
|
||||
|
||||
void FileBufferAscii::write_vertex_normal(float nx, float ny, float nz)
|
||||
{
|
||||
write_fstring(" {} {} {}", nx, ny, nz);
|
||||
|
|
|
@ -19,6 +19,8 @@ class FileBufferAscii : public FileBuffer {
|
|||
|
||||
void write_UV(float u, float v) override;
|
||||
|
||||
void write_data(float v) override;
|
||||
|
||||
void write_vertex_normal(float nx, float ny, float nz) override;
|
||||
|
||||
void write_vertex_color(uchar r, uchar g, uchar b, uchar a) override;
|
||||
|
|
|
@ -29,6 +29,14 @@ void FileBufferBinary::write_UV(float u, float v)
|
|||
write_bytes(span);
|
||||
}
|
||||
|
||||
/* Writes one custom attribute value as its raw 4-byte float representation.
 * NOTE(review): the bytes go out in native endianness; presumably the header
 * writer declares the matching binary format -- confirm against write_header. */
void FileBufferBinary::write_data(float v)
{
  char *bits = reinterpret_cast<char *>(&v);
  Span<char> span(bits, sizeof(float));

  write_bytes(span);
}
|
||||
|
||||
void FileBufferBinary::write_vertex_normal(float nx, float ny, float nz)
|
||||
{
|
||||
float3 vector(nx, ny, nz);
|
||||
|
|
|
@ -19,6 +19,8 @@ class FileBufferBinary : public FileBuffer {
|
|||
|
||||
void write_UV(float u, float v) override;
|
||||
|
||||
void write_data(float v) override;
|
||||
|
||||
void write_vertex_normal(float nx, float ny, float nz) override;
|
||||
|
||||
void write_vertex_color(uchar r, uchar g, uchar b, uchar a) override;
|
||||
|
|
|
@ -251,6 +251,19 @@ static const char *load_vertex_element(PlyReadBuffer &file,
|
|||
return "Vertex positions are not present in the file";
|
||||
}
|
||||
|
||||
Vector<int64_t> custom_attr_indices;
|
||||
for (const int64_t prop_idx : element.properties.index_range()) {
|
||||
aras_p marked this conversation as resolved
Outdated
Hans Goudey
commented
```
for (const int64_t prop_idx : element.properties.index_range()) {
```
|
||||
const PlyProperty &prop = element.properties[prop_idx];
|
||||
bool is_standard = ELEM(
|
||||
prop.name, "x", "y", "z", "nx", "ny", "nz", "red", "green", "blue", "alpha", "s", "t");
|
||||
if (is_standard)
|
||||
continue;
|
||||
|
||||
custom_attr_indices.append(prop_idx);
|
||||
PlyCustomAttribute attr(prop.name, element.count);
|
||||
data->vertex_custom_attr.append(attr);
|
||||
}
|
||||
|
||||
data->vertices.reserve(element.count);
|
||||
if (has_color) {
|
||||
data->vertex_colors.reserve(element.count);
|
||||
|
@ -329,6 +342,12 @@ static const char *load_vertex_element(PlyReadBuffer &file,
|
|||
uvmap.y = value_vec[uv_index.y];
|
||||
data->uv_coordinates.append(uvmap);
|
||||
}
|
||||
|
||||
/* Custom attributes */
|
||||
for (const int64_t ci : custom_attr_indices.index_range()) {
|
||||
float value = value_vec[custom_attr_indices[ci]];
|
||||
aras_p marked this conversation as resolved
Hans Goudey
commented
```
for (const int64_t ci : custom_attr_indices.index_range()) {
```
|
||||
data->vertex_custom_attr[ci].data[i] = value;
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
|
|
@ -100,6 +100,15 @@ Mesh *convert_ply_to_mesh(PlyData &data, const PLYImportParams ¶ms)
|
|||
uv_map.finish();
|
||||
}
|
||||
|
||||
/* Custom attributes */
|
||||
if (params.import_attributes && !data.vertex_custom_attr.is_empty()) {
|
||||
for (const PlyCustomAttribute &attr : data.vertex_custom_attr) {
|
||||
attributes.add<float>(attr.name,
|
||||
aras_p marked this conversation as resolved
Outdated
Hans Goudey
commented
Slightly simpler use of the attribute API here:
```
attributes.add<float>(attr.name,
ATTR_DOMAIN_POINT,
bke::AttributeInitVArray(VArray<float>::ForSpan(attr.data)));
```
|
||||
ATTR_DOMAIN_POINT,
|
||||
bke::AttributeInitVArray(VArray<float>::ForSpan(attr.data)));
|
||||
}
|
||||
}
|
||||
|
||||
/* Calculate edges from the rest of the mesh. */
|
||||
BKE_mesh_calc_edges(mesh, true, false);
|
||||
|
||||
|
|
|
@ -8,17 +8,26 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include "BLI_array.hh"
|
||||
#include "BLI_math_vector_types.hh"
|
||||
#include "BLI_string_ref.hh"
|
||||
#include "BLI_vector.hh"
|
||||
|
||||
namespace blender::io::ply {
|
||||
|
||||
enum PlyDataTypes { NONE, CHAR, UCHAR, SHORT, USHORT, INT, UINT, FLOAT, DOUBLE, PLY_TYPE_COUNT };
|
||||
|
||||
/* One custom vertex attribute written to / read from a PLY file: a property
 * name plus one value per vertex. Constructed with the attribute name and the
 * vertex count; `data` is sized accordingly and filled in by the caller. */
struct PlyCustomAttribute {
  PlyCustomAttribute(const StringRef name_, int64_t size) : name(name_), data(size) {}
  std::string name;  /* PLY property name. */
  Array<float> data; /* Any custom PLY attributes are converted to floats. */
};
|
||||
|
||||
struct PlyData {
|
||||
Vector<float3> vertices;
|
||||
Vector<float3> vertex_normals;
|
||||
Vector<float4> vertex_colors; /* Linear space, 0..1 range colors. */
|
||||
Vector<PlyCustomAttribute> vertex_custom_attr;
|
||||
Vector<std::pair<int, int>> edges;
|
||||
Vector<uint32_t> face_vertices;
|
||||
Vector<uint32_t> face_sizes;
|
||||
|
|
Loading…
Reference in New Issue
Looks like the attribute API could handle some of these conversions itself, if you thought that simplified things:
It's a bit unfortunate to have to implement all the others here, but I don't have a simple better way to do that right now. We have all these conversions implemented as multi-functions (`SeparateQuaternionFunction` for example), but setting up a field evaluation here is probably a bit too complex.

Hmm, that would make the "scalar to scalar" paths slightly simpler, but would still not address the "vectors/quats/colors need to be split into multiple scalars" complexity, which is the main issue here. I'll just probably leave it as is.
That's fine too