DRW: Add support for tangent on objects using display lists.

Only Metaballs remain unsupported.
However, the implementation does not match Cycles 100%, as Cycles converts
all objects to meshes.

Fixes T63424 EEVEE: Normal map node doesn't work with curve objects
2020-01-30 01:07:28 +01:00
parent c7c8ee6168
commit 15350c70be
3 changed files with 112 additions and 15 deletions
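
In short, the tangent added here is a finite difference between neighboring
grid vertices along the surface's V direction, orthonormalized against the
vertex normal. Below is a minimal standalone sketch of that idea in plain C;
the helper names (sub_v3, cross_v3, normalize_v3, grid_tangent) and the demo
grid are illustrative only, not Blender API:

#include <math.h>
#include <stdio.h>

static void sub_v3(float r[3], const float a[3], const float b[3])
{
  r[0] = a[0] - b[0];
  r[1] = a[1] - b[1];
  r[2] = a[2] - b[2];
}

static void cross_v3(float r[3], const float a[3], const float b[3])
{
  r[0] = a[1] * b[2] - a[2] * b[1];
  r[1] = a[2] * b[0] - a[0] * b[2];
  r[2] = a[0] * b[1] - a[1] * b[0];
}

static void normalize_v3(float v[3])
{
  const float len = sqrtf(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
  if (len > 0.0f) {
    v[0] /= len;
    v[1] /= len;
    v[2] /= len;
  }
}

/* Tangent for vertex `index` of a (parts x nr) surface grid: finite
 * difference along the grid's V direction, then made orthogonal to the
 * vertex normal. Mirrors the logic of surf_tan_quad() in the diff below. */
static void grid_tangent(
    float r_tan[3], const float (*verts)[3], const float nor[3], int index, int nr)
{
  float tmp[3];
  /* Forward difference on the first row, backward difference elsewhere. */
  if (index / nr == 0) {
    sub_v3(r_tan, verts[index + nr], verts[index]);
  }
  else {
    sub_v3(r_tan, verts[index], verts[index - nr]);
  }
  /* (nor x tan) x nor projects the tangent onto the plane perpendicular
   * to the normal (Gram-Schmidt for a unit normal). */
  cross_v3(tmp, nor, r_tan);
  cross_v3(r_tan, tmp, nor);
  normalize_v3(r_tan);
}

int main(void)
{
  /* 2x2 demo grid (nr = 2) lying in the XY plane. */
  const float verts[4][3] = {{0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {1, 1, 0}};
  const float nor[3] = {0, 0, 1};
  float tan[3];
  grid_tangent(tan, verts, nor, 0, 2);
  printf("tangent: %.1f %.1f %.1f\n", tan[0], tan[1], tan[2]); /* 0.0 1.0 0.0 */
  return 0;
}

The actual patch additionally packs the result into a signed 10_10_10_2
vertex attribute (GPUPackedNormal), with the w component used as a sign
(always 1 for surfaces).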


@@ -98,9 +98,10 @@ struct GPUBatch *DRW_metaball_batch_cache_get_edge_detection(struct Object *ob,
/* DispList */
void DRW_displist_vertbuf_create_pos_and_nor(struct ListBase *lb, struct GPUVertBuf *vbo);
void DRW_displist_vertbuf_create_wiredata(struct ListBase *lb, struct GPUVertBuf *vbo);
-void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(struct ListBase *lb,
-                                                         struct GPUVertBuf *vbo_pos_nor,
-                                                         struct GPUVertBuf *vbo_uv);
+void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(struct ListBase *lb,
+                                                                 struct GPUVertBuf *vbo_pos_nor,
+                                                                 struct GPUVertBuf *vbo_uv,
+                                                                 struct GPUVertBuf *vbo_tan);
void DRW_displist_indexbuf_create_lines_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo);
void DRW_displist_indexbuf_create_triangles_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo);
void DRW_displist_indexbuf_create_triangles_loop_split_by_material(struct ListBase *lb,


@@ -333,8 +333,7 @@ static void curve_cd_calc_used_gpu_layers(int *cd_layers,
*cd_layers |= CD_MLOOPUV;
break;
case CD_TANGENT:
-      /* Currently unsupported */
-      // *cd_layers |= CD_TANGENT;
+      *cd_layers |= CD_TANGENT;
break;
case CD_MCOL:
/* Curve objects don't have color data. */
@@ -358,6 +357,7 @@ typedef struct CurveBatchCache {
GPUVertBuf *loop_pos_nor;
GPUVertBuf *loop_uv;
GPUVertBuf *loop_tan;
} ordered;
struct {
@@ -992,6 +992,9 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
if (cache->cd_used & CD_MLOOPUV) {
DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_uv);
}
if (cache->cd_used & CD_TANGENT) {
DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_tan);
}
DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_pos_nor);
}
}
@@ -1007,6 +1010,7 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.curves_pos, CU_DATATYPE_WIRE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_pos_nor, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_uv, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_tan, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_tris, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_lines, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.curves_lines, CU_DATATYPE_WIRE);
@@ -1045,8 +1049,8 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
if (DRW_vbo_requested(cache->ordered.loop_pos_nor) ||
DRW_vbo_requested(cache->ordered.loop_uv)) {
-      DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(
-          lb, cache->ordered.loop_pos_nor, cache->ordered.loop_uv);
+      DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(
+          lb, cache->ordered.loop_pos_nor, cache->ordered.loop_uv, cache->ordered.loop_tan);
}
if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {


@@ -345,6 +345,8 @@ static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2
}
for (int i = 0; i < 4; i++) {
/* Note: For some reason the shading U and V are swapped compared to the
 * ones described in the surface format. */
/* Find UV based on the vertex's index into the grid array. */
r_uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
r_uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
@@ -359,15 +361,45 @@ static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2
}
}
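/* Computes the packed tangent for one vertex of a surface grid quad. */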
static GPUPackedNormal surf_tan_quad(const DispList *dl,
const float (*verts)[3],
const float nor[3],
uint index)
{
float tan[3], tmp[3];
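/* Row of this vertex in the (parts x nr) grid. */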
int v = index / dl->nr;
/* Note: For some reason the shading U and V are swapped compared to the
 * ones described in the surface format, so the tangent is along V. */
if (v == 0) {
sub_v3_v3v3(tan, verts[index + dl->nr], verts[index]);
}
else {
sub_v3_v3v3(tan, verts[index], verts[index - dl->nr]);
}
/* Orthonormalize around the normal. */
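/* For a unit `nor`, (nor x tan) x nor == tan - nor * dot(nor, tan): the
 * projection of `tan` onto the plane perpendicular to the normal. */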
cross_v3_v3v3(tmp, nor, tan);
cross_v3_v3v3(tan, tmp, nor);
normalize_v3(tan);
GPUPackedNormal ptan = GPU_normal_convert_i10_v3(tan);
ptan.w = 1; /* Pretty sure surfaces cannot have inverted normals. */
return ptan;
}
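/* Writes one triangle's worth of positions, normals, UVs, and tangents into
 * whichever raw buffers are in use (a step size of 0 means that VBO was not
 * requested). */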
static void displist_vertbuf_attr_set_tri_pos_nor_uv(GPUVertBufRaw *pos_step,
GPUVertBufRaw *nor_step,
GPUVertBufRaw *uv_step,
GPUVertBufRaw *tan_step,
const float v1[3],
const float v2[3],
const float v3[3],
const GPUPackedNormal *n1,
const GPUPackedNormal *n2,
const GPUPackedNormal *n3,
const GPUPackedNormal *t1,
const GPUPackedNormal *t2,
const GPUPackedNormal *t3,
const float uv1[2],
const float uv2[2],
const float uv3[2])
@@ -387,16 +419,24 @@ static void displist_vertbuf_attr_set_tri_pos_nor_uv(GPUVertBufRaw *pos_step,
normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv2);
normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv3);
}
if (tan_step->size != 0) {
*(GPUPackedNormal *)GPU_vertbuf_raw_step(tan_step) = *t1;
*(GPUPackedNormal *)GPU_vertbuf_raw_step(tan_step) = *t2;
*(GPUPackedNormal *)GPU_vertbuf_raw_step(tan_step) = *t3;
}
}
-void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
-                                                         GPUVertBuf *vbo_pos_nor,
-                                                         GPUVertBuf *vbo_uv)
+void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
+                                                                 GPUVertBuf *vbo_pos_nor,
+                                                                 GPUVertBuf *vbo_uv,
+                                                                 GPUVertBuf *vbo_tan)
{
static GPUVertFormat format_pos_nor = {0};
static GPUVertFormat format_uv = {0};
static GPUVertFormat format_tan = {0};
static struct {
-    uint pos, nor, uv;
+    uint pos, nor, uv, tan;
} attr_id;
if (format_pos_nor.attr_len == 0) {
/* initialize vertex format */
@@ -404,9 +444,15 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
&format_pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.nor = GPU_vertformat_attr_add(
&format_pos_nor, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
/* UVs are in [0..1] range. We can compress them. */
attr_id.uv = GPU_vertformat_attr_add(
&format_uv, "u", GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_alias_add(&format_uv, "au");
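/* Tangents are unit vectors, so they also compress well into a signed
 * 10_10_10_2 format; the w component carries the sign. */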
attr_id.tan = GPU_vertformat_attr_add(
&format_tan, "t", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_alias_add(&format_tan, "at");
}
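/* One entry per triangle corner: corner data is not shared between triangles. */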
int vbo_len_capacity = curve_render_surface_tri_len_get(lb) * 3;
@@ -414,6 +460,7 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
GPUVertBufRaw pos_step = {0};
GPUVertBufRaw nor_step = {0};
GPUVertBufRaw uv_step = {0};
GPUVertBufRaw tan_step = {0};
if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
GPU_vertbuf_init_with_format(vbo_pos_nor, &format_pos_nor);
@@ -426,6 +473,11 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
GPU_vertbuf_data_alloc(vbo_uv, vbo_len_capacity);
GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uv, &uv_step);
}
if (DRW_TEST_ASSIGN_VBO(vbo_tan)) {
GPU_vertbuf_init_with_format(vbo_tan, &format_tan);
GPU_vertbuf_data_alloc(vbo_tan, vbo_len_capacity);
GPU_vertbuf_attr_get_raw_data(vbo_tan, attr_id.tan, &tan_step);
}
BKE_displist_normals_add(lb);
@@ -440,6 +492,16 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
if (dl->type == DL_INDEX3) {
/* Currently 'DL_INDEX3' is always a flat surface with a single normal. */
const GPUPackedNormal pnor = GPU_normal_convert_i10_v3(dl->nors);
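/* Default tangent; replaced below when the tangent VBO was requested. */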
GPUPackedNormal ptan = {0, 0, 0, 1};
if (tan_step.size != 0) {
float tan[3];
/* We consider the surface flat, so the vector is already orthogonal to the normal. */
sub_v3_v3v3(tan, verts[idx[0]], verts[idx[2]]);
normalize_v3(tan);
ptan = GPU_normal_convert_i10_v3(tan);
}
const float x_max = (float)(dl->nr - 1);
uv[0][1] = uv[1][1] = uv[2][1] = 0.0f;
const int i_end = dl->parts;
@@ -453,12 +515,16 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
&nor_step,
&uv_step,
&tan_step,
verts[idx[0]],
verts[idx[2]],
verts[idx[1]],
&pnor,
&pnor,
&pnor,
&ptan,
&ptan,
&ptan,
uv[0],
uv[2],
uv[1]);
@@ -497,30 +563,43 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
}
GPUPackedNormal pnors_quad[4];
GPUPackedNormal ptans_quad[4];
if (is_smooth) {
for (int j = 0; j < 4; j++) {
pnors_quad[j] = GPU_normal_convert_i10_v3(nors[quad[j]]);
if (tan_step.size != 0) {
/* TODO(fclem) This is a waste of computation power because the same vertex
* is computed 4 times. */
ptans_quad[j] = surf_tan_quad(dl, verts, nors[quad[j]], quad[j]);
}
}
}
else {
float nor_flat[3];
normal_quad_v3(
nor_flat, verts[quad[0]], verts[quad[1]], verts[quad[2]], verts[quad[3]]);
if (tan_step.size != 0) {
for (int j = 0; j < 4; j++) {
ptans_quad[j] = surf_tan_quad(dl, verts, nor_flat, quad[j]);
}
}
pnors_quad[0] = GPU_normal_convert_i10_v3(nor_flat);
-      pnors_quad[1] = pnors_quad[0];
-      pnors_quad[2] = pnors_quad[0];
-      pnors_quad[3] = pnors_quad[0];
+      pnors_quad[1] = pnors_quad[2] = pnors_quad[3] = pnors_quad[0];
}
displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
&nor_step,
&uv_step,
&tan_step,
verts[quad[2]],
verts[quad[0]],
verts[quad[1]],
&pnors_quad[2],
&pnors_quad[0],
&pnors_quad[1],
&ptans_quad[2],
&ptans_quad[0],
&ptans_quad[1],
uv[2],
uv[0],
uv[1]);
@@ -528,12 +607,16 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
&nor_step,
&uv_step,
&tan_step,
verts[quad[0]],
verts[quad[2]],
verts[quad[3]],
&pnors_quad[0],
&pnors_quad[2],
&pnors_quad[3],
&ptans_quad[0],
&ptans_quad[2],
&ptans_quad[3],
uv[0],
uv[2],
uv[3]);
@@ -554,6 +637,7 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
for (int i = 0; i < i_end; i++, idx += 4) {
const bool is_tri = idx[2] != idx[3];
GPUPackedNormal ptan = {0};
GPUPackedNormal pnors_idx[4];
if (is_smooth) {
int idx_len = is_tri ? 3 : 4;
@@ -578,26 +662,34 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
&nor_step,
&uv_step,
&tan_step,
verts[idx[0]],
verts[idx[2]],
verts[idx[1]],
&pnors_idx[0],
&pnors_idx[2],
&pnors_idx[1],
&ptan,
&ptan,
&ptan,
uv[0],
uv[2],
uv[1]);
-        if (idx[2] != idx[3]) {
+        if (is_tri) {
displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
&nor_step,
&uv_step,
&tan_step,
verts[idx[2]],
verts[idx[0]],
verts[idx[3]],
&pnors_idx[2],
&pnors_idx[0],
&pnors_idx[3],
&ptan,
&ptan,
&ptan,
uv[2],
uv[0],
uv[3]);