path: root/gfx/src/asset/model.c
Diffstat (limited to 'gfx/src/asset/model.c')
-rw-r--r--  gfx/src/asset/model.c | 493
1 file changed, 303 insertions, 190 deletions
diff --git a/gfx/src/asset/model.c b/gfx/src/asset/model.c
index 37f129e..2053dc4 100644
--- a/gfx/src/asset/model.c
+++ b/gfx/src/asset/model.c
@@ -94,6 +94,7 @@
 #include "gfx/sizes.h"
 #include "gfx/util/shader.h"
 
+#include "gfx_assert.h"
 #include "scene/model_impl.h"
 
 #include "cstring.h"
@@ -110,7 +111,6 @@
 #define CGLTF_IMPLEMENTATION
 #include "cgltf.h"
 
-#include <assert.h>
 #include <stdbool.h>
 #include <stdlib.h>
 
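Note: "gfx_assert.h" is a project-local header that is not part of this diff, so its exact contents are unknown here. Judging from the call sites below (FAIL("") and FAIL("Unhandled accessor type %d in vertex positions", ...)), it provides a printf-style fatal-error macro. A minimal sketch of what it might contain, assuming it also keeps assert() available to this file:

// Hypothetical sketch of gfx_assert.h -- not the actual project header.
// Assumes FAIL() prints a printf-style message and aborts.
#pragma once

#include <assert.h>  // Keep assert() available to files that drop <assert.h>.
#include <stdio.h>
#include <stdlib.h>

#define FAIL(...)                                            \
  do {                                                       \
    fprintf(stderr, "fatal: %s:%d: ", __FILE__, __LINE__);   \
    fprintf(stderr, "" __VA_ARGS__);                         \
    fprintf(stderr, "\n");                                   \
    abort();                                                 \
  } while (0)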
@@ -323,204 +323,231 @@ static cgltf_size get_component_size(cgltf_component_type type) {
   return 0;
 }
 
+/// Return the dimensionality of the given data type.
+int get_num_dimensions(cgltf_type type) {
+  switch (type) {
+    case cgltf_type_scalar:
+      return 1;
+    case cgltf_type_vec2:
+      return 2;
+    case cgltf_type_vec3:
+      return 3;
+    case cgltf_type_vec4:
+      return 4;
+    case cgltf_type_mat2:
+      return 4; // 2x2
+    case cgltf_type_mat3:
+      return 9; // 3x3
+    case cgltf_type_mat4:
+      return 16; // 4x4
+    case cgltf_type_invalid:
+      FAIL("");
+      break;
+  }
+  FAIL("");
+  return 0;
+}
+
+/// Read an int64 from the given data pointer and accessor.
+/// The largest integer in glTF is u32, so we can fit all integers in an int64.
+static int64_t read_int(const void* component, const cgltf_accessor* accessor) {
+  assert(component);
+  assert(accessor);
+
+  switch (accessor->component_type) {
+    case cgltf_component_type_r_8: {
+      const int8_t c = *((int8_t*)component);
+      return c;
+    }
+    case cgltf_component_type_r_8u: {
+      const uint8_t c = *((uint8_t*)component);
+      return c;
+    }
+    case cgltf_component_type_r_16: {
+      const int16_t c = *((int16_t*)component);
+      return c;
+    }
+    case cgltf_component_type_r_16u: {
+      const uint16_t c = *((uint16_t*)component);
+      return c;
+    }
+    case cgltf_component_type_r_32u: {
+      const uint32_t c = *((uint32_t*)component);
+      return c;
+    }
+    case cgltf_component_type_r_32f: {
+      const float c = *((float*)component);
+      return (int64_t)c;
+    }
+    case cgltf_component_type_invalid:
+      FAIL("");
+      break;
+  }
+  FAIL("");
+  return 0;
+}
+
 /// Read a float from the given data pointer and accessor.
 ///
 /// This function uses the normalization equations from the spec. See the
 /// animation section:
 ///
 /// https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#animations
-static float read_float(const void* data, const cgltf_accessor* accessor) {
-  assert(data);
+static float read_float(const void* component, const cgltf_accessor* accessor) {
+  assert(component);
   assert(accessor);
 
   switch (accessor->component_type) {
     case cgltf_component_type_r_8: {
-      assert(accessor->normalized);
-      const int8_t c = *((int8_t*)data);
+      // assert(accessor->normalized);
+      const int8_t c = *((int8_t*)component);
       return max((float)c / 127.0, -1.0);
     }
     case cgltf_component_type_r_8u: {
-      assert(accessor->normalized);
-      const uint8_t c = *((uint8_t*)data);
+      // assert(accessor->normalized);
+      const uint8_t c = *((uint8_t*)component);
       return (float)c / 255.0;
     }
     case cgltf_component_type_r_16: {
-      assert(accessor->normalized);
-      const int16_t c = *((int16_t*)data);
+      // assert(accessor->normalized);
+      const int16_t c = *((int16_t*)component);
       return max((float)c / 32767.0, -1.0);
     }
     case cgltf_component_type_r_16u: {
-      assert(accessor->normalized);
-      const uint16_t c = *((uint16_t*)data);
+      // assert(accessor->normalized);
+      const uint16_t c = *((uint16_t*)component);
       return (float)c / 65535.0;
     }
     case cgltf_component_type_r_32u: {
-      assert(accessor->normalized);
-      const uint32_t c = *((uint32_t*)data);
+      // assert(accessor->normalized);
+      const uint32_t c = *((uint32_t*)component);
       return (float)c / 4294967295.0;
     }
     case cgltf_component_type_r_32f: {
-      const float c = *((float*)data);
+      const float c = *((float*)component);
       return c;
     }
     case cgltf_component_type_invalid:
-      assert(false);
+      FAIL("");
       break;
   }
-  assert(false);
+  FAIL("");
   return 0;
 }
 
-/// Iterate over the vectors in an accessor.
-#define ACCESSOR_FOREACH_VEC(dimensions, accessor, body) \
-  { \
-    assert((1 <= dimensions) && (dimensions <= 4)); \
-    assert( \
-        ((dimensions == 1) && (accessor->type == cgltf_type_scalar)) || \
-        ((dimensions == 2) && (accessor->type == cgltf_type_vec2)) || \
-        ((dimensions == 3) && (accessor->type == cgltf_type_vec3)) || \
-        ((dimensions == 4) && (accessor->type == cgltf_type_vec4))); \
-    const cgltf_buffer_view* view = accessor->buffer_view; \
-    const cgltf_buffer* buffer = view->buffer; \
-    const cgltf_size offset = accessor->offset + view->offset; \
-    const uint8_t* bytes = (const uint8_t*)buffer->data + offset; \
-    /* Component size in bytes. */ \
-    const cgltf_size comp_size = get_component_size(accessor->component_type); \
-    /* Element size in bytes. */ \
-    const cgltf_size elem_size = dimensions * comp_size; \
-    /* Stride in bytes. If the view stride is 0, then the elements are tightly \
-     * packed. */ \
-    const cgltf_size stride = view->stride != 0 ? view->stride : elem_size; \
-    /* There isn't an accessor stride in the spec, but cgltf still specifies \
-     * one. */ \
-    assert(accessor->stride == elem_size); \
-    /* Accessor data must fit inside the buffer. */ \
-    assert( \
-        (offset + (accessor->count * elem_size) + \
-         ((accessor->count - 1) * view->stride)) <= buffer->size); \
-    /* Accessor data must fit inside the view. */ \
-    assert(accessor->count * accessor->stride <= view->size); \
-    cgltf_float x = 0, y = 0, z = 0, w = 0; \
-    /* Silence unused variable warnings. */ \
-    (void)y; \
-    (void)z; \
-    (void)w; \
-    /* The {component type} X {dimensions} combinations are a pain to handle. \
-       For floats, we switch on type first and then lay out a loop for each \
-       dimension to get a tight loop with a possibly inlined body. For other \
-       types, we take the performance hit and perform checks and conversions \
-       inside the loop for simplicity. */ \
-    if (accessor->component_type == cgltf_component_type_r_32f) { \
-      switch (dimensions) { \
-        case 1: \
-          assert(accessor->type == cgltf_type_scalar); \
-          for (cgltf_size i = 0; i < accessor->count; ++i, bytes += stride) { \
-            const cgltf_float* floats = (const cgltf_float*)bytes; \
-            x = *floats; \
-            body; \
-          } \
-          break; \
-        case 2: \
-          assert(accessor->type == cgltf_type_vec2); \
-          for (cgltf_size i = 0; i < accessor->count; ++i, bytes += stride) { \
-            const cgltf_float* floats = (const cgltf_float*)bytes; \
-            x = *floats++; \
-            y = *floats; \
-            body; \
-          } \
-          break; \
-        case 3: \
-          assert(accessor->type == cgltf_type_vec3); \
-          for (cgltf_size i = 0; i < accessor->count; ++i, bytes += stride) { \
-            const cgltf_float* floats = (const cgltf_float*)bytes; \
-            x = *floats++; \
-            y = *floats++; \
-            z = *floats; \
-            body; \
-          } \
-          break; \
-        case 4: \
-          assert(accessor->type == cgltf_type_vec4); \
-          for (cgltf_size i = 0; i < accessor->count; ++i, bytes += stride) { \
-            const cgltf_float* floats = (const cgltf_float*)bytes; \
-            x = *floats++; \
-            y = *floats++; \
-            z = *floats++; \
-            w = *floats; \
-            body; \
-          } \
-          break; \
-      } \
-    } else { \
-      for (cgltf_size i = 0; i < accessor->count; ++i, bytes += stride) { \
-        const uint8_t* component = bytes; \
-        \
-        x = read_float(component, accessor); \
-        component += comp_size; \
-        if (dimensions > 1) { \
-          y = read_float(component, accessor); \
-          component += comp_size; \
-        } \
-        if (dimensions > 2) { \
-          z = read_float(component, accessor); \
-          component += comp_size; \
-        } \
-        if (dimensions > 3) { \
-          w = read_float(component, accessor); \
-          component += comp_size; \
-        } \
-        body; \
-      } \
-    } \
-  }
-
-/// Iterate over the matrices in an accessor.
-#define ACCESSOR_FOREACH_MAT(dimensions, accessor, body) \
-  { \
-    assert((2 <= dimensions) && (dimensions <= 4)); \
-    assert(!(dimensions == 2) || (accessor->type == cgltf_type_mat2)); \
-    assert(!(dimensions == 3) || (accessor->type == cgltf_type_mat3)); \
-    assert(!(dimensions == 4) || (accessor->type == cgltf_type_mat4)); \
-    const cgltf_buffer_view* view = accessor->buffer_view; \
-    const cgltf_buffer* buffer = view->buffer; \
-    const cgltf_size offset = accessor->offset + view->offset; \
-    const cgltf_size comp_size = get_component_size(accessor->component_type); \
-    const uint8_t* bytes = (const uint8_t*)buffer->data + offset; \
-    assert( \
-        (offset + accessor->count * dimensions * comp_size) < buffer->size); \
-    /* From the spec: */ \
-    /* "Buffer views with other types of data MUST NOT not define */ \
-    /* byteStride (unless such layout is explicitly enabled by an */ \
-    /* extension)."*/ \
-    assert(view->stride == 0); \
-    assert(accessor->stride == dimensions * dimensions * comp_size); \
-    assert(accessor->component_type == cgltf_component_type_r_32f); \
-    const cgltf_float* floats = (const cgltf_float*)bytes; \
-    switch (dimensions) { \
-      case 2: \
-        assert(accessor->type == cgltf_type_mat2); \
-        for (cgltf_size i = 0; i < accessor->count; ++i) { \
-          body; \
-          floats += 4; \
-        } \
-        break; \
-      case 3: \
-        assert(accessor->type == cgltf_type_mat3); \
-        for (cgltf_size i = 0; i < accessor->count; ++i) { \
-          body; \
-          floats += 9; \
-        } \
-        break; \
-      case 4: \
-        assert(accessor->type == cgltf_type_mat4); \
-        for (cgltf_size i = 0; i < accessor->count; ++i) { \
-          body; \
-          floats += 16; \
-        } \
-        break; \
-    } \
-  }
+typedef struct AccessorIter {
+  const cgltf_accessor* accessor;
+  const uint8_t* next_element;
+  cgltf_size comp_size; // Component size in bytes.
+  cgltf_size stride;    // Element stride in bytes.
+  cgltf_size index;     // Index of the next element.
+  bool is_matrix;
+} AccessorIter;
+
+typedef struct AccessorData {
+  union {
+    struct {
+      float x, y, z, w;       // Possibly normalized.
+      int64_t xi, yi, zi, wi; // Always unnormalized.
+    };
+    const float* floats;
+  };
+} AccessorData;
+
+bool accessor_iter_next(AccessorIter* iter, AccessorData* data) {
+  assert(iter);
+  assert(data);
+
+  if (iter->index < iter->accessor->count) {
+    const int dimensions = get_num_dimensions(iter->accessor->type);
+    const uint8_t* component = iter->next_element;
+
+    // So that the caller can access the element's components as an array.
+    data->floats = (const float*)component;
+
+    if (!iter->is_matrix) { // Scalar or vector.
+      // x
+      data->x = read_float(component, iter->accessor);
+      data->xi = read_int(component, iter->accessor);
+      component += iter->comp_size;
+      // y
+      if (dimensions > 1) {
+        data->y = read_float(component, iter->accessor);
+        data->yi = read_int(component, iter->accessor);
+        component += iter->comp_size;
+      }
+      // z
+      if (dimensions > 2) {
+        data->z = read_float(component, iter->accessor);
+        data->zi = read_int(component, iter->accessor);
+        component += iter->comp_size;
+      }
+      // w
+      if (dimensions > 3) {
+        data->w = read_float(component, iter->accessor);
+        data->wi = read_int(component, iter->accessor);
+        component += iter->comp_size;
+      }
+    }
+
+    iter->next_element += iter->stride;
+    iter->index++;
+    return true;
+  }
 
+  return false;
+}
+
+AccessorIter make_accessor_iter(const cgltf_accessor* accessor) {
+  assert(accessor);
+
+  const bool is_matrix = (accessor->type == cgltf_type_mat2) ||
+                         (accessor->type == cgltf_type_mat3) ||
+                         (accessor->type == cgltf_type_mat4);
+
+  const int dimensions = get_num_dimensions(accessor->type);
+  assert(
+      ((dimensions == 1) && (accessor->type == cgltf_type_scalar)) ||
+      ((dimensions == 2) && (accessor->type == cgltf_type_vec2)) ||
+      ((dimensions == 3) && (accessor->type == cgltf_type_vec3)) ||
+      ((dimensions == 4) && (accessor->type == cgltf_type_vec4)) ||
+      ((dimensions == 4) && (accessor->type == cgltf_type_mat2)) ||
+      ((dimensions == 9) && (accessor->type == cgltf_type_mat3)) ||
+      ((dimensions == 16) && (accessor->type == cgltf_type_mat4)));
+
+  const cgltf_buffer_view* view = accessor->buffer_view;
+  const cgltf_buffer* buffer = view->buffer;
+  const cgltf_size offset = accessor->offset + view->offset;
+  const uint8_t* bytes = (const uint8_t*)buffer->data + offset;
+  // Component size in bytes.
+  const cgltf_size comp_size = get_component_size(accessor->component_type);
+  // Element size in bytes.
+  const cgltf_size elem_size = dimensions * comp_size;
+  // Stride in bytes. If the view stride is 0, then the elements are tightly
+  // packed.
+  const cgltf_size stride = view->stride != 0 ? view->stride : elem_size;
+
+  // There isn't an accessor stride in the spec, but cgltf still specifies one.
+  assert(accessor->stride == elem_size);
+
+  // Accessor data must fit inside the view.
+  assert(accessor->offset + (accessor->count * accessor->stride) <= view->size);
+
+  // Accessor data must fit inside the buffer.
+  assert(
+      (offset + (accessor->count * elem_size) +
+       ((accessor->count - 1) * view->stride)) <= buffer->size);
+
+  return (AccessorIter){
+      .accessor = accessor,
+      .next_element = bytes,
+      .comp_size = comp_size,
+      .stride = stride,
+      .index = 0,
+      .is_matrix = is_matrix,
+  };
+}
+
 /// Return the total number of primitives in the scene. Each mesh may contain
 /// multiple primitives.
 ///
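For reference, a short usage sketch of the iterator API introduced above. This is illustrative only and not part of the commit: print_positions is a hypothetical caller, and cgltf.h plus the AccessorIter declarations above are assumed to be in scope.

#include <stdio.h>

// Hypothetical example: walk a vec3 POSITION accessor with the new iterator
// instead of the old ACCESSOR_FOREACH_VEC macro. AccessorData exposes each
// element both as floats (x, y, z, w) and as raw integers (xi, yi, zi, wi).
static void print_positions(const cgltf_accessor* positions) {
  AccessorIter iter = make_accessor_iter(positions);
  AccessorData vertex = {0};
  for (cgltf_size i = 0; accessor_iter_next(&iter, &vertex); ++i) {
    printf("v[%zu] = (%f, %f, %f)\n", (size_t)i, vertex.x, vertex.y, vertex.z);
  }
}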
@@ -703,7 +730,7 @@ static bool load_texture_and_uniform(
 
     textures[texture_index] = gfx_load_texture(gfx, cmd);
     if (!textures[texture_index]) {
-      prepend_error(
+      log_error(
           "Failed to load texture: %s",
           mstring_cstr(&cmd->data.texture.filepath));
       return false;
@@ -856,20 +883,25 @@ static Material* make_default_material() {
 
 /// Compute the bounding box of the vertices pointed to by the accessor.
 /// 'dim' is the dimension of the vertices (2D or 3D).
-aabb3 compute_aabb(const cgltf_accessor* accessor, int dim) {
+aabb3 compute_aabb(const cgltf_accessor* accessor) {
   aabb3 box = {0};
   if (accessor->has_min && accessor->has_max) {
     box = aabb3_make(
         vec3_from_array(accessor->min), vec3_from_array(accessor->max));
   } else {
-    ACCESSOR_FOREACH_VEC(dim, accessor, {
-      const vec3 p = vec3_make(x, y, z);
+    AccessorIter iter = make_accessor_iter(accessor);
+    AccessorData vertex = {0};
+    cgltf_size i = 0;
+
+    while (accessor_iter_next(&iter, &vertex)) {
+      const vec3 p = vec3_make(vertex.x, vertex.y, vertex.z);
       if (i == 0) {
         box = aabb3_make(p, p);
       } else {
         box = aabb3_add(box, p);
       }
-    });
+      ++i;
+    }
   }
   return box;
 }
@@ -1000,15 +1032,15 @@ static bool load_meshes(
       case cgltf_type_vec2:
         assert(geometry_desc.positions3d.buffer == 0);
         buffer_view_2d = &geometry_desc.positions2d;
-        geometry_desc.aabb = compute_aabb(accessor, 2);
+        geometry_desc.aabb = compute_aabb(accessor);
         break;
       case cgltf_type_vec3:
         assert(geometry_desc.positions2d.buffer == 0);
         buffer_view_3d = &geometry_desc.positions3d;
-        geometry_desc.aabb = compute_aabb(accessor, 3);
+        geometry_desc.aabb = compute_aabb(accessor);
         break;
       default:
-        LOGE(
+        FAIL(
             "Unhandled accessor type %d in vertex positions",
             accessor->type);
         assert(false);
@@ -1186,6 +1218,77 @@ static bool load_meshes(
   return true;
 }
 
+/// Compute bounding boxes for the joints in the model.
+static void compute_joint_bounding_boxes(
+    const cgltf_data* data, size_t num_joints, JointDesc* joint_descs) {
+  assert(data);
+  assert(joint_descs);
+  assert(num_joints <= GFX_MAX_NUM_JOINTS);
+
+  // Initialize bounding boxes so that we can compute unions below.
+  for (size_t i = 0; i < num_joints; ++i) {
+    joint_descs[i].box = aabb3_make_empty();
+  }
+
+  // Iterate over the meshes -> primitives -> vertices -> joint indices, and add
+  // the vertex to the joint's bounding box.
+  for (cgltf_size n = 0; n < data->nodes_count; ++n) {
+    const cgltf_node* node = &data->nodes[n];
+
+    if (node->skin) {
+      if (node->mesh) {
+        const cgltf_mesh* mesh = node->mesh;
+
+        for (cgltf_size pr = 0; pr < mesh->primitives_count; ++pr) {
+          const cgltf_primitive* prim = &mesh->primitives[pr];
+
+          // Find the indices of the positions and joints arrays in the
+          // primitive's attributes.
+          int positions_index = -1;
+          int joints_index = -1;
+          for (int a = 0; a < (int)prim->attributes_count; ++a) {
+            const cgltf_attribute* attrib = &prim->attributes[a];
+
+            if (attrib->type == cgltf_attribute_type_position) {
+              positions_index = a;
+            } else if (attrib->type == cgltf_attribute_type_joints) {
+              joints_index = a;
+            }
+          }
+
+          if ((positions_index != -1) && (joints_index != -1)) {
+            const cgltf_accessor* positions =
+                prim->attributes[positions_index].data;
+            const cgltf_accessor* joints = prim->attributes[joints_index].data;
+
+            assert(positions->count == joints->count);
+
+            AccessorIter positions_iter = make_accessor_iter(positions);
+            AccessorIter joints_iter = make_accessor_iter(joints);
+            AccessorData position = {0}, joint = {0};
+
+            while (accessor_iter_next(&positions_iter, &position)) {
+              const bool advance = accessor_iter_next(&joints_iter, &joint);
+              assert(advance); // Counts should match.
+
+              const vec3 p = vec3_make(position.x, position.y, position.z);
+              const int64_t j[4] = {joint.xi, joint.yi, joint.zi, joint.wi};
+
+              for (int i = 0; i < 4; ++i) {
+                const size_t joint_index = j[i];
+                assert((size_t)joint_index < num_joints);
+
+                joint_descs[joint_index].box =
+                    aabb3_add(joint_descs[joint_index].box, p);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
 /// Find the joint node with the smallest index across all skeletons.
 ///
 /// The channels in glTF may target arbitrary nodes in the scene (those nodes
@@ -1249,8 +1352,10 @@ static size_t load_skins(
     *skeleton_desc = (SkeletonDesc){.num_joints = skin->joints_count};
 
     // for (cgltf_size j = 0; j < skin->joints_count; ++j) {
-    ACCESSOR_FOREACH_MAT(4, matrices_accessor, {
-      const mat4 inv_bind_matrix = mat4_from_array(floats);
+    AccessorIter iter = make_accessor_iter(matrices_accessor);
+    AccessorData matrix = {0};
+    for (cgltf_size i = 0; accessor_iter_next(&iter, &matrix); ++i) {
+      const mat4 inv_bind_matrix = mat4_from_array(matrix.floats);
 
       // Joint is an index/pointer into the nodes array.
       const cgltf_size node_index = skin->joints[i] - data->nodes;
@@ -1275,7 +1380,7 @@ static size_t load_skins(
       joint_desc->inv_bind_matrix = inv_bind_matrix;
 
       is_joint_node[joint_index] = true;
-    });
+    };
 
     // glTF may specify a "skeleton", which is the root of the skin's
     // (skeleton's) node hierarchy.
@@ -1352,23 +1457,32 @@ static void load_animations(
         .num_keyframes = 0};
 
     // Read time inputs.
-    ACCESSOR_FOREACH_VEC(1, sampler->input, {
-      channel_desc->keyframes[i].time = x;
+    AccessorIter iter = make_accessor_iter(sampler->input);
+    AccessorData input = {0};
+    for (cgltf_size i = 0; accessor_iter_next(&iter, &input); ++i) {
+      channel_desc->keyframes[i].time = input.x;
       channel_desc->num_keyframes++;
-    });
+    }
 
     // Read transform outputs.
+    AccessorData output = {0};
     switch (channel->target_path) {
-      case cgltf_animation_path_type_translation:
-        ACCESSOR_FOREACH_VEC(3, sampler->output, {
-          channel_desc->keyframes[i].translation = vec3_make(x, y, z);
-        });
+      case cgltf_animation_path_type_translation: {
+        iter = make_accessor_iter(sampler->output);
+        for (cgltf_size i = 0; accessor_iter_next(&iter, &output); ++i) {
+          channel_desc->keyframes[i].translation =
+              vec3_make(output.x, output.y, output.z);
+        }
         break;
-      case cgltf_animation_path_type_rotation:
-        ACCESSOR_FOREACH_VEC(4, sampler->output, {
-          channel_desc->keyframes[i].rotation = qmake(x, y, z, w);
-        });
+      }
+      case cgltf_animation_path_type_rotation: {
+        iter = make_accessor_iter(sampler->output);
+        for (cgltf_size i = 0; accessor_iter_next(&iter, &output); ++i) {
+          channel_desc->keyframes[i].rotation =
+              qmake(output.x, output.y, output.z, output.w);
+        }
         break;
+      }
       default:
         // TODO: Handle other channel transformations.
         break;
@@ -1421,10 +1535,6 @@ static void load_nodes(
         assert(skin_index < data->skins_count);
         const Skeleton* skeleton = gfx_get_anima_skeleton(anima, skin_index);
         gfx_set_object_skeleton(object, skeleton);
-
-        // TODO: Compute AABBs/OOBBs for the skeleton's joints here. Iterate
-        // over the mesh's primitives, its vertices, their joint indices, and
-        // add the vertex to the AABB/OOBB.
       }
     } else if (node->camera) {
       assert(next_camera < data->cameras_count);
@@ -1686,6 +1796,9 @@ static Model* load_scene(
     anima_desc->num_joints = load_skins(data, buffers, base, anima_desc);
     load_animations(data, base, anima_desc);
 
+    compute_joint_bounding_boxes(
+        data, anima_desc->num_joints, anima_desc->joints);
+
     anima = gfx_make_anima(anima_desc);
     gfx_construct_anima_node(root_node, anima);
   }
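As a closing reference, the clamping behavior of the normalization equations used by read_float (per the glTF 2.0 animation section linked in its comment), checked on a few boundary values. The helpers below are hypothetical and illustrative only; they are not part of model.c.

#include <assert.h>
#include <stdint.h>

// Mirrors read_float's equations for two component types.
static float normalize_s8(int8_t c) {
  const float f = (float)c / 127.0f;
  return f < -1.0f ? -1.0f : f;  // max(c / 127.0, -1.0)
}

static float normalize_u16(uint16_t c) { return (float)c / 65535.0f; }

int main(void) {
  assert(normalize_s8(127) == 1.0f);     // Largest signed byte maps to exactly 1.
  assert(normalize_s8(-128) == -1.0f);   // -128 / 127 is below -1 and gets clamped.
  assert(normalize_u16(65535) == 1.0f);  // Largest unsigned short maps to exactly 1.
  return 0;
}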