author     3gg <3gg@shellblade.net>    2025-06-27 10:18:39 -0700
committer  3gg <3gg@shellblade.net>    2025-06-27 10:18:39 -0700
commit     bd57f345ed9dbed1d81683e48199626de2ea9044 (patch)
tree       4221f2f2a7ad2244d2e93052bd68187ec91b8ea9 /contrib
parent     9a82ce0083437a4f9f58108b2c23b957d2249ad8 (diff)
Restructure project (HEAD, main)
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/cgltf-tangents/CMakeLists.txt            13
-rw-r--r--  contrib/cgltf-tangents/LICENSE                   79
-rw-r--r--  contrib/cgltf-tangents/MikkTSpace/README.md       4
-rw-r--r--  contrib/cgltf-tangents/MikkTSpace/mikktspace.c 1899
-rw-r--r--  contrib/cgltf-tangents/MikkTSpace/mikktspace.h  145
-rw-r--r--  contrib/cgltf-tangents/README.md                 42
-rw-r--r--  contrib/cgltf-tangents/cgltf_tangents.c         618
-rw-r--r--  contrib/cgltf-tangents/cgltf_tangents.h          67
-rw-r--r--  contrib/cgltf-tangents/test/CMakeLists.txt       11
-rw-r--r--  contrib/cgltf-tangents/test/main.c               86
-rw-r--r--  contrib/cgltf/CMakeLists.txt                      8
-rw-r--r--  contrib/cgltf/LICENSE                             7
-rw-r--r--  contrib/cgltf/README.md                         154
-rw-r--r--  contrib/cgltf/cgltf.h                          5746
-rw-r--r--  contrib/cgltf/cgltf_write.h                    1173
-rw-r--r--  contrib/stb/CMakeLists.txt                        8
-rw-r--r--  contrib/stb/stb_image.h                        7762
17 files changed, 17822 insertions, 0 deletions
diff --git a/contrib/cgltf-tangents/CMakeLists.txt b/contrib/cgltf-tangents/CMakeLists.txt
new file mode 100644
index 0000000..2c0771e
--- /dev/null
+++ b/contrib/cgltf-tangents/CMakeLists.txt
@@ -0,0 +1,13 @@
1cmake_minimum_required(VERSION 3.0)
2
3project(cgltf-tangents)
4
5add_library(cgltf-tangents
6 cgltf_tangents.c
7 MikkTSpace/mikktspace.c)
8
9target_include_directories(cgltf-tangents PUBLIC
10 ${CMAKE_CURRENT_SOURCE_DIR})
11
12target_link_libraries(cgltf-tangents PUBLIC
13 cgltf)
diff --git a/contrib/cgltf-tangents/LICENSE b/contrib/cgltf-tangents/LICENSE
new file mode 100644
index 0000000..7796e37
--- /dev/null
+++ b/contrib/cgltf-tangents/LICENSE
@@ -0,0 +1,79 @@
1This project has two third-party dependencies:
2- MikkTSpace
3- cgltf
4
5The license for this project and its dependencies are included below.
6
7--------------------------------------------------------------------------------
8cgltf-tangents
9--------------------------------------------------------------------------------
10
11Copyright 2022 Marc Sunet
12
13Redistribution and use in source and binary forms, with or without modification,
14are permitted provided that the following conditions are met:
15
161. Redistributions of source code must retain the above copyright notice, this
17list of conditions and the following disclaimer.
18
192. Redistributions in binary form must reproduce the above copyright notice,
20this list of conditions and the following disclaimer in the documentation and/or
21other materials provided with the distribution.
22
23THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
24ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
27ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33
34--------------------------------------------------------------------------------
35MikkTSpace
36--------------------------------------------------------------------------------
37
38Copyright (C) 2011 by Morten S. Mikkelsen
39
40This software is provided 'as-is', without any express or implied
41warranty. In no event will the authors be held liable for any damages
42arising from the use of this software.
43
44Permission is granted to anyone to use this software for any purpose,
45including commercial applications, and to alter it and redistribute it
46freely, subject to the following restrictions:
47
481. The origin of this software must not be misrepresented; you must not
49 claim that you wrote the original software. If you use this software
50 in a product, an acknowledgment in the product documentation would be
51 appreciated but is not required.
52
532. Altered source versions must be plainly marked as such, and must not be
54 misrepresented as being the original software.
55
563. This notice may not be removed or altered from any source distribution.
57
58--------------------------------------------------------------------------------
59cgltf
60--------------------------------------------------------------------------------
61
62Copyright (c) 2018-2021 Johannes Kuhlmann
63
64Permission is hereby granted, free of charge, to any person obtaining a copy of
65this software and associated documentation files (the "Software"), to deal in
66the Software without restriction, including without limitation the rights to
67use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
68the Software, and to permit persons to whom the Software is furnished to do so,
69subject to the following conditions:
70
71The above copyright notice and this permission notice shall be included in all
72copies or substantial portions of the Software.
73
74THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
75IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
76FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
77COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
78IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
79CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/contrib/cgltf-tangents/MikkTSpace/README.md b/contrib/cgltf-tangents/MikkTSpace/README.md
new file mode 100644
index 0000000..9fda155
--- /dev/null
+++ b/contrib/cgltf-tangents/MikkTSpace/README.md
@@ -0,0 +1,4 @@
1# MikkTSpace
2A common standard for tangent space used in baking tools to produce normal maps.
3
4More information can be found at http://www.mikktspace.com/.
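
[Aside on usage: mikktspace.c below is driven entirely through a caller-supplied
callback table -- the m_get*/m_set* function pointers referenced throughout the
file; the corresponding struct definitions live in mikktspace.h, which this
commit also adds. A minimal self-contained consumer might look like the sketch
below. The hard-coded single triangle and the helper names are illustrative
only, not part of this commit; for this input the library should report a
tangent of (1, 0, 0) with sign +1 at every corner.]

#include <stdio.h>
#include "mikktspace.h"

/* Illustrative data: one triangle in the XY plane, unit UVs. */
static const float POS[3][3] = {{0,0,0}, {1,0,0}, {0,1,0}};
static const float NRM[3][3] = {{0,0,1}, {0,0,1}, {0,0,1}};
static const float UV[3][2]  = {{0,0}, {1,0}, {0,1}};

static int getNumFaces(const SMikkTSpaceContext* c) { (void)c; return 1; }

static int getNumVerticesOfFace(const SMikkTSpaceContext* c, const int f)
{ (void)c; (void)f; return 3; }

static void getPosition(const SMikkTSpaceContext* c, float out[], const int f, const int v)
{ (void)c; (void)f; out[0]=POS[v][0]; out[1]=POS[v][1]; out[2]=POS[v][2]; }

static void getNormal(const SMikkTSpaceContext* c, float out[], const int f, const int v)
{ (void)c; (void)f; out[0]=NRM[v][0]; out[1]=NRM[v][1]; out[2]=NRM[v][2]; }

static void getTexCoord(const SMikkTSpaceContext* c, float out[], const int f, const int v)
{ (void)c; (void)f; out[0]=UV[v][0]; out[1]=UV[v][1]; }

static void setTSpaceBasic(const SMikkTSpaceContext* c, const float tangent[],
                           const float sign, const int f, const int v)
{
    (void)c; (void)f;
    /* The bitangent is reconstructed as sign * cross(normal, tangent). */
    printf("vert %d: tangent (%g, %g, %g), sign %g\n",
           v, tangent[0], tangent[1], tangent[2], sign);
}

int main(void)
{
    SMikkTSpaceInterface iface = {0};
    SMikkTSpaceContext ctx = {0};
    iface.m_getNumFaces          = getNumFaces;
    iface.m_getNumVerticesOfFace = getNumVerticesOfFace;
    iface.m_getPosition          = getPosition;
    iface.m_getNormal            = getNormal;
    iface.m_getTexCoord          = getTexCoord;
    iface.m_setTSpaceBasic       = setTSpaceBasic; /* m_setTSpace may stay NULL */
    ctx.m_pInterface = &iface;
    return genTangSpaceDefault(&ctx) ? 0 : 1;
}
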
diff --git a/contrib/cgltf-tangents/MikkTSpace/mikktspace.c b/contrib/cgltf-tangents/MikkTSpace/mikktspace.c
new file mode 100644
index 0000000..0342ae0
--- /dev/null
+++ b/contrib/cgltf-tangents/MikkTSpace/mikktspace.c
@@ -0,0 +1,1899 @@
1/** \file mikktspace/mikktspace.c
2 * \ingroup mikktspace
3 */
4/**
5 * Copyright (C) 2011 by Morten S. Mikkelsen
6 *
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
10 *
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, subject to the following restrictions:
14 *
15 * 1. The origin of this software must not be misrepresented; you must not
16 * claim that you wrote the original software. If you use this software
17 * in a product, an acknowledgment in the product documentation would be
18 * appreciated but is not required.
19 * 2. Altered source versions must be plainly marked as such, and must not be
20 * misrepresented as being the original software.
21 * 3. This notice may not be removed or altered from any source distribution.
22 */
23
24#include <assert.h>
25#include <stdio.h>
26#include <math.h>
27#include <string.h>
28#include <float.h>
29#include <stdlib.h>
30
31#include "mikktspace.h"
32
33#define TFALSE 0
34#define TTRUE 1
35
36#ifndef M_PI
37#define M_PI 3.1415926535897932384626433832795
38#endif
39
40#define INTERNAL_RND_SORT_SEED 39871946
41
42// internal structure
43typedef struct {
44 float x, y, z;
45} SVec3;
46
47static tbool veq( const SVec3 v1, const SVec3 v2 )
48{
49 return (v1.x == v2.x) && (v1.y == v2.y) && (v1.z == v2.z);
50}
51
52static SVec3 vadd( const SVec3 v1, const SVec3 v2 )
53{
54 SVec3 vRes;
55
56 vRes.x = v1.x + v2.x;
57 vRes.y = v1.y + v2.y;
58 vRes.z = v1.z + v2.z;
59
60 return vRes;
61}
62
63
64static SVec3 vsub( const SVec3 v1, const SVec3 v2 )
65{
66 SVec3 vRes;
67
68 vRes.x = v1.x - v2.x;
69 vRes.y = v1.y - v2.y;
70 vRes.z = v1.z - v2.z;
71
72 return vRes;
73}
74
75static SVec3 vscale(const float fS, const SVec3 v)
76{
77 SVec3 vRes;
78
79 vRes.x = fS * v.x;
80 vRes.y = fS * v.y;
81 vRes.z = fS * v.z;
82
83 return vRes;
84}
85
86static float LengthSquared( const SVec3 v )
87{
88 return v.x*v.x + v.y*v.y + v.z*v.z;
89}
90
91static float Length( const SVec3 v )
92{
93 return sqrtf(LengthSquared(v));
94}
95
96static SVec3 Normalize( const SVec3 v )
97{
98 return vscale(1 / Length(v), v);
99}
100
101static float vdot( const SVec3 v1, const SVec3 v2)
102{
103 return v1.x*v2.x + v1.y*v2.y + v1.z*v2.z;
104}
105
106
107static tbool NotZero(const float fX)
108{
109 // could possibly use FLT_EPSILON instead
110 return fabsf(fX) > FLT_MIN;
111}
112
113static tbool VNotZero(const SVec3 v)
114{
115 // might change this to an epsilon based test
116 return NotZero(v.x) || NotZero(v.y) || NotZero(v.z);
117}
118
119
120
121typedef struct {
122 int iNrFaces;
123 int * pTriMembers;
124} SSubGroup;
125
126typedef struct {
127 int iNrFaces;
128 int * pFaceIndices;
129 int iVertexRepresentitive;
130 tbool bOrientPreservering;
131} SGroup;
132
133//
134#define MARK_DEGENERATE 1
135#define QUAD_ONE_DEGEN_TRI 2
136#define GROUP_WITH_ANY 4
137#define ORIENT_PRESERVING 8
138
139
140
141typedef struct {
142 int FaceNeighbors[3];
143 SGroup * AssignedGroup[3];
144
145 // normalized first order face derivatives
146 SVec3 vOs, vOt;
147 float fMagS, fMagT; // original magnitudes
148
149 // determines whether the current and the next triangle form a quad.
150 int iOrgFaceNumber;
151 int iFlag, iTSpacesOffs;
152 unsigned char vert_num[4];
153} STriInfo;
154
155typedef struct {
156 SVec3 vOs;
157 float fMagS;
158 SVec3 vOt;
159 float fMagT;
160 int iCounter; // this is to average back into quads.
161 tbool bOrient;
162} STSpace;
163
164static int GenerateInitialVerticesIndexList(STriInfo pTriInfos[], int piTriList_out[], const SMikkTSpaceContext * pContext, const int iNrTrianglesIn);
165static void GenerateSharedVerticesIndexList(int piTriList_in_and_out[], const SMikkTSpaceContext * pContext, const int iNrTrianglesIn);
166static void InitTriInfo(STriInfo pTriInfos[], const int piTriListIn[], const SMikkTSpaceContext * pContext, const int iNrTrianglesIn);
167static int Build4RuleGroups(STriInfo pTriInfos[], SGroup pGroups[], int piGroupTrianglesBuffer[], const int piTriListIn[], const int iNrTrianglesIn);
168static tbool GenerateTSpaces(STSpace psTspace[], const STriInfo pTriInfos[], const SGroup pGroups[],
169 const int iNrActiveGroups, const int piTriListIn[], const float fThresCos,
170 const SMikkTSpaceContext * pContext);
171
172static int MakeIndex(const int iFace, const int iVert)
173{
174 assert(iVert>=0 && iVert<4 && iFace>=0);
175 return (iFace<<2) | (iVert&0x3);
176}
177
178static void IndexToData(int * piFace, int * piVert, const int iIndexIn)
179{
180 piVert[0] = iIndexIn&0x3;
181 piFace[0] = iIndexIn>>2;
182}
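/* Worked example: MakeIndex(5, 2) packs face 5 and vertex 2 as
 * (5<<2)|2 == 22; IndexToData(&iF, &iV, 22) recovers iF==5, iV==2.
 * The two low bits hold the vertex (0..3, enough for quads); the
 * remaining bits hold the face number. */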
183
184static STSpace AvgTSpace(const STSpace * pTS0, const STSpace * pTS1)
185{
186 STSpace ts_res;
187
188 // this if is important. Due to floating point precision,
189 // averaging when ts0==ts1 can introduce a slight difference,
190 // which results in tangent space splits later on
191 if (pTS0->fMagS==pTS1->fMagS && pTS0->fMagT==pTS1->fMagT &&
192 veq(pTS0->vOs,pTS1->vOs) && veq(pTS0->vOt, pTS1->vOt))
193 {
194 ts_res.fMagS = pTS0->fMagS;
195 ts_res.fMagT = pTS0->fMagT;
196 ts_res.vOs = pTS0->vOs;
197 ts_res.vOt = pTS0->vOt;
198 }
199 else
200 {
201 ts_res.fMagS = 0.5f*(pTS0->fMagS+pTS1->fMagS);
202 ts_res.fMagT = 0.5f*(pTS0->fMagT+pTS1->fMagT);
203 ts_res.vOs = vadd(pTS0->vOs,pTS1->vOs);
204 ts_res.vOt = vadd(pTS0->vOt,pTS1->vOt);
205 if ( VNotZero(ts_res.vOs) ) ts_res.vOs = Normalize(ts_res.vOs);
206 if ( VNotZero(ts_res.vOt) ) ts_res.vOt = Normalize(ts_res.vOt);
207 }
208
209 return ts_res;
210}
211
212
213
214static SVec3 GetPosition(const SMikkTSpaceContext * pContext, const int index);
215static SVec3 GetNormal(const SMikkTSpaceContext * pContext, const int index);
216static SVec3 GetTexCoord(const SMikkTSpaceContext * pContext, const int index);
217
218
219// degen triangles
220static void DegenPrologue(STriInfo pTriInfos[], int piTriList_out[], const int iNrTrianglesIn, const int iTotTris);
221static void DegenEpilogue(STSpace psTspace[], STriInfo pTriInfos[], int piTriListIn[], const SMikkTSpaceContext * pContext, const int iNrTrianglesIn, const int iTotTris);
222
223
224tbool genTangSpaceDefault(const SMikkTSpaceContext * pContext)
225{
226 return genTangSpace(pContext, 180.0f);
227}
228
229tbool genTangSpace(const SMikkTSpaceContext * pContext, const float fAngularThreshold)
230{
231 // count nr_triangles
232 int * piTriListIn = NULL, * piGroupTrianglesBuffer = NULL;
233 STriInfo * pTriInfos = NULL;
234 SGroup * pGroups = NULL;
235 STSpace * psTspace = NULL;
236 int iNrTrianglesIn = 0, f=0, t=0, i=0;
237 int iNrTSPaces = 0, iTotTris = 0, iDegenTriangles = 0, iNrMaxGroups = 0;
238 int iNrActiveGroups = 0, index = 0;
239 const int iNrFaces = pContext->m_pInterface->m_getNumFaces(pContext);
240 tbool bRes = TFALSE;
241 const float fThresCos = (float) cos((fAngularThreshold*(float)M_PI)/180.0f);
242
243 // verify all call-backs have been set
244 if ( pContext->m_pInterface->m_getNumFaces==NULL ||
245 pContext->m_pInterface->m_getNumVerticesOfFace==NULL ||
246 pContext->m_pInterface->m_getPosition==NULL ||
247 pContext->m_pInterface->m_getNormal==NULL ||
248 pContext->m_pInterface->m_getTexCoord==NULL )
249 return TFALSE;
250
251 // count triangles on supported faces
252 for (f=0; f<iNrFaces; f++)
253 {
254 const int verts = pContext->m_pInterface->m_getNumVerticesOfFace(pContext, f);
255 if (verts==3) ++iNrTrianglesIn;
256 else if (verts==4) iNrTrianglesIn += 2;
257 }
258 if (iNrTrianglesIn<=0) return TFALSE;
259
260 // allocate memory for an index list
261 piTriListIn = (int *) malloc(sizeof(int)*3*iNrTrianglesIn);
262 pTriInfos = (STriInfo *) malloc(sizeof(STriInfo)*iNrTrianglesIn);
263 if (piTriListIn==NULL || pTriInfos==NULL)
264 {
265 if (piTriListIn!=NULL) free(piTriListIn);
266 if (pTriInfos!=NULL) free(pTriInfos);
267 return TFALSE;
268 }
269
270 // make an initial triangle --> face index list
271 iNrTSPaces = GenerateInitialVerticesIndexList(pTriInfos, piTriListIn, pContext, iNrTrianglesIn);
272
273 // make a welded index list of identical positions and attributes (pos, norm, texc)
274 //printf("gen welded index list begin\n");
275 GenerateSharedVerticesIndexList(piTriListIn, pContext, iNrTrianglesIn);
276 //printf("gen welded index list end\n");
277
278 // Mark all degenerate triangles
279 iTotTris = iNrTrianglesIn;
280 iDegenTriangles = 0;
281 for (t=0; t<iTotTris; t++)
282 {
283 const int i0 = piTriListIn[t*3+0];
284 const int i1 = piTriListIn[t*3+1];
285 const int i2 = piTriListIn[t*3+2];
286 const SVec3 p0 = GetPosition(pContext, i0);
287 const SVec3 p1 = GetPosition(pContext, i1);
288 const SVec3 p2 = GetPosition(pContext, i2);
289 if (veq(p0,p1) || veq(p0,p2) || veq(p1,p2)) // degenerate
290 {
291 pTriInfos[t].iFlag |= MARK_DEGENERATE;
292 ++iDegenTriangles;
293 }
294 }
295 iNrTrianglesIn = iTotTris - iDegenTriangles;
296
297 // mark all triangle pairs that belong to a quad with only one
298 // good triangle. These need special treatment in DegenEpilogue().
299 // Additionally, move all good triangles to the start of
300 // pTriInfos[] and piTriListIn[] without changing order and
301 // put the degenerate triangles last.
302 DegenPrologue(pTriInfos, piTriListIn, iNrTrianglesIn, iTotTris);
303
304
305 // evaluate triangle level attributes and neighbor list
306 //printf("gen neighbors list begin\n");
307 InitTriInfo(pTriInfos, piTriListIn, pContext, iNrTrianglesIn);
308 //printf("gen neighbors list end\n");
309
310
311 // identify groups based on connectivity, following the 4 rules
312 iNrMaxGroups = iNrTrianglesIn*3;
313 pGroups = (SGroup *) malloc(sizeof(SGroup)*iNrMaxGroups);
314 piGroupTrianglesBuffer = (int *) malloc(sizeof(int)*iNrTrianglesIn*3);
315 if (pGroups==NULL || piGroupTrianglesBuffer==NULL)
316 {
317 if (pGroups!=NULL) free(pGroups);
318 if (piGroupTrianglesBuffer!=NULL) free(piGroupTrianglesBuffer);
319 free(piTriListIn);
320 free(pTriInfos);
321 return TFALSE;
322 }
323 //printf("gen 4rule groups begin\n");
324 iNrActiveGroups =
325 Build4RuleGroups(pTriInfos, pGroups, piGroupTrianglesBuffer, piTriListIn, iNrTrianglesIn);
326 //printf("gen 4rule groups end\n");
327
328 //
329
330 psTspace = (STSpace *) malloc(sizeof(STSpace)*iNrTSPaces);
331 if (psTspace==NULL)
332 {
333 free(piTriListIn);
334 free(pTriInfos);
335 free(pGroups);
336 free(piGroupTrianglesBuffer);
337 return TFALSE;
338 }
339 memset(psTspace, 0, sizeof(STSpace)*iNrTSPaces);
340 for (t=0; t<iNrTSPaces; t++)
341 {
342 psTspace[t].vOs.x=1.0f; psTspace[t].vOs.y=0.0f; psTspace[t].vOs.z=0.0f; psTspace[t].fMagS = 1.0f;
343 psTspace[t].vOt.x=0.0f; psTspace[t].vOt.y=1.0f; psTspace[t].vOt.z=0.0f; psTspace[t].fMagT = 1.0f;
344 }
345
346 // make tspaces; each group is split into subgroups, if necessary,
347 // based on fAngularThreshold. Finally a tangent space is made for
348 // every resulting subgroup
349 //printf("gen tspaces begin\n");
350 bRes = GenerateTSpaces(psTspace, pTriInfos, pGroups, iNrActiveGroups, piTriListIn, fThresCos, pContext);
351 //printf("gen tspaces end\n");
352
353 // clean up
354 free(pGroups);
355 free(piGroupTrianglesBuffer);
356
357 if (!bRes) // if an allocation in GenerateTSpaces() failed
358 {
359 // clean up and return false
360 free(pTriInfos); free(piTriListIn); free(psTspace);
361 return TFALSE;
362 }
363
364
365 // degenerate quads with one good triangle will be fixed by copying a space from
366 // the good triangle to the coinciding vertex.
367 // all other degenerate triangles will just copy a space from any good triangle
368 // with the same welded index in piTriListIn[].
369 DegenEpilogue(psTspace, pTriInfos, piTriListIn, pContext, iNrTrianglesIn, iTotTris);
370
371 free(pTriInfos); free(piTriListIn);
372
373 index = 0;
374 for (f=0; f<iNrFaces; f++)
375 {
376 const int verts = pContext->m_pInterface->m_getNumVerticesOfFace(pContext, f);
377 if (verts!=3 && verts!=4) continue;
378
379
380 // I've decided to let degenerate triangles and group-with-anythings
381 // vary between left/right hand coordinate systems at the vertices.
382 // All healthy triangles, on the other hand, are built to be consistently one or the other.
383
384 /*// force the coordinate system orientation to be uniform for every face.
385 // (this is already the case for good triangles but not for
386 // degenerate ones and those with bGroupWithAnything==true)
387 bool bOrient = psTspace[index].bOrient;
388 if (psTspace[index].iCounter == 0) // tspace was not derived from a group
389 {
390 // look for a space created in GenerateTSpaces() by iCounter>0
391 bool bNotFound = true;
392 int i=1;
393 while (i<verts && bNotFound)
394 {
395 if (psTspace[index+i].iCounter > 0) bNotFound=false;
396 else ++i;
397 }
398 if (!bNotFound) bOrient = psTspace[index+i].bOrient;
399 }*/
400
401 // set data
402 for (i=0; i<verts; i++)
403 {
404 const STSpace * pTSpace = &psTspace[index];
405 float tang[] = {pTSpace->vOs.x, pTSpace->vOs.y, pTSpace->vOs.z};
406 float bitang[] = {pTSpace->vOt.x, pTSpace->vOt.y, pTSpace->vOt.z};
407 if (pContext->m_pInterface->m_setTSpace!=NULL)
408 pContext->m_pInterface->m_setTSpace(pContext, tang, bitang, pTSpace->fMagS, pTSpace->fMagT, pTSpace->bOrient, f, i);
409 if (pContext->m_pInterface->m_setTSpaceBasic!=NULL)
410 pContext->m_pInterface->m_setTSpaceBasic(pContext, tang, pTSpace->bOrient==TTRUE ? 1.0f : (-1.0f), f, i);
411
412 ++index;
413 }
414 }
415
416 free(psTspace);
417
418
419 return TTRUE;
420}
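/* Illustrative usage note: genTangSpaceDefault() runs genTangSpace() with
 * fAngularThreshold = 180 degrees, i.e. fThresCos = cos(pi) = -1, so the
 * fCosS>fThresCos / fCosT>fThresCos test in GenerateTSpaces() practically
 * never splits a group by angle. Passing e.g. 45.0f instead gives
 * fThresCos ~= 0.707, so tangents deviating by more than 45 degrees around
 * a shared vertex land in separate subgroups (a hard tangent split). */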
421
422///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
423
424typedef struct {
425 float vert[3];
426 int index;
427} STmpVert;
428
429static const int g_iCells = 2048;
430
431#ifdef _MSC_VER
432# define NOINLINE __declspec(noinline)
433#else
434# define NOINLINE __attribute__ ((noinline))
435#endif
436
437// it is IMPORTANT that this function is called to evaluate the hash since
438// inlining could potentially reorder instructions and generate different
439// results for the same effective input value fVal.
440static NOINLINE int FindGridCell(const float fMin, const float fMax, const float fVal)
441{
442 const float fIndex = g_iCells * ((fVal-fMin)/(fMax-fMin));
443 const int iIndex = (int)fIndex;
444 return iIndex < g_iCells ? (iIndex >= 0 ? iIndex : 0) : (g_iCells - 1);
445}
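/* Worked example: with fMin=0, fMax=1 and g_iCells=2048, fVal=0.5 maps to
 * cell (int)(2048*0.5) == 1024; fVal==fMax yields 2048 and is clamped to
 * g_iCells-1 == 2047, and any fVal below fMin clamps to cell 0. */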
446
447static void MergeVertsFast(int piTriList_in_and_out[], STmpVert pTmpVert[], const SMikkTSpaceContext * pContext, const int iL_in, const int iR_in);
448static void MergeVertsSlow(int piTriList_in_and_out[], const SMikkTSpaceContext * pContext, const int pTable[], const int iEntries);
449static void GenerateSharedVerticesIndexListSlow(int piTriList_in_and_out[], const SMikkTSpaceContext * pContext, const int iNrTrianglesIn);
450
451static void GenerateSharedVerticesIndexList(int piTriList_in_and_out[], const SMikkTSpaceContext * pContext, const int iNrTrianglesIn)
452{
453
454 // Generate bounding box
455 int * piHashTable=NULL, * piHashCount=NULL, * piHashOffsets=NULL, * piHashCount2=NULL;
456 STmpVert * pTmpVert = NULL;
457 int i=0, iChannel=0, k=0, e=0;
458 int iMaxCount=0;
459 SVec3 vMin = GetPosition(pContext, 0), vMax = vMin, vDim;
460 float fMin, fMax;
461 for (i=1; i<(iNrTrianglesIn*3); i++)
462 {
463 const int index = piTriList_in_and_out[i];
464
465 const SVec3 vP = GetPosition(pContext, index);
466 if (vMin.x > vP.x) vMin.x = vP.x;
467 else if (vMax.x < vP.x) vMax.x = vP.x;
468 if (vMin.y > vP.y) vMin.y = vP.y;
469 else if (vMax.y < vP.y) vMax.y = vP.y;
470 if (vMin.z > vP.z) vMin.z = vP.z;
471 else if (vMax.z < vP.z) vMax.z = vP.z;
472 }
473
474 vDim = vsub(vMax,vMin);
475 iChannel = 0;
476 fMin = vMin.x; fMax=vMax.x;
477 if (vDim.y>vDim.x && vDim.y>vDim.z)
478 {
479 iChannel=1;
480 fMin = vMin.y;
481 fMax = vMax.y;
482 }
483 else if (vDim.z>vDim.x)
484 {
485 iChannel=2;
486 fMin = vMin.z;
487 fMax = vMax.z;
488 }
489
490 // make allocations
491 piHashTable = (int *) malloc(sizeof(int)*iNrTrianglesIn*3);
492 piHashCount = (int *) malloc(sizeof(int)*g_iCells);
493 piHashOffsets = (int *) malloc(sizeof(int)*g_iCells);
494 piHashCount2 = (int *) malloc(sizeof(int)*g_iCells);
495
496 if (piHashTable==NULL || piHashCount==NULL || piHashOffsets==NULL || piHashCount2==NULL)
497 {
498 if (piHashTable!=NULL) free(piHashTable);
499 if (piHashCount!=NULL) free(piHashCount);
500 if (piHashOffsets!=NULL) free(piHashOffsets);
501 if (piHashCount2!=NULL) free(piHashCount2);
502 GenerateSharedVerticesIndexListSlow(piTriList_in_and_out, pContext, iNrTrianglesIn);
503 return;
504 }
505 memset(piHashCount, 0, sizeof(int)*g_iCells);
506 memset(piHashCount2, 0, sizeof(int)*g_iCells);
507
508 // count the number of elements in each cell
509 for (i=0; i<(iNrTrianglesIn*3); i++)
510 {
511 const int index = piTriList_in_and_out[i];
512 const SVec3 vP = GetPosition(pContext, index);
513 const float fVal = iChannel==0 ? vP.x : (iChannel==1 ? vP.y : vP.z);
514 const int iCell = FindGridCell(fMin, fMax, fVal);
515 ++piHashCount[iCell];
516 }
517
518 // evaluate start index of each cell.
519 piHashOffsets[0]=0;
520 for (k=1; k<g_iCells; k++)
521 piHashOffsets[k]=piHashOffsets[k-1]+piHashCount[k-1];
522
523 // insert vertices
524 for (i=0; i<(iNrTrianglesIn*3); i++)
525 {
526 const int index = piTriList_in_and_out[i];
527 const SVec3 vP = GetPosition(pContext, index);
528 const float fVal = iChannel==0 ? vP.x : (iChannel==1 ? vP.y : vP.z);
529 const int iCell = FindGridCell(fMin, fMax, fVal);
530 int * pTable = NULL;
531
532 assert(piHashCount2[iCell]<piHashCount[iCell]);
533 pTable = &piHashTable[piHashOffsets[iCell]];
534 pTable[piHashCount2[iCell]] = i; // vertex i has been inserted.
535 ++piHashCount2[iCell];
536 }
537 for (k=0; k<g_iCells; k++)
538 assert(piHashCount2[k] == piHashCount[k]); // verify the count
539 free(piHashCount2);
540
541 // find the maximum number of entries in any hash cell
542 iMaxCount = piHashCount[0];
543 for (k=1; k<g_iCells; k++)
544 if (iMaxCount<piHashCount[k])
545 iMaxCount=piHashCount[k];
546 pTmpVert = (STmpVert *) malloc(sizeof(STmpVert)*iMaxCount);
547
548
549 // complete the merge
550 for (k=0; k<g_iCells; k++)
551 {
552 // extract table of cell k and amount of entries in it
553 int * pTable = &piHashTable[piHashOffsets[k]];
554 const int iEntries = piHashCount[k];
555 if (iEntries < 2) continue;
556
557 if (pTmpVert!=NULL)
558 {
559 for (e=0; e<iEntries; e++)
560 {
561 int i = pTable[e];
562 const SVec3 vP = GetPosition(pContext, piTriList_in_and_out[i]);
563 pTmpVert[e].vert[0] = vP.x; pTmpVert[e].vert[1] = vP.y;
564 pTmpVert[e].vert[2] = vP.z; pTmpVert[e].index = i;
565 }
566 MergeVertsFast(piTriList_in_and_out, pTmpVert, pContext, 0, iEntries-1);
567 }
568 else
569 MergeVertsSlow(piTriList_in_and_out, pContext, pTable, iEntries);
570 }
571
572 if (pTmpVert!=NULL) { free(pTmpVert); }
573 free(piHashTable);
574 free(piHashCount);
575 free(piHashOffsets);
576}
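/* Note: the bucketing above is a counting sort. piHashCount tallies the
 * entries per cell, piHashOffsets is its exclusive prefix sum, and the
 * second pass scatters each vertex into its cell's slice of piHashTable.
 * Tiny worked example: counts {3,0,2} give offsets {0,3,3}, so cell 0
 * owns piHashTable[0..2] and cell 2 owns piHashTable[3..4]. */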
577
578static void MergeVertsFast(int piTriList_in_and_out[], STmpVert pTmpVert[], const SMikkTSpaceContext * pContext, const int iL_in, const int iR_in)
579{
580 // make bbox
581 int c=0, l=0, channel=0;
582 float fvMin[3], fvMax[3];
583 float dx=0, dy=0, dz=0, fSep=0;
584 for (c=0; c<3; c++)
585 { fvMin[c]=pTmpVert[iL_in].vert[c]; fvMax[c]=fvMin[c]; }
586 for (l=(iL_in+1); l<=iR_in; l++) {
587 for (c=0; c<3; c++) {
588 if (fvMin[c]>pTmpVert[l].vert[c]) fvMin[c]=pTmpVert[l].vert[c];
589 if (fvMax[c]<pTmpVert[l].vert[c]) fvMax[c]=pTmpVert[l].vert[c];
590 }
591 }
592
593 dx = fvMax[0]-fvMin[0];
594 dy = fvMax[1]-fvMin[1];
595 dz = fvMax[2]-fvMin[2];
596
597 channel = 0;
598 if (dy>dx && dy>dz) channel=1;
599 else if (dz>dx) channel=2;
600
601 fSep = 0.5f*(fvMax[channel]+fvMin[channel]);
602
603 // stop if all vertices are NaNs
604 if (!isfinite(fSep))
605 return;
606
607 // terminate recursion when the separation/average value
608 // is no longer strictly between fMin and fMax values.
609 if (fSep>=fvMax[channel] || fSep<=fvMin[channel])
610 {
611 // complete the weld
612 for (l=iL_in; l<=iR_in; l++)
613 {
614 int i = pTmpVert[l].index;
615 const int index = piTriList_in_and_out[i];
616 const SVec3 vP = GetPosition(pContext, index);
617 const SVec3 vN = GetNormal(pContext, index);
618 const SVec3 vT = GetTexCoord(pContext, index);
619
620 tbool bNotFound = TTRUE;
621 int l2=iL_in, i2rec=-1;
622 while (l2<l && bNotFound)
623 {
624 const int i2 = pTmpVert[l2].index;
625 const int index2 = piTriList_in_and_out[i2];
626 const SVec3 vP2 = GetPosition(pContext, index2);
627 const SVec3 vN2 = GetNormal(pContext, index2);
628 const SVec3 vT2 = GetTexCoord(pContext, index2);
629 i2rec=i2;
630
631 //if (vP==vP2 && vN==vN2 && vT==vT2)
632 if (vP.x==vP2.x && vP.y==vP2.y && vP.z==vP2.z &&
633 vN.x==vN2.x && vN.y==vN2.y && vN.z==vN2.z &&
634 vT.x==vT2.x && vT.y==vT2.y && vT.z==vT2.z)
635 bNotFound = TFALSE;
636 else
637 ++l2;
638 }
639
640 // merge if previously found
641 if (!bNotFound)
642 piTriList_in_and_out[i] = piTriList_in_and_out[i2rec];
643 }
644 }
645 else
646 {
647 int iL=iL_in, iR=iR_in;
648 assert((iR_in-iL_in)>0); // at least 2 entries
649
650 // separate (by fSep) all points between iL_in and iR_in in pTmpVert[]
651 while (iL < iR)
652 {
653 tbool bReadyLeftSwap = TFALSE, bReadyRightSwap = TFALSE;
654 while ((!bReadyLeftSwap) && iL<iR)
655 {
656 assert(iL>=iL_in && iL<=iR_in);
657 bReadyLeftSwap = !(pTmpVert[iL].vert[channel]<fSep);
658 if (!bReadyLeftSwap) ++iL;
659 }
660 while ((!bReadyRightSwap) && iL<iR)
661 {
662 assert(iR>=iL_in && iR<=iR_in);
663 bReadyRightSwap = pTmpVert[iR].vert[channel]<fSep;
664 if (!bReadyRightSwap) --iR;
665 }
666 assert( (iL<iR) || !(bReadyLeftSwap && bReadyRightSwap) );
667
668 if (bReadyLeftSwap && bReadyRightSwap)
669 {
670 const STmpVert sTmp = pTmpVert[iL];
671 assert(iL<iR);
672 pTmpVert[iL] = pTmpVert[iR];
673 pTmpVert[iR] = sTmp;
674 ++iL; --iR;
675 }
676 }
677
678 assert(iL==(iR+1) || (iL==iR));
679 if (iL==iR)
680 {
681 const tbool bReadyRightSwap = pTmpVert[iR].vert[channel]<fSep;
682 if (bReadyRightSwap) ++iL;
683 else --iR;
684 }
685
686 // only need to weld when there is more than 1 instance of the (x,y,z)
687 if (iL_in < iR)
688 MergeVertsFast(piTriList_in_and_out, pTmpVert, pContext, iL_in, iR); // weld all left of fSep
689 if (iL < iR_in)
690 MergeVertsFast(piTriList_in_and_out, pTmpVert, pContext, iL, iR_in); // weld all right of (or equal to) fSep
691 }
692}
693
694static void MergeVertsSlow(int piTriList_in_and_out[], const SMikkTSpaceContext * pContext, const int pTable[], const int iEntries)
695{
696 // this can be optimized further using a tree structure or more hashing.
697 int e=0;
698 for (e=0; e<iEntries; e++)
699 {
700 int i = pTable[e];
701 const int index = piTriList_in_and_out[i];
702 const SVec3 vP = GetPosition(pContext, index);
703 const SVec3 vN = GetNormal(pContext, index);
704 const SVec3 vT = GetTexCoord(pContext, index);
705
706 tbool bNotFound = TTRUE;
707 int e2=0, i2rec=-1;
708 while (e2<e && bNotFound)
709 {
710 const int i2 = pTable[e2];
711 const int index2 = piTriList_in_and_out[i2];
712 const SVec3 vP2 = GetPosition(pContext, index2);
713 const SVec3 vN2 = GetNormal(pContext, index2);
714 const SVec3 vT2 = GetTexCoord(pContext, index2);
715 i2rec = i2;
716
717 if (veq(vP,vP2) && veq(vN,vN2) && veq(vT,vT2))
718 bNotFound = TFALSE;
719 else
720 ++e2;
721 }
722
723 // merge if previously found
724 if (!bNotFound)
725 piTriList_in_and_out[i] = piTriList_in_and_out[i2rec];
726 }
727}
728
729static void GenerateSharedVerticesIndexListSlow(int piTriList_in_and_out[], const SMikkTSpaceContext * pContext, const int iNrTrianglesIn)
730{
731 int iNumUniqueVerts = 0, t=0, i=0;
732 for (t=0; t<iNrTrianglesIn; t++)
733 {
734 for (i=0; i<3; i++)
735 {
736 const int offs = t*3 + i;
737 const int index = piTriList_in_and_out[offs];
738
739 const SVec3 vP = GetPosition(pContext, index);
740 const SVec3 vN = GetNormal(pContext, index);
741 const SVec3 vT = GetTexCoord(pContext, index);
742
743 tbool bFound = TFALSE;
744 int t2=0, index2rec=-1;
745 while (!bFound && t2<=t)
746 {
747 int j=0;
748 while (!bFound && j<3)
749 {
750 const int index2 = piTriList_in_and_out[t2*3 + j];
751 const SVec3 vP2 = GetPosition(pContext, index2);
752 const SVec3 vN2 = GetNormal(pContext, index2);
753 const SVec3 vT2 = GetTexCoord(pContext, index2);
754
755 if (veq(vP,vP2) && veq(vN,vN2) && veq(vT,vT2))
756 { bFound = TTRUE; index2rec = index2; }
757 else
758 ++j;
759 }
760 if (!bFound) ++t2;
761 }
762
763 assert(bFound);
764 // if we found our own
765 if (index2rec == index) { ++iNumUniqueVerts; }
766
767 piTriList_in_and_out[offs] = index2rec;
768 }
769 }
770}
771
772static int GenerateInitialVerticesIndexList(STriInfo pTriInfos[], int piTriList_out[], const SMikkTSpaceContext * pContext, const int iNrTrianglesIn)
773{
774 int iTSpacesOffs = 0, f=0, t=0;
775 int iDstTriIndex = 0;
776 for (f=0; f<pContext->m_pInterface->m_getNumFaces(pContext); f++)
777 {
778 const int verts = pContext->m_pInterface->m_getNumVerticesOfFace(pContext, f);
779 if (verts!=3 && verts!=4) continue;
780
781 pTriInfos[iDstTriIndex].iOrgFaceNumber = f;
782 pTriInfos[iDstTriIndex].iTSpacesOffs = iTSpacesOffs;
783
784 if (verts==3)
785 {
786 unsigned char * pVerts = pTriInfos[iDstTriIndex].vert_num;
787 pVerts[0]=0; pVerts[1]=1; pVerts[2]=2;
788 piTriList_out[iDstTriIndex*3+0] = MakeIndex(f, 0);
789 piTriList_out[iDstTriIndex*3+1] = MakeIndex(f, 1);
790 piTriList_out[iDstTriIndex*3+2] = MakeIndex(f, 2);
791 ++iDstTriIndex; // next
792 }
793 else
794 {
795 {
796 pTriInfos[iDstTriIndex+1].iOrgFaceNumber = f;
797 pTriInfos[iDstTriIndex+1].iTSpacesOffs = iTSpacesOffs;
798 }
799
800 {
801 // need an order independent way to evaluate
802 // tspace on quads. This is done by splitting
803 // along the shortest diagonal.
804 const int i0 = MakeIndex(f, 0);
805 const int i1 = MakeIndex(f, 1);
806 const int i2 = MakeIndex(f, 2);
807 const int i3 = MakeIndex(f, 3);
808 const SVec3 T0 = GetTexCoord(pContext, i0);
809 const SVec3 T1 = GetTexCoord(pContext, i1);
810 const SVec3 T2 = GetTexCoord(pContext, i2);
811 const SVec3 T3 = GetTexCoord(pContext, i3);
812 const float distSQ_02 = LengthSquared(vsub(T2,T0));
813 const float distSQ_13 = LengthSquared(vsub(T3,T1));
814 tbool bQuadDiagIs_02;
815 if (distSQ_02<distSQ_13)
816 bQuadDiagIs_02 = TTRUE;
817 else if (distSQ_13<distSQ_02)
818 bQuadDiagIs_02 = TFALSE;
819 else
820 {
821 const SVec3 P0 = GetPosition(pContext, i0);
822 const SVec3 P1 = GetPosition(pContext, i1);
823 const SVec3 P2 = GetPosition(pContext, i2);
824 const SVec3 P3 = GetPosition(pContext, i3);
825 const float distSQ_02 = LengthSquared(vsub(P2,P0));
826 const float distSQ_13 = LengthSquared(vsub(P3,P1));
827
828 bQuadDiagIs_02 = distSQ_13<distSQ_02 ? TFALSE : TTRUE;
829 }
830
831 if (bQuadDiagIs_02)
832 {
833 {
834 unsigned char * pVerts_A = pTriInfos[iDstTriIndex].vert_num;
835 pVerts_A[0]=0; pVerts_A[1]=1; pVerts_A[2]=2;
836 }
837 piTriList_out[iDstTriIndex*3+0] = i0;
838 piTriList_out[iDstTriIndex*3+1] = i1;
839 piTriList_out[iDstTriIndex*3+2] = i2;
840 ++iDstTriIndex; // next
841 {
842 unsigned char * pVerts_B = pTriInfos[iDstTriIndex].vert_num;
843 pVerts_B[0]=0; pVerts_B[1]=2; pVerts_B[2]=3;
844 }
845 piTriList_out[iDstTriIndex*3+0] = i0;
846 piTriList_out[iDstTriIndex*3+1] = i2;
847 piTriList_out[iDstTriIndex*3+2] = i3;
848 ++iDstTriIndex; // next
849 }
850 else
851 {
852 {
853 unsigned char * pVerts_A = pTriInfos[iDstTriIndex].vert_num;
854 pVerts_A[0]=0; pVerts_A[1]=1; pVerts_A[2]=3;
855 }
856 piTriList_out[iDstTriIndex*3+0] = i0;
857 piTriList_out[iDstTriIndex*3+1] = i1;
858 piTriList_out[iDstTriIndex*3+2] = i3;
859 ++iDstTriIndex; // next
860 {
861 unsigned char * pVerts_B = pTriInfos[iDstTriIndex].vert_num;
862 pVerts_B[0]=1; pVerts_B[1]=2; pVerts_B[2]=3;
863 }
864 piTriList_out[iDstTriIndex*3+0] = i1;
865 piTriList_out[iDstTriIndex*3+1] = i2;
866 piTriList_out[iDstTriIndex*3+2] = i3;
867 ++iDstTriIndex; // next
868 }
869 }
870 }
871
872 iTSpacesOffs += verts;
873 assert(iDstTriIndex<=iNrTrianglesIn);
874 }
875
876 for (t=0; t<iNrTrianglesIn; t++)
877 pTriInfos[t].iFlag = 0;
878
879 // return the total number of tspaces
880 return iTSpacesOffs;
881}
882
883static SVec3 GetPosition(const SMikkTSpaceContext * pContext, const int index)
884{
885 int iF, iI;
886 SVec3 res; float pos[3];
887 IndexToData(&iF, &iI, index);
888 pContext->m_pInterface->m_getPosition(pContext, pos, iF, iI);
889 res.x=pos[0]; res.y=pos[1]; res.z=pos[2];
890 return res;
891}
892
893static SVec3 GetNormal(const SMikkTSpaceContext * pContext, const int index)
894{
895 int iF, iI;
896 SVec3 res; float norm[3];
897 IndexToData(&iF, &iI, index);
898 pContext->m_pInterface->m_getNormal(pContext, norm, iF, iI);
899 res.x=norm[0]; res.y=norm[1]; res.z=norm[2];
900 return res;
901}
902
903static SVec3 GetTexCoord(const SMikkTSpaceContext * pContext, const int index)
904{
905 int iF, iI;
906 SVec3 res; float texc[2];
907 IndexToData(&iF, &iI, index);
908 pContext->m_pInterface->m_getTexCoord(pContext, texc, iF, iI);
909 res.x=texc[0]; res.y=texc[1]; res.z=1.0f;
910 return res;
911}
912
913/////////////////////////////////////////////////////////////////////////////////////////////////////
914/////////////////////////////////////////////////////////////////////////////////////////////////////
915
916typedef union {
917 struct
918 {
919 int i0, i1, f;
920 };
921 int array[3];
922} SEdge;
923
924static void BuildNeighborsFast(STriInfo pTriInfos[], SEdge * pEdges, const int piTriListIn[], const int iNrTrianglesIn);
925static void BuildNeighborsSlow(STriInfo pTriInfos[], const int piTriListIn[], const int iNrTrianglesIn);
926
927// returns the texture area times 2
928static float CalcTexArea(const SMikkTSpaceContext * pContext, const int indices[])
929{
930 const SVec3 t1 = GetTexCoord(pContext, indices[0]);
931 const SVec3 t2 = GetTexCoord(pContext, indices[1]);
932 const SVec3 t3 = GetTexCoord(pContext, indices[2]);
933
934 const float t21x = t2.x-t1.x;
935 const float t21y = t2.y-t1.y;
936 const float t31x = t3.x-t1.x;
937 const float t31y = t3.y-t1.y;
938
939 const float fSignedAreaSTx2 = t21x*t31y - t21y*t31x;
940
941 return fSignedAreaSTx2<0 ? (-fSignedAreaSTx2) : fSignedAreaSTx2;
942}
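/* Worked example: UVs (0,0), (1,0), (0,1) give t21=(1,0) and t31=(0,1),
 * so fSignedAreaSTx2 = 1*1 - 0*0 = 1 -- twice the actual texture area of
 * 0.5. Winding only flips the sign, which the final select discards. */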
943
944static void InitTriInfo(STriInfo pTriInfos[], const int piTriListIn[], const SMikkTSpaceContext * pContext, const int iNrTrianglesIn)
945{
946 int f=0, i=0, t=0;
947 // pTriInfos[f].iFlag is cleared in GenerateInitialVerticesIndexList() which is called before this function.
948
949 // generate neighbor info list
950 for (f=0; f<iNrTrianglesIn; f++)
951 for (i=0; i<3; i++)
952 {
953 pTriInfos[f].FaceNeighbors[i] = -1;
954 pTriInfos[f].AssignedGroup[i] = NULL;
955
956 pTriInfos[f].vOs.x=0.0f; pTriInfos[f].vOs.y=0.0f; pTriInfos[f].vOs.z=0.0f;
957 pTriInfos[f].vOt.x=0.0f; pTriInfos[f].vOt.y=0.0f; pTriInfos[f].vOt.z=0.0f;
958 pTriInfos[f].fMagS = 0;
959 pTriInfos[f].fMagT = 0;
960
961 // assumed bad
962 pTriInfos[f].iFlag |= GROUP_WITH_ANY;
963 }
964
965 // evaluate first order derivatives
966 for (f=0; f<iNrTrianglesIn; f++)
967 {
968 // initial values
969 const SVec3 v1 = GetPosition(pContext, piTriListIn[f*3+0]);
970 const SVec3 v2 = GetPosition(pContext, piTriListIn[f*3+1]);
971 const SVec3 v3 = GetPosition(pContext, piTriListIn[f*3+2]);
972 const SVec3 t1 = GetTexCoord(pContext, piTriListIn[f*3+0]);
973 const SVec3 t2 = GetTexCoord(pContext, piTriListIn[f*3+1]);
974 const SVec3 t3 = GetTexCoord(pContext, piTriListIn[f*3+2]);
975
976 const float t21x = t2.x-t1.x;
977 const float t21y = t2.y-t1.y;
978 const float t31x = t3.x-t1.x;
979 const float t31y = t3.y-t1.y;
980 const SVec3 d1 = vsub(v2,v1);
981 const SVec3 d2 = vsub(v3,v1);
982
983 const float fSignedAreaSTx2 = t21x*t31y - t21y*t31x;
984 //assert(fSignedAreaSTx2!=0);
985 SVec3 vOs = vsub(vscale(t31y,d1), vscale(t21y,d2)); // eq 18
986 SVec3 vOt = vadd(vscale(-t31x,d1), vscale(t21x,d2)); // eq 19
987
988 pTriInfos[f].iFlag |= (fSignedAreaSTx2>0 ? ORIENT_PRESERVING : 0);
989
990 if ( NotZero(fSignedAreaSTx2) )
991 {
992 const float fAbsArea = fabsf(fSignedAreaSTx2);
993 const float fLenOs = Length(vOs);
994 const float fLenOt = Length(vOt);
995 const float fS = (pTriInfos[f].iFlag&ORIENT_PRESERVING)==0 ? (-1.0f) : 1.0f;
996 if ( NotZero(fLenOs) ) pTriInfos[f].vOs = vscale(fS/fLenOs, vOs);
997 if ( NotZero(fLenOt) ) pTriInfos[f].vOt = vscale(fS/fLenOt, vOt);
998
999 // evaluate magnitudes prior to normalization of vOs and vOt
1000 pTriInfos[f].fMagS = fLenOs / fAbsArea;
1001 pTriInfos[f].fMagT = fLenOt / fAbsArea;
1002
1003 // if this is a good triangle
1004 if ( NotZero(pTriInfos[f].fMagS) && NotZero(pTriInfos[f].fMagT))
1005 pTriInfos[f].iFlag &= (~GROUP_WITH_ANY);
1006 }
1007 }
1008
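/* Worked example for eq 18/19: positions v1=(0,0,0), v2=(1,0,0),
 * v3=(0,1,0) with UVs t1=(0,0), t2=(1,0), t3=(0,1) give d1=(1,0,0),
 * d2=(0,1,0), t21=(1,0), t31=(0,1), hence
 * vOs = 1*d1 - 0*d2 = (1,0,0)  (tangent along +U),
 * vOt = -0*d1 + 1*d2 = (0,1,0) (bitangent along +V),
 * fSignedAreaSTx2 = 1 > 0 (orientation preserving), fMagS = fMagT = 1. */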
1009 // force otherwise healthy quads to a fixed orientation
1010 while (t<(iNrTrianglesIn-1))
1011 {
1012 const int iFO_a = pTriInfos[t].iOrgFaceNumber;
1013 const int iFO_b = pTriInfos[t+1].iOrgFaceNumber;
1014 if (iFO_a==iFO_b) // this is a quad
1015 {
1016 const tbool bIsDeg_a = (pTriInfos[t].iFlag&MARK_DEGENERATE)!=0 ? TTRUE : TFALSE;
1017 const tbool bIsDeg_b = (pTriInfos[t+1].iFlag&MARK_DEGENERATE)!=0 ? TTRUE : TFALSE;
1018
1019 // bad triangles should already have been removed by
1020 // DegenPrologue(), but just in case check that bIsDeg_a and bIsDeg_b are false
1021 if ((bIsDeg_a||bIsDeg_b)==TFALSE)
1022 {
1023 const tbool bOrientA = (pTriInfos[t].iFlag&ORIENT_PRESERVING)!=0 ? TTRUE : TFALSE;
1024 const tbool bOrientB = (pTriInfos[t+1].iFlag&ORIENT_PRESERVING)!=0 ? TTRUE : TFALSE;
1025 // if this happens the quad has extremely bad mapping!!
1026 if (bOrientA!=bOrientB)
1027 {
1028 //printf("found quad with bad mapping\n");
1029 tbool bChooseOrientFirstTri = TFALSE;
1030 if ((pTriInfos[t+1].iFlag&GROUP_WITH_ANY)!=0) bChooseOrientFirstTri = TTRUE;
1031 else if ( CalcTexArea(pContext, &piTriListIn[t*3+0]) >= CalcTexArea(pContext, &piTriListIn[(t+1)*3+0]) )
1032 bChooseOrientFirstTri = TTRUE;
1033
1034 // force match
1035 {
1036 const int t0 = bChooseOrientFirstTri ? t : (t+1);
1037 const int t1 = bChooseOrientFirstTri ? (t+1) : t;
1038 pTriInfos[t1].iFlag &= (~ORIENT_PRESERVING); // clear first
1039 pTriInfos[t1].iFlag |= (pTriInfos[t0].iFlag&ORIENT_PRESERVING); // copy bit
1040 }
1041 }
1042 }
1043 t += 2;
1044 }
1045 else
1046 ++t;
1047 }
1048
1049 // match up edge pairs
1050 {
1051 SEdge * pEdges = (SEdge *) malloc(sizeof(SEdge)*iNrTrianglesIn*3);
1052 if (pEdges==NULL)
1053 BuildNeighborsSlow(pTriInfos, piTriListIn, iNrTrianglesIn);
1054 else
1055 {
1056 BuildNeighborsFast(pTriInfos, pEdges, piTriListIn, iNrTrianglesIn);
1057
1058 free(pEdges);
1059 }
1060 }
1061}
1062
1063/////////////////////////////////////////////////////////////////////////////////////////////////////
1064/////////////////////////////////////////////////////////////////////////////////////////////////////
1065
1066static tbool AssignRecur(const int piTriListIn[], STriInfo psTriInfos[], const int iMyTriIndex, SGroup * pGroup);
1067static void AddTriToGroup(SGroup * pGroup, const int iTriIndex);
1068
1069static int Build4RuleGroups(STriInfo pTriInfos[], SGroup pGroups[], int piGroupTrianglesBuffer[], const int piTriListIn[], const int iNrTrianglesIn)
1070{
1071 const int iNrMaxGroups = iNrTrianglesIn*3;
1072 int iNrActiveGroups = 0;
1073 int iOffset = 0, f=0, i=0;
1074 (void)iNrMaxGroups; /* quiet warnings in non debug mode */
1075 for (f=0; f<iNrTrianglesIn; f++)
1076 {
1077 for (i=0; i<3; i++)
1078 {
1079 // if not assigned to a group
1080 if ((pTriInfos[f].iFlag&GROUP_WITH_ANY)==0 && pTriInfos[f].AssignedGroup[i]==NULL)
1081 {
1082 tbool bOrPre;
1083 int neigh_indexL, neigh_indexR;
1084 const int vert_index = piTriListIn[f*3+i];
1085 assert(iNrActiveGroups<iNrMaxGroups);
1086 pTriInfos[f].AssignedGroup[i] = &pGroups[iNrActiveGroups];
1087 pTriInfos[f].AssignedGroup[i]->iVertexRepresentitive = vert_index;
1088 pTriInfos[f].AssignedGroup[i]->bOrientPreservering = (pTriInfos[f].iFlag&ORIENT_PRESERVING)!=0;
1089 pTriInfos[f].AssignedGroup[i]->iNrFaces = 0;
1090 pTriInfos[f].AssignedGroup[i]->pFaceIndices = &piGroupTrianglesBuffer[iOffset];
1091 ++iNrActiveGroups;
1092
1093 AddTriToGroup(pTriInfos[f].AssignedGroup[i], f);
1094 bOrPre = (pTriInfos[f].iFlag&ORIENT_PRESERVING)!=0 ? TTRUE : TFALSE;
1095 neigh_indexL = pTriInfos[f].FaceNeighbors[i];
1096 neigh_indexR = pTriInfos[f].FaceNeighbors[i>0?(i-1):2];
1097 if (neigh_indexL>=0) // neighbor
1098 {
1099 const tbool bAnswer =
1100 AssignRecur(piTriListIn, pTriInfos, neigh_indexL,
1101 pTriInfos[f].AssignedGroup[i] );
1102
1103 const tbool bOrPre2 = (pTriInfos[neigh_indexL].iFlag&ORIENT_PRESERVING)!=0 ? TTRUE : TFALSE;
1104 const tbool bDiff = bOrPre!=bOrPre2 ? TTRUE : TFALSE;
1105 assert(bAnswer || bDiff);
1106 (void)bAnswer, (void)bDiff; /* quiet warnings in non debug mode */
1107 }
1108 if (neigh_indexR>=0) // neighbor
1109 {
1110 const tbool bAnswer =
1111 AssignRecur(piTriListIn, pTriInfos, neigh_indexR,
1112 pTriInfos[f].AssignedGroup[i] );
1113
1114 const tbool bOrPre2 = (pTriInfos[neigh_indexR].iFlag&ORIENT_PRESERVING)!=0 ? TTRUE : TFALSE;
1115 const tbool bDiff = bOrPre!=bOrPre2 ? TTRUE : TFALSE;
1116 assert(bAnswer || bDiff);
1117 (void)bAnswer, (void)bDiff; /* quiet warnings in non debug mode */
1118 }
1119
1120 // update offset
1121 iOffset += pTriInfos[f].AssignedGroup[i]->iNrFaces;
1122 // since the groups are disjoint a triangle can never
1123 // belong to more than 3 groups. Consequently, something
1124 // is completely screwed if this assertion ever hits.
1125 assert(iOffset <= iNrMaxGroups);
1126 }
1127 }
1128 }
1129
1130 return iNrActiveGroups;
1131}
1132
1133static void AddTriToGroup(SGroup * pGroup, const int iTriIndex)
1134{
1135 pGroup->pFaceIndices[pGroup->iNrFaces] = iTriIndex;
1136 ++pGroup->iNrFaces;
1137}
1138
1139static tbool AssignRecur(const int piTriListIn[], STriInfo psTriInfos[],
1140 const int iMyTriIndex, SGroup * pGroup)
1141{
1142 STriInfo * pMyTriInfo = &psTriInfos[iMyTriIndex];
1143
1144 // track down vertex
1145 const int iVertRep = pGroup->iVertexRepresentitive;
1146 const int * pVerts = &piTriListIn[3*iMyTriIndex+0];
1147 int i=-1;
1148 if (pVerts[0]==iVertRep) i=0;
1149 else if (pVerts[1]==iVertRep) i=1;
1150 else if (pVerts[2]==iVertRep) i=2;
1151 assert(i>=0 && i<3);
1152
1153 // early out
1154 if (pMyTriInfo->AssignedGroup[i] == pGroup) return TTRUE;
1155 else if (pMyTriInfo->AssignedGroup[i]!=NULL) return TFALSE;
1156 if ((pMyTriInfo->iFlag&GROUP_WITH_ANY)!=0)
1157 {
1158 // first to group with a group-with-anything triangle
1159 // determines its orientation.
1160 // This is the only existing order dependency in the code!!
1161 if ( pMyTriInfo->AssignedGroup[0] == NULL &&
1162 pMyTriInfo->AssignedGroup[1] == NULL &&
1163 pMyTriInfo->AssignedGroup[2] == NULL )
1164 {
1165 pMyTriInfo->iFlag &= (~ORIENT_PRESERVING);
1166 pMyTriInfo->iFlag |= (pGroup->bOrientPreservering ? ORIENT_PRESERVING : 0);
1167 }
1168 }
1169 {
1170 const tbool bOrient = (pMyTriInfo->iFlag&ORIENT_PRESERVING)!=0 ? TTRUE : TFALSE;
1171 if (bOrient != pGroup->bOrientPreservering) return TFALSE;
1172 }
1173
1174 AddTriToGroup(pGroup, iMyTriIndex);
1175 pMyTriInfo->AssignedGroup[i] = pGroup;
1176
1177 {
1178 const int neigh_indexL = pMyTriInfo->FaceNeighbors[i];
1179 const int neigh_indexR = pMyTriInfo->FaceNeighbors[i>0?(i-1):2];
1180 if (neigh_indexL>=0)
1181 AssignRecur(piTriListIn, psTriInfos, neigh_indexL, pGroup);
1182 if (neigh_indexR>=0)
1183 AssignRecur(piTriListIn, psTriInfos, neigh_indexR, pGroup);
1184 }
1185
1186
1187
1188 return TTRUE;
1189}
1190
1191/////////////////////////////////////////////////////////////////////////////////////////////////////
1192/////////////////////////////////////////////////////////////////////////////////////////////////////
1193
1194static tbool CompareSubGroups(const SSubGroup * pg1, const SSubGroup * pg2);
1195static void QuickSort(int* pSortBuffer, int iLeft, int iRight, unsigned int uSeed);
1196static STSpace EvalTspace(int face_indices[], const int iFaces, const int piTriListIn[], const STriInfo pTriInfos[], const SMikkTSpaceContext * pContext, const int iVertexRepresentitive);
1197
1198static tbool GenerateTSpaces(STSpace psTspace[], const STriInfo pTriInfos[], const SGroup pGroups[],
1199 const int iNrActiveGroups, const int piTriListIn[], const float fThresCos,
1200 const SMikkTSpaceContext * pContext)
1201{
1202 STSpace * pSubGroupTspace = NULL;
1203 SSubGroup * pUniSubGroups = NULL;
1204 int * pTmpMembers = NULL;
1205 int iMaxNrFaces=0, iUniqueTspaces=0, g=0, i=0;
1206 for (g=0; g<iNrActiveGroups; g++)
1207 if (iMaxNrFaces < pGroups[g].iNrFaces)
1208 iMaxNrFaces = pGroups[g].iNrFaces;
1209
1210 if (iMaxNrFaces == 0) return TTRUE;
1211
1212 // make initial allocations
1213 pSubGroupTspace = (STSpace *) malloc(sizeof(STSpace)*iMaxNrFaces);
1214 pUniSubGroups = (SSubGroup *) malloc(sizeof(SSubGroup)*iMaxNrFaces);
1215 pTmpMembers = (int *) malloc(sizeof(int)*iMaxNrFaces);
1216 if (pSubGroupTspace==NULL || pUniSubGroups==NULL || pTmpMembers==NULL)
1217 {
1218 if (pSubGroupTspace!=NULL) free(pSubGroupTspace);
1219 if (pUniSubGroups!=NULL) free(pUniSubGroups);
1220 if (pTmpMembers!=NULL) free(pTmpMembers);
1221 return TFALSE;
1222 }
1223
1224
1225 iUniqueTspaces = 0;
1226 for (g=0; g<iNrActiveGroups; g++)
1227 {
1228 const SGroup * pGroup = &pGroups[g];
1229 int iUniqueSubGroups = 0, s=0;
1230
1231 for (i=0; i<pGroup->iNrFaces; i++) // triangles
1232 {
1233 const int f = pGroup->pFaceIndices[i]; // triangle number
1234 int index=-1, iVertIndex=-1, iOF_1=-1, iMembers=0, j=0, l=0;
1235 SSubGroup tmp_group;
1236 tbool bFound;
1237 SVec3 n, vOs, vOt;
1238 if (pTriInfos[f].AssignedGroup[0]==pGroup) index=0;
1239 else if (pTriInfos[f].AssignedGroup[1]==pGroup) index=1;
1240 else if (pTriInfos[f].AssignedGroup[2]==pGroup) index=2;
1241 assert(index>=0 && index<3);
1242
1243 iVertIndex = piTriListIn[f*3+index];
1244 assert(iVertIndex==pGroup->iVertexRepresentitive);
1245
1246 // is normalized already
1247 n = GetNormal(pContext, iVertIndex);
1248
1249 // project
1250 vOs = vsub(pTriInfos[f].vOs, vscale(vdot(n,pTriInfos[f].vOs), n));
1251 vOt = vsub(pTriInfos[f].vOt, vscale(vdot(n,pTriInfos[f].vOt), n));
1252 if ( VNotZero(vOs) ) vOs = Normalize(vOs);
1253 if ( VNotZero(vOt) ) vOt = Normalize(vOt);
1254
1255 // original face number
1256 iOF_1 = pTriInfos[f].iOrgFaceNumber;
1257
1258 iMembers = 0;
1259 for (j=0; j<pGroup->iNrFaces; j++)
1260 {
1261 const int t = pGroup->pFaceIndices[j]; // triangle number
1262 const int iOF_2 = pTriInfos[t].iOrgFaceNumber;
1263
1264 // project
1265 SVec3 vOs2 = vsub(pTriInfos[t].vOs, vscale(vdot(n,pTriInfos[t].vOs), n));
1266 SVec3 vOt2 = vsub(pTriInfos[t].vOt, vscale(vdot(n,pTriInfos[t].vOt), n));
1267 if ( VNotZero(vOs2) ) vOs2 = Normalize(vOs2);
1268 if ( VNotZero(vOt2) ) vOt2 = Normalize(vOt2);
1269
1270 {
1271 const tbool bAny = ( (pTriInfos[f].iFlag | pTriInfos[t].iFlag) & GROUP_WITH_ANY )!=0 ? TTRUE : TFALSE;
1272 // make sure triangles which belong to the same quad are joined.
1273 const tbool bSameOrgFace = iOF_1==iOF_2 ? TTRUE : TFALSE;
1274
1275 const float fCosS = vdot(vOs,vOs2);
1276 const float fCosT = vdot(vOt,vOt2);
1277
1278 assert(f!=t || bSameOrgFace); // sanity check
1279 if (bAny || bSameOrgFace || (fCosS>fThresCos && fCosT>fThresCos))
1280 pTmpMembers[iMembers++] = t;
1281 }
1282 }
1283
1284 // sort pTmpMembers
1285 tmp_group.iNrFaces = iMembers;
1286 tmp_group.pTriMembers = pTmpMembers;
1287 if (iMembers>1)
1288 {
1289 unsigned int uSeed = INTERNAL_RND_SORT_SEED; // could replace with a random seed?
1290 QuickSort(pTmpMembers, 0, iMembers-1, uSeed);
1291 }
1292
1293 // look for an existing match
1294 bFound = TFALSE;
1295 l=0;
1296 while (l<iUniqueSubGroups && !bFound)
1297 {
1298 bFound = CompareSubGroups(&tmp_group, &pUniSubGroups[l]);
1299 if (!bFound) ++l;
1300 }
1301
1302 // assign tangent space index
1303 assert(bFound || l==iUniqueSubGroups);
1304 //piTempTangIndices[f*3+index] = iUniqueTspaces+l;
1305
1306 // if no match was found we allocate a new subgroup
1307 if (!bFound)
1308 {
1309 // insert new subgroup
1310 int * pIndices = (int *) malloc(sizeof(int)*iMembers);
1311 if (pIndices==NULL)
1312 {
1313 // clean up and return false
1314 int s=0;
1315 for (s=0; s<iUniqueSubGroups; s++)
1316 free(pUniSubGroups[s].pTriMembers);
1317 free(pUniSubGroups);
1318 free(pTmpMembers);
1319 free(pSubGroupTspace);
1320 return TFALSE;
1321 }
1322 pUniSubGroups[iUniqueSubGroups].iNrFaces = iMembers;
1323 pUniSubGroups[iUniqueSubGroups].pTriMembers = pIndices;
1324 memcpy(pIndices, tmp_group.pTriMembers, iMembers*sizeof(int));
1325 pSubGroupTspace[iUniqueSubGroups] =
1326 EvalTspace(tmp_group.pTriMembers, iMembers, piTriListIn, pTriInfos, pContext, pGroup->iVertexRepresentitive);
1327 ++iUniqueSubGroups;
1328 }
1329
1330 // output tspace
1331 {
1332 const int iOffs = pTriInfos[f].iTSpacesOffs;
1333 const int iVert = pTriInfos[f].vert_num[index];
1334 STSpace * pTS_out = &psTspace[iOffs+iVert];
1335 assert(pTS_out->iCounter<2);
1336 assert(((pTriInfos[f].iFlag&ORIENT_PRESERVING)!=0) == pGroup->bOrientPreservering);
1337 if (pTS_out->iCounter==1)
1338 {
1339 *pTS_out = AvgTSpace(pTS_out, &pSubGroupTspace[l]);
1340 pTS_out->iCounter = 2; // update counter
1341 pTS_out->bOrient = pGroup->bOrientPreservering;
1342 }
1343 else
1344 {
1345 assert(pTS_out->iCounter==0);
1346 *pTS_out = pSubGroupTspace[l];
1347 pTS_out->iCounter = 1; // update counter
1348 pTS_out->bOrient = pGroup->bOrientPreservering;
1349 }
1350 }
1351 }
1352
1353 // clean up and offset iUniqueTspaces
1354 for (s=0; s<iUniqueSubGroups; s++)
1355 free(pUniSubGroups[s].pTriMembers);
1356 iUniqueTspaces += iUniqueSubGroups;
1357 }
1358
1359 // clean up
1360 free(pUniSubGroups);
1361 free(pTmpMembers);
1362 free(pSubGroupTspace);
1363
1364 return TTRUE;
1365}
1366
1367static STSpace EvalTspace(int face_indices[], const int iFaces, const int piTriListIn[], const STriInfo pTriInfos[],
1368 const SMikkTSpaceContext * pContext, const int iVertexRepresentitive)
1369{
1370 STSpace res;
1371 float fAngleSum = 0;
1372 int face=0;
1373 res.vOs.x=0.0f; res.vOs.y=0.0f; res.vOs.z=0.0f;
1374 res.vOt.x=0.0f; res.vOt.y=0.0f; res.vOt.z=0.0f;
1375 res.fMagS = 0; res.fMagT = 0;
1376
1377 for (face=0; face<iFaces; face++)
1378 {
1379 const int f = face_indices[face];
1380
1381 // only valid triangles get to add their contribution
1382 if ( (pTriInfos[f].iFlag&GROUP_WITH_ANY)==0 )
1383 {
1384 SVec3 n, vOs, vOt, p0, p1, p2, v1, v2;
1385 float fCos, fAngle, fMagS, fMagT;
1386 int i=-1, index=-1, i0=-1, i1=-1, i2=-1;
1387 if (piTriListIn[3*f+0]==iVertexRepresentitive) i=0;
1388 else if (piTriListIn[3*f+1]==iVertexRepresentitive) i=1;
1389 else if (piTriListIn[3*f+2]==iVertexRepresentitive) i=2;
1390 assert(i>=0 && i<3);
1391
1392 // project
1393 index = piTriListIn[3*f+i];
1394 n = GetNormal(pContext, index);
1395 vOs = vsub(pTriInfos[f].vOs, vscale(vdot(n,pTriInfos[f].vOs), n));
1396 vOt = vsub(pTriInfos[f].vOt, vscale(vdot(n,pTriInfos[f].vOt), n));
1397 if ( VNotZero(vOs) ) vOs = Normalize(vOs);
1398 if ( VNotZero(vOt) ) vOt = Normalize(vOt);
1399
1400 i2 = piTriListIn[3*f + (i<2?(i+1):0)];
1401 i1 = piTriListIn[3*f + i];
1402 i0 = piTriListIn[3*f + (i>0?(i-1):2)];
1403
1404 p0 = GetPosition(pContext, i0);
1405 p1 = GetPosition(pContext, i1);
1406 p2 = GetPosition(pContext, i2);
1407 v1 = vsub(p0,p1);
1408 v2 = vsub(p2,p1);
1409
1410 // project
1411 v1 = vsub(v1, vscale(vdot(n,v1),n)); if ( VNotZero(v1) ) v1 = Normalize(v1);
1412 v2 = vsub(v2, vscale(vdot(n,v2),n)); if ( VNotZero(v2) ) v2 = Normalize(v2);
1413
1414 // weight contribution by the angle
1415 // between the two edge vectors
1416 fCos = vdot(v1,v2); fCos=fCos>1?1:(fCos<(-1) ? (-1) : fCos);
1417 fAngle = (float) acos(fCos);
1418 fMagS = pTriInfos[f].fMagS;
1419 fMagT = pTriInfos[f].fMagT;
1420
1421 res.vOs=vadd(res.vOs, vscale(fAngle,vOs));
1422 res.vOt=vadd(res.vOt,vscale(fAngle,vOt));
1423 res.fMagS+=(fAngle*fMagS);
1424 res.fMagT+=(fAngle*fMagT);
1425 fAngleSum += fAngle;
1426 }
1427 }
1428
1429 // normalize
1430 if ( VNotZero(res.vOs) ) res.vOs = Normalize(res.vOs);
1431 if ( VNotZero(res.vOt) ) res.vOt = Normalize(res.vOt);
1432 if (fAngleSum>0)
1433 {
1434 res.fMagS /= fAngleSum;
1435 res.fMagT /= fAngleSum;
1436 }
1437
1438 return res;
1439}
1440
1441static tbool CompareSubGroups(const SSubGroup * pg1, const SSubGroup * pg2)
1442{
1443 tbool bStillSame=TTRUE;
1444 int i=0;
1445 if (pg1->iNrFaces!=pg2->iNrFaces) return TFALSE;
1446 while (i<pg1->iNrFaces && bStillSame)
1447 {
1448 bStillSame = pg1->pTriMembers[i]==pg2->pTriMembers[i] ? TTRUE : TFALSE;
1449 if (bStillSame) ++i;
1450 }
1451 return bStillSame;
1452}
1453
1454static void QuickSort(int* pSortBuffer, int iLeft, int iRight, unsigned int uSeed)
1455{
1456 int iL, iR, n, index, iMid, iTmp;
1457
1458 // Random
1459 unsigned int t=uSeed&31;
1460 t=(uSeed<<t)|(uSeed>>(32-t));
1461 uSeed=uSeed+t+3;
1462 // Random end
1463
1464 iL=iLeft; iR=iRight;
1465 n = (iR-iL)+1;
1466 assert(n>=0);
1467 index = (int) (uSeed%n);
1468
1469 iMid=pSortBuffer[index + iL];
1470
1471
1472 do
1473 {
1474 while (pSortBuffer[iL] < iMid)
1475 ++iL;
1476 while (pSortBuffer[iR] > iMid)
1477 --iR;
1478
1479 if (iL <= iR)
1480 {
1481 iTmp = pSortBuffer[iL];
1482 pSortBuffer[iL] = pSortBuffer[iR];
1483 pSortBuffer[iR] = iTmp;
1484 ++iL; --iR;
1485 }
1486 }
1487 while (iL <= iR);
1488
1489 if (iLeft < iR)
1490 QuickSort(pSortBuffer, iLeft, iR, uSeed);
1491 if (iL < iRight)
1492 QuickSort(pSortBuffer, iL, iRight, uSeed);
1493}
1494
1495/////////////////////////////////////////////////////////////////////////////////////////////
1496/////////////////////////////////////////////////////////////////////////////////////////////
1497
1498static void QuickSortEdges(SEdge * pSortBuffer, int iLeft, int iRight, const int channel, unsigned int uSeed);
1499static void GetEdge(int * i0_out, int * i1_out, int * edgenum_out, const int indices[], const int i0_in, const int i1_in);
1500
1501static void BuildNeighborsFast(STriInfo pTriInfos[], SEdge * pEdges, const int piTriListIn[], const int iNrTrianglesIn)
1502{
1503 // build array of edges
1504 unsigned int uSeed = INTERNAL_RND_SORT_SEED; // could replace with a random seed?
1505 int iEntries=0, iCurStartIndex=-1, f=0, i=0;
1506 for (f=0; f<iNrTrianglesIn; f++)
1507 for (i=0; i<3; i++)
1508 {
1509 const int i0 = piTriListIn[f*3+i];
1510 const int i1 = piTriListIn[f*3+(i<2?(i+1):0)];
1511 pEdges[f*3+i].i0 = i0 < i1 ? i0 : i1; // put minimum index in i0
1512 pEdges[f*3+i].i1 = !(i0 < i1) ? i0 : i1; // put maximum index in i1
1513 pEdges[f*3+i].f = f; // record face number
1514 }
1515
1516	// sort over all edges by i0; this is the pricey one.
1517 QuickSortEdges(pEdges, 0, iNrTrianglesIn*3-1, 0, uSeed); // sort channel 0 which is i0
1518
1519 // sub sort over i1, should be fast.
1520 // could replace this with a 64 bit int sort over (i0,i1)
1521 // with i0 as msb in the quicksort call above.
1522 iEntries = iNrTrianglesIn*3;
1523 iCurStartIndex = 0;
1524 for (i=1; i<iEntries; i++)
1525 {
1526 if (pEdges[iCurStartIndex].i0 != pEdges[i].i0)
1527 {
1528 const int iL = iCurStartIndex;
1529 const int iR = i-1;
1530 //const int iElems = i-iL;
1531 iCurStartIndex = i;
1532 QuickSortEdges(pEdges, iL, iR, 1, uSeed); // sort channel 1 which is i1
1533 }
1534 }
1535
1536 // sub sort over f, which should be fast.
1537 // this step is to remain compliant with BuildNeighborsSlow() when
1538 // more than 2 triangles use the same edge (such as a butterfly topology).
1539 iCurStartIndex = 0;
1540 for (i=1; i<iEntries; i++)
1541 {
1542 if (pEdges[iCurStartIndex].i0 != pEdges[i].i0 || pEdges[iCurStartIndex].i1 != pEdges[i].i1)
1543 {
1544 const int iL = iCurStartIndex;
1545 const int iR = i-1;
1546 //const int iElems = i-iL;
1547 iCurStartIndex = i;
1548 QuickSortEdges(pEdges, iL, iR, 2, uSeed); // sort channel 2 which is f
1549 }
1550 }
1551
1552	// pair up adjacent triangles
1553 for (i=0; i<iEntries; i++)
1554 {
1555 const int i0=pEdges[i].i0;
1556 const int i1=pEdges[i].i1;
1557 const int f = pEdges[i].f;
1558 tbool bUnassigned_A;
1559
1560 int i0_A, i1_A;
1561 int edgenum_A, edgenum_B=0; // 0,1 or 2
1562 GetEdge(&i0_A, &i1_A, &edgenum_A, &piTriListIn[f*3], i0, i1); // resolve index ordering and edge_num
1563 bUnassigned_A = pTriInfos[f].FaceNeighbors[edgenum_A] == -1 ? TTRUE : TFALSE;
1564
1565 if (bUnassigned_A)
1566 {
1567 // get true index ordering
1568 int j=i+1, t;
1569 tbool bNotFound = TTRUE;
1570 while (j<iEntries && i0==pEdges[j].i0 && i1==pEdges[j].i1 && bNotFound)
1571 {
1572 tbool bUnassigned_B;
1573 int i0_B, i1_B;
1574 t = pEdges[j].f;
1575 // flip i0_B and i1_B
1576 GetEdge(&i1_B, &i0_B, &edgenum_B, &piTriListIn[t*3], pEdges[j].i0, pEdges[j].i1); // resolve index ordering and edge_num
1577 //assert(!(i0_A==i1_B && i1_A==i0_B));
1578 bUnassigned_B = pTriInfos[t].FaceNeighbors[edgenum_B]==-1 ? TTRUE : TFALSE;
1579 if (i0_A==i0_B && i1_A==i1_B && bUnassigned_B)
1580 bNotFound = TFALSE;
1581 else
1582 ++j;
1583 }
1584
1585 if (!bNotFound)
1586 {
1587 int t = pEdges[j].f;
1588 pTriInfos[f].FaceNeighbors[edgenum_A] = t;
1589 //assert(pTriInfos[t].FaceNeighbors[edgenum_B]==-1);
1590 pTriInfos[t].FaceNeighbors[edgenum_B] = f;
1591 }
1592 }
1593 }
1594}
1595
1596static void BuildNeighborsSlow(STriInfo pTriInfos[], const int piTriListIn[], const int iNrTrianglesIn)
1597{
1598 int f=0, i=0;
1599 for (f=0; f<iNrTrianglesIn; f++)
1600 {
1601 for (i=0; i<3; i++)
1602 {
1603 // if unassigned
1604 if (pTriInfos[f].FaceNeighbors[i] == -1)
1605 {
1606 const int i0_A = piTriListIn[f*3+i];
1607 const int i1_A = piTriListIn[f*3+(i<2?(i+1):0)];
1608
1609 // search for a neighbor
1610 tbool bFound = TFALSE;
1611 int t=0, j=0;
1612 while (!bFound && t<iNrTrianglesIn)
1613 {
1614 if (t!=f)
1615 {
1616 j=0;
1617 while (!bFound && j<3)
1618 {
1619 // in rev order
1620 const int i1_B = piTriListIn[t*3+j];
1621 const int i0_B = piTriListIn[t*3+(j<2?(j+1):0)];
1622 //assert(!(i0_A==i1_B && i1_A==i0_B));
1623 if (i0_A==i0_B && i1_A==i1_B)
1624 bFound = TTRUE;
1625 else
1626 ++j;
1627 }
1628 }
1629
1630 if (!bFound) ++t;
1631 }
1632
1633 // assign neighbors
1634 if (bFound)
1635 {
1636 pTriInfos[f].FaceNeighbors[i] = t;
1637 //assert(pTriInfos[t].FaceNeighbors[j]==-1);
1638 pTriInfos[t].FaceNeighbors[j] = f;
1639 }
1640 }
1641 }
1642 }
1643}
1644
1645static void QuickSortEdges(SEdge * pSortBuffer, int iLeft, int iRight, const int channel, unsigned int uSeed)
1646{
1647 unsigned int t;
1648 int iL, iR, n, index, iMid;
1649
1650 // early out
1651 SEdge sTmp;
1652 const int iElems = iRight-iLeft+1;
1653 if (iElems<2) return;
1654 else if (iElems==2)
1655 {
1656 if (pSortBuffer[iLeft].array[channel] > pSortBuffer[iRight].array[channel])
1657 {
1658 sTmp = pSortBuffer[iLeft];
1659 pSortBuffer[iLeft] = pSortBuffer[iRight];
1660 pSortBuffer[iRight] = sTmp;
1661 }
1662 return;
1663 }
1664
1665 // Random
1666 t=uSeed&31;
1667 t=(uSeed<<t)|(uSeed>>(32-t));
1668 uSeed=uSeed+t+3;
1669 // Random end
1670
1671 iL = iLeft;
1672 iR = iRight;
1673 n = (iR-iL)+1;
1674 assert(n>=0);
1675 index = (int) (uSeed%n);
1676
1677 iMid=pSortBuffer[index + iL].array[channel];
1678
1679 do
1680 {
1681 while (pSortBuffer[iL].array[channel] < iMid)
1682 ++iL;
1683 while (pSortBuffer[iR].array[channel] > iMid)
1684 --iR;
1685
1686 if (iL <= iR)
1687 {
1688 sTmp = pSortBuffer[iL];
1689 pSortBuffer[iL] = pSortBuffer[iR];
1690 pSortBuffer[iR] = sTmp;
1691 ++iL; --iR;
1692 }
1693 }
1694 while (iL <= iR);
1695
1696 if (iLeft < iR)
1697 QuickSortEdges(pSortBuffer, iLeft, iR, channel, uSeed);
1698 if (iL < iRight)
1699 QuickSortEdges(pSortBuffer, iL, iRight, channel, uSeed);
1700}
1701
1702// resolve ordering and edge number
1703static void GetEdge(int * i0_out, int * i1_out, int * edgenum_out, const int indices[], const int i0_in, const int i1_in)
1704{
1705 *edgenum_out = -1;
1706
1707 // test if first index is on the edge
1708 if (indices[0]==i0_in || indices[0]==i1_in)
1709 {
1710 // test if second index is on the edge
1711 if (indices[1]==i0_in || indices[1]==i1_in)
1712 {
1713 edgenum_out[0]=0; // first edge
1714 i0_out[0]=indices[0];
1715 i1_out[0]=indices[1];
1716 }
1717 else
1718 {
1719 edgenum_out[0]=2; // third edge
1720 i0_out[0]=indices[2];
1721 i1_out[0]=indices[0];
1722 }
1723 }
1724 else
1725 {
1726	// only the second and third indices are on the edge
1727 edgenum_out[0]=1; // second edge
1728 i0_out[0]=indices[1];
1729 i1_out[0]=indices[2];
1730 }
1731}
1732
1733
1734/////////////////////////////////////////////////////////////////////////////////////////////
1735/////////////////////////////////// Degenerate triangles ////////////////////////////////////
1736
1737static void DegenPrologue(STriInfo pTriInfos[], int piTriList_out[], const int iNrTrianglesIn, const int iTotTris)
1738{
1739 int iNextGoodTriangleSearchIndex=-1;
1740 tbool bStillFindingGoodOnes;
1741
1742 // locate quads with only one good triangle
1743 int t=0;
1744 while (t<(iTotTris-1))
1745 {
1746 const int iFO_a = pTriInfos[t].iOrgFaceNumber;
1747 const int iFO_b = pTriInfos[t+1].iOrgFaceNumber;
1748 if (iFO_a==iFO_b) // this is a quad
1749 {
1750 const tbool bIsDeg_a = (pTriInfos[t].iFlag&MARK_DEGENERATE)!=0 ? TTRUE : TFALSE;
1751 const tbool bIsDeg_b = (pTriInfos[t+1].iFlag&MARK_DEGENERATE)!=0 ? TTRUE : TFALSE;
1752 if ((bIsDeg_a^bIsDeg_b)!=0)
1753 {
1754 pTriInfos[t].iFlag |= QUAD_ONE_DEGEN_TRI;
1755 pTriInfos[t+1].iFlag |= QUAD_ONE_DEGEN_TRI;
1756 }
1757 t += 2;
1758 }
1759 else
1760 ++t;
1761 }
1762
1763 // reorder list so all degen triangles are moved to the back
1764 // without reordering the good triangles
1765 iNextGoodTriangleSearchIndex = 1;
1766 t=0;
1767 bStillFindingGoodOnes = TTRUE;
1768 while (t<iNrTrianglesIn && bStillFindingGoodOnes)
1769 {
1770 const tbool bIsGood = (pTriInfos[t].iFlag&MARK_DEGENERATE)==0 ? TTRUE : TFALSE;
1771 if (bIsGood)
1772 {
1773 if (iNextGoodTriangleSearchIndex < (t+2))
1774 iNextGoodTriangleSearchIndex = t+2;
1775 }
1776 else
1777 {
1778 int t0, t1;
1779 // search for the first good triangle.
1780 tbool bJustADegenerate = TTRUE;
1781 while (bJustADegenerate && iNextGoodTriangleSearchIndex<iTotTris)
1782 {
1783 const tbool bIsGood = (pTriInfos[iNextGoodTriangleSearchIndex].iFlag&MARK_DEGENERATE)==0 ? TTRUE : TFALSE;
1784 if (bIsGood) bJustADegenerate=TFALSE;
1785 else ++iNextGoodTriangleSearchIndex;
1786 }
1787
1788 t0 = t;
1789 t1 = iNextGoodTriangleSearchIndex;
1790 ++iNextGoodTriangleSearchIndex;
1791 assert(iNextGoodTriangleSearchIndex > (t+1));
1792
1793 // swap triangle t0 and t1
1794 if (!bJustADegenerate)
1795 {
1796 int i=0;
1797 for (i=0; i<3; i++)
1798 {
1799 const int index = piTriList_out[t0*3+i];
1800 piTriList_out[t0*3+i] = piTriList_out[t1*3+i];
1801 piTriList_out[t1*3+i] = index;
1802 }
1803 {
1804 const STriInfo tri_info = pTriInfos[t0];
1805 pTriInfos[t0] = pTriInfos[t1];
1806 pTriInfos[t1] = tri_info;
1807 }
1808 }
1809 else
1810 bStillFindingGoodOnes = TFALSE; // this is not supposed to happen
1811 }
1812
1813 if (bStillFindingGoodOnes) ++t;
1814 }
1815
1816 assert(bStillFindingGoodOnes); // code will still work.
1817 assert(iNrTrianglesIn == t);
1818}
1819
1820static void DegenEpilogue(STSpace psTspace[], STriInfo pTriInfos[], int piTriListIn[], const SMikkTSpaceContext * pContext, const int iNrTrianglesIn, const int iTotTris)
1821{
1822 int t=0, i=0;
1823 // deal with degenerate triangles
1824 // punishment for degenerate triangles is O(N^2)
1825 for (t=iNrTrianglesIn; t<iTotTris; t++)
1826 {
1827 // degenerate triangles on a quad with one good triangle are skipped
1828 // here but processed in the next loop
1829 const tbool bSkip = (pTriInfos[t].iFlag&QUAD_ONE_DEGEN_TRI)!=0 ? TTRUE : TFALSE;
1830
1831 if (!bSkip)
1832 {
1833 for (i=0; i<3; i++)
1834 {
1835 const int index1 = piTriListIn[t*3+i];
1836 // search through the good triangles
1837 tbool bNotFound = TTRUE;
1838 int j=0;
1839 while (bNotFound && j<(3*iNrTrianglesIn))
1840 {
1841 const int index2 = piTriListIn[j];
1842 if (index1==index2) bNotFound=TFALSE;
1843 else ++j;
1844 }
1845
1846 if (!bNotFound)
1847 {
1848 const int iTri = j/3;
1849 const int iVert = j%3;
1850 const int iSrcVert=pTriInfos[iTri].vert_num[iVert];
1851 const int iSrcOffs=pTriInfos[iTri].iTSpacesOffs;
1852 const int iDstVert=pTriInfos[t].vert_num[i];
1853 const int iDstOffs=pTriInfos[t].iTSpacesOffs;
1854
1855 // copy tspace
1856 psTspace[iDstOffs+iDstVert] = psTspace[iSrcOffs+iSrcVert];
1857 }
1858 }
1859 }
1860 }
1861
1862 // deal with degenerate quads with one good triangle
1863 for (t=0; t<iNrTrianglesIn; t++)
1864 {
1865 // this triangle belongs to a quad where the
1866 // other triangle is degenerate
1867 if ( (pTriInfos[t].iFlag&QUAD_ONE_DEGEN_TRI)!=0 )
1868 {
1869 SVec3 vDstP;
1870 int iOrgF=-1, i=0;
1871 tbool bNotFound;
1872 unsigned char * pV = pTriInfos[t].vert_num;
1873 int iFlag = (1<<pV[0]) | (1<<pV[1]) | (1<<pV[2]);
1874 int iMissingIndex = 0;
1875 if ((iFlag&2)==0) iMissingIndex=1;
1876 else if ((iFlag&4)==0) iMissingIndex=2;
1877 else if ((iFlag&8)==0) iMissingIndex=3;
1878
1879 iOrgF = pTriInfos[t].iOrgFaceNumber;
1880 vDstP = GetPosition(pContext, MakeIndex(iOrgF, iMissingIndex));
1881 bNotFound = TTRUE;
1882 i=0;
1883 while (bNotFound && i<3)
1884 {
1885 const int iVert = pV[i];
1886 const SVec3 vSrcP = GetPosition(pContext, MakeIndex(iOrgF, iVert));
1887 if (veq(vSrcP, vDstP)==TTRUE)
1888 {
1889 const int iOffs = pTriInfos[t].iTSpacesOffs;
1890 psTspace[iOffs+iMissingIndex] = psTspace[iOffs+iVert];
1891 bNotFound=TFALSE;
1892 }
1893 else
1894 ++i;
1895 }
1896 assert(!bNotFound);
1897 }
1898 }
1899}
diff --git a/contrib/cgltf-tangents/MikkTSpace/mikktspace.h b/contrib/cgltf-tangents/MikkTSpace/mikktspace.h
new file mode 100644
index 0000000..52c44a7
--- /dev/null
+++ b/contrib/cgltf-tangents/MikkTSpace/mikktspace.h
@@ -0,0 +1,145 @@
1/** \file mikktspace/mikktspace.h
2 * \ingroup mikktspace
3 */
4/**
5 * Copyright (C) 2011 by Morten S. Mikkelsen
6 *
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
10 *
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, subject to the following restrictions:
14 *
15 * 1. The origin of this software must not be misrepresented; you must not
16 * claim that you wrote the original software. If you use this software
17 * in a product, an acknowledgment in the product documentation would be
18 * appreciated but is not required.
19 * 2. Altered source versions must be plainly marked as such, and must not be
20 * misrepresented as being the original software.
21 * 3. This notice may not be removed or altered from any source distribution.
22 */
23
24#ifndef __MIKKTSPACE_H__
25#define __MIKKTSPACE_H__
26
27
28#ifdef __cplusplus
29extern "C" {
30#endif
31
32/* Author: Morten S. Mikkelsen
33 * Version: 1.0
34 *
35 * The files mikktspace.h and mikktspace.c are designed to be
36 * stand-alone files and it is important that they are kept this way.
37 * Not having dependencies on structures/classes/libraries specific
38 * to the program, in which they are used, allows them to be copied
39 * and used as is into any tool, program or plugin.
40 * The code is designed to consistently generate the same
41 * tangent spaces, for a given mesh, in any tool in which it is used.
42 * This is done by performing an internal welding step and subsequently an order-independent evaluation
43 * of tangent space for meshes consisting of triangles and quads.
44 * This means faces can be received in any order and the same is true for
45 * the order of vertices of each face. The generated result will not be affected
46 * by such reordering. Additionally, whether degenerate (vertices or texture coordinates)
47 * primitives are present or not will not affect the generated results either.
48 * Once tangent space calculation is done the vertices of degenerate primitives will simply
49 * inherit tangent space from neighboring non degenerate primitives.
50 * The analysis behind this implementation can be found in my master's thesis
51 * which is available for download --> http://image.diku.dk/projects/media/morten.mikkelsen.08.pdf
52 * Note that though the tangent spaces at the vertices are generated in an order-independent way
53 * by this implementation, the interpolated tangent space is still affected by which diagonal is
54 * chosen to split each quad. A sensible solution is to have your tools pipeline always
55 * split quads by the shortest diagonal. This choice is order-independent and works with mirroring.
56 * If these have the same length then compare the diagonals defined by the texture coordinates.
57 * XNormal, which is a tool for baking normal maps, allows you to write your own tangent space plugin
58 * and also a quad triangulator plugin.
59 */
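/* For example, a triangulator could implement the order-independent,
 * shortest-diagonal split described above along these lines (a sketch, not
 * part of this API; p[0..3] and t[0..3] are the quad's corner positions and
 * texture coordinates, and sub3/dot3/sub2/dot2 are placeholder vector helpers):
 *
 *   float len02 = dot3(sub3(p[2], p[0]), sub3(p[2], p[0]));
 *   float len13 = dot3(sub3(p[3], p[1]), sub3(p[3], p[1]));
 *   if (len02 == len13) {  // tie: compare the texture-coordinate diagonals
 *     len02 = dot2(sub2(t[2], t[0]), sub2(t[2], t[0]));
 *     len13 = dot2(sub2(t[3], t[1]), sub2(t[3], t[1]));
 *   }
 *   // split along the (p[0], p[2]) diagonal if len02 <= len13, else (p[1], p[3])
 */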
60
61
62typedef int tbool;
63typedef struct SMikkTSpaceContext SMikkTSpaceContext;
64
65typedef struct {
66 // Returns the number of faces (triangles/quads) on the mesh to be processed.
67 int (*m_getNumFaces)(const SMikkTSpaceContext * pContext);
68
69 // Returns the number of vertices on face number iFace
70 // iFace is a number in the range {0, 1, ..., getNumFaces()-1}
71 int (*m_getNumVerticesOfFace)(const SMikkTSpaceContext * pContext, const int iFace);
72
73 // returns the position/normal/texcoord of the referenced face of vertex number iVert.
74 // iVert is in the range {0,1,2} for triangles and {0,1,2,3} for quads.
75 void (*m_getPosition)(const SMikkTSpaceContext * pContext, float fvPosOut[], const int iFace, const int iVert);
76 void (*m_getNormal)(const SMikkTSpaceContext * pContext, float fvNormOut[], const int iFace, const int iVert);
77 void (*m_getTexCoord)(const SMikkTSpaceContext * pContext, float fvTexcOut[], const int iFace, const int iVert);
78
79 // either (or both) of the two setTSpace callbacks can be set.
80 // The call-back m_setTSpaceBasic() is sufficient for basic normal mapping.
81
82 // This function is used to return the tangent and fSign to the application.
83 // fvTangent is a unit length vector.
84 // For normal maps it is sufficient to use the following simplified version of the bitangent which is generated at pixel/vertex level.
85 // bitangent = fSign * cross(vN, tangent);
86 // Note that the results are returned unindexed. It is possible to generate a new index list,
87 // but averaging/overwriting tangent spaces by using an already existing index list WILL produce INCORRECT results.
88 // DO NOT use an already existing index list.
89 void (*m_setTSpaceBasic)(const SMikkTSpaceContext * pContext, const float fvTangent[], const float fSign, const int iFace, const int iVert);
90
91 // This function is used to return tangent space results to the application.
92 // fvTangent and fvBiTangent are unit length vectors and fMagS and fMagT are their
93 // true magnitudes which can be used for relief mapping effects.
94 // fvBiTangent is the "real" bitangent and thus may not be perpendicular to fvTangent.
95 // However, both are perpendicular to the vertex normal.
96 // For normal maps it is sufficient to use the following simplified version of the bitangent which is generated at pixel/vertex level.
97 // fSign = bIsOrientationPreserving ? 1.0f : (-1.0f);
98 // bitangent = fSign * cross(vN, tangent);
99 // Note that the results are returned unindexed. It is possible to generate a new index list,
100 // but averaging/overwriting tangent spaces by using an already existing index list WILL produce INCORRECT results.
101 // DO NOT use an already existing index list.
102 void (*m_setTSpace)(const SMikkTSpaceContext * pContext, const float fvTangent[], const float fvBiTangent[], const float fMagS, const float fMagT,
103 const tbool bIsOrientationPreserving, const int iFace, const int iVert);
104} SMikkTSpaceInterface;
105
106struct SMikkTSpaceContext
107{
108 SMikkTSpaceInterface * m_pInterface; // initialized with callback functions
109 void * m_pUserData; // pointer to client side mesh data etc. (passed as the first parameter with every interface call)
110};
111
112// these are both thread safe!
113tbool genTangSpaceDefault(const SMikkTSpaceContext * pContext); // Default (recommended) fAngularThreshold is 180 degrees (which means threshold disabled)
114tbool genTangSpace(const SMikkTSpaceContext * pContext, const float fAngularThreshold);
115
116
117// To avoid visual errors (distortions/unwanted hard edges in lighting), when using sampled normal maps, the
118// normal map sampler must use the exact inverse of the pixel shader transformation.
119// The most efficient transformation we can possibly do in the pixel shader is
120// achieved by using, directly, the "unnormalized" interpolated tangent, bitangent and vertex normal: vT, vB and vN.
121// pixel shader (fast transform out)
122// vNout = normalize( vNt.x * vT + vNt.y * vB + vNt.z * vN );
123// where vNt is the tangent space normal. The normal map sampler must likewise use the
124// interpolated and "unnormalized" tangent, bitangent and vertex normal to be compliant with the pixel shader.
125// sampler does (exact inverse of pixel shader):
126// float3 row0 = cross(vB, vN);
127// float3 row1 = cross(vN, vT);
128// float3 row2 = cross(vT, vB);
129// float fSign = dot(vT, row0)<0 ? -1 : 1;
130// vNt = normalize( fSign * float3(dot(vNout,row0), dot(vNout,row1), dot(vNout,row2)) );
131// where vNout is the sampled normal in some chosen 3D space.
132//
133// Should you choose to reconstruct the bitangent in the pixel shader instead
134// of the vertex shader, as explained earlier, then be sure to do this in the normal map sampler also.
135// Finally, beware of quad triangulations. If the normal map sampler doesn't use the same triangulation of
136// quads as your renderer, then problems will occur since the interpolated tangent spaces will differ
137// even though the vertex-level tangent spaces match. This can be solved either by triangulating before
138// sampling/exporting or by using the order-independent choice of diagonal for splitting quads suggested earlier.
139// However, this must be used both by the sampler and your tools/rendering pipeline.
140
141#ifdef __cplusplus
142}
143#endif
144
145#endif
diff --git a/contrib/cgltf-tangents/README.md b/contrib/cgltf-tangents/README.md
new file mode 100644
index 0000000..2a68b27
--- /dev/null
+++ b/contrib/cgltf-tangents/README.md
@@ -0,0 +1,42 @@
1# cgltf-tangents
2
3A library to compute missing tangent vectors in glTF models using MikkTSpace.
4
5## Example
6
7```
8// Load the glTF scene and buffers as usual.
9cgltf_result result = cgltf_parse_file(&options, filepath, &data);
10cgltf_load_buffers(&options, data, filepath);
11
12// Compute missing tangents.
13cgltfTangentBuffer* tangent_buffers = 0;
14cgltf_size num_tangent_buffers = 0;
15cgltf_compute_tangents(&options, data, &tangent_buffers, &num_tangent_buffers);
16```
17
18## About
19
20This is a single-header/source library that combines
21[MikkTSpace](https://github.com/mmikk/MikkTSpace) and
22[cgltf](https://github.com/jkuhlmann/cgltf) to compute missing tangent vectors
23for models.
24
25Mesh primitives in glTF may have a normal map but not necessarily tangent
26vectors. An example is the
27[DamagedHelmet](https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/2.0/DamagedHelmet/glTF)
28sample. From the
29[spec](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#meshes):
30
31*"When tangents are not specified, client implementations SHOULD calculate
32tangents using default MikkTSpace algorithms with the specified vertex
33positions, normals, and texture coordinates associated with the normal texture."*
34
35cgltf-tangents takes an input glTF scene and scans it for mesh primitives that
36have a normal map but no tangents. It then invokes MikkTSpace to compute
37tangents for those mesh primitives and outputs an array of tangent buffers.
38The client can then upload these buffers to GPU memory for rendering.
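
With default (zeroed) `cgltf_options`, both the output array and each buffer's
`data` pointer are allocated with `malloc()`, so the client owns and eventually
frees them. Continuing the example above (`upload_vertex_buffer` is a
hypothetical engine function, not part of this library):

```
for (cgltf_size i = 0; i < num_tangent_buffers; ++i) {
  const cgltfTangentBuffer* buf = &tangent_buffers[i];
  // buf->primitive points back to the mesh primitive these tangents were
  // computed for; buf->data holds tightly packed (x, y, z, sign) floats.
  upload_vertex_buffer(buf->primitive, buf->data, buf->size_bytes);
  free(buf->data); // safe once the data has been uploaded or copied
}
free(tangent_buffers);
```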
39
40See `test/` for a complete example.
41
42MikkTSpace is packaged here for convenience. cgltf must be obtained separately.
diff --git a/contrib/cgltf-tangents/cgltf_tangents.c b/contrib/cgltf-tangents/cgltf_tangents.c
new file mode 100644
index 0000000..80b1e56
--- /dev/null
+++ b/contrib/cgltf-tangents/cgltf_tangents.c
@@ -0,0 +1,618 @@
1/*
2Copyright 2022 Marc Sunet
3
4Redistribution and use in source and binary forms, with or without modification,
5are permitted provided that the following conditions are met:
6
71. Redistributions of source code must retain the above copyright notice, this
8list of conditions and the following disclaimer.
9
102. Redistributions in binary form must reproduce the above copyright notice,
11this list of conditions and the following disclaimer in the documentation and/or
12other materials provided with the distribution.
13
14THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
15ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
18ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24*/
25#include "cgltf_tangents.h"
26#include "cgltf.h"
27
28#include "MikkTSpace/mikktspace.h"
29
30#include <assert.h>
31#include <stdbool.h>
32#include <stdint.h>
33#include <stdlib.h>
34#include <string.h>
35
36#ifdef CGLTF_TANGENTS_DEBUG
37#include <stdio.h>
38#define DLOG printf
39#else
40#define DLOG(...)
41#endif
42
43#include <stdio.h> // TODO: Remove me.
44
45#define CGLTF_OPTIONS_MALLOC(size) \
46 options->memory.alloc(options->memory.user_data, size)
47
48#define CGLTF_OPTIONS_FREE(ptr) \
49 options->memory.free(options->memory.user_data, ptr)
50
51static void* cgltf_default_alloc(void* user, cgltf_size size) {
52 (void)user;
53 return malloc(size);
54}
55
56static void cgltf_default_free(void* user, void* ptr) {
57 (void)user;
58 free(ptr);
59}
60
61static const cgltf_size NUM_TANGENT_COMPONENTS = 4; // X,Y,Z,fSign
62
63static float normalize_i8(int8_t x) { return (float)x / 127.0f; }  // glTF: f = max(c/127, -1)
64static float normalize_u8(uint8_t x) { return (float)x / 255.0f; }
65static float normalize_i16(int16_t x) { return (float)x / 32767.0f; }  // glTF: f = max(c/32767, -1)
66static float normalize_u16(uint16_t x) { return (float)x / 65535.0f; }
67static float normalize_u32(uint32_t x) { return (float)x / 4294967295.0f; }
68
69static cgltf_size num_vertex_attrib_components(cgltf_type type) {
70 switch (type) {
71 case cgltf_type_scalar:
72 return 1;
73 case cgltf_type_vec2:
74 return 2;
75 case cgltf_type_vec3:
76 return 3;
77 case cgltf_type_vec4:
78 return 4;
79 default:
80 assert(false);
81 return 0;
82 }
83}
84
85static cgltf_size cgltf_component_type_size_bytes(cgltf_component_type type) {
86 switch (type) {
87 case cgltf_component_type_r_8:
88 return 1;
89 case cgltf_component_type_r_8u:
90 return 1;
91 case cgltf_component_type_r_16:
92 return 2;
93 case cgltf_component_type_r_16u:
94 return 2;
95 case cgltf_component_type_r_32u:
96 return 4;
97 case cgltf_component_type_r_32f:
98 return 4;
99 default:
100 assert(false);
101 return 0;
102 }
103}
104
105static cgltf_size default_stride(cgltf_type type,
106 cgltf_component_type component_type) {
107 return num_vertex_attrib_components(type) *
108 cgltf_component_type_size_bytes(component_type);
109}
110
111// -----------------------------------------------------------------------------
112// MikkTSpace interface
113
114// An array of values for a given vertex attribute or for vertex indices.
115// For positions and normals, glTF mandates floats.
116// Texcoords and indices can be different types and vary in size: 8-bit, 16-bit,
117// or 32-bit.
118// We store void* pointers and cast to char* where byte pointer arithmetic is needed.
119typedef struct Buffer {
120 const void* start; // X-coordinate of the first attribute.
121 const void* end; // One byte past the end of the buffer.
122 cgltf_size stride_bytes; // Stride in bytes between each value.
123 cgltf_component_type type; // Type of each value in the buffer.
124} Buffer;
125
126// User data for mesh processing.
127// See: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#meshes
128// Buffer pointers have the accessor + view offsets baked in so that we do the
129// addition only once.
130typedef struct SMikkUserData {
131 const cgltf_primitive* primitive;
132 // Index buffer may be empty (mesh primitive has no indices).
133 Buffer indices;
134 // Vertex attributes.
135 Buffer positions;
136 Buffer normals;
137 Buffer texcoords;
138 // Output tangents.
139 void* tangents;
140} SMikkUserData;
141
142static cgltf_size get_vertex_index(const SMikkUserData* data, cgltf_size iFace,
143 cgltf_size iVert) {
144 const cgltf_primitive* primitive = data->primitive;
145
146 // First compute a vertex index as if the mesh primitive had no indices.
147 cgltf_size vertex_idx = 0;
148 switch (primitive->type) {
149 case cgltf_primitive_type_triangles:
150 vertex_idx = iFace * 3 + iVert;
151 break;
152 case cgltf_primitive_type_triangle_strip:
153 // For triangle strips:
154 // face 0 -> verts 0, 1, 2
155 // face 1 -> verts 1, 3, 2 (1, 2, 3 flipped)
156 // face 2 -> verts 2, 3, 4
157 // face 3 -> verts 3, 5, 4 (3, 4, 5 flipped)
158 // ...
159 // face N=2k -> verts N, N+1, N+2
160 // face N=2k+1 -> verts N, N+2, N+1
161 if (iFace & 1) {
162 // Walk the vertices of odd faces in reverse so the winding stays
163 // consistent with the even faces (a winding-preserving cyclic rotation):
164 // iVert = 0 -> vert N+2
165 // iVert = 1 -> vert N+1
166 // iVert = 2 -> vert N
167 vertex_idx = iFace + (2 - iVert);
168 } else {
169 vertex_idx = iFace + iVert;
170 }
171 break;
172 case cgltf_primitive_type_triangle_fan:
173 // For triangle fans:
174 // face 0 -> verts 0, 1, 2
175 // face 1 -> verts 0, 2, 3
176 // face 2 -> verts 0, 3, 4
177 // face 3 -> verts 0, 4, 5
178 // ...
179 // face N -> verts 0, N+1, N+2
180 if (iVert == 0) {
181 vertex_idx = 0;
182 } else {
183 vertex_idx = iFace + iVert;
184 }
185 break;
186 default:
187 assert(false);
188 break;
189 }
190
191 // If the mesh primitive has vertex indices, then vertex_idx is actually the
192 // index of the index. Index the index buffer with vertex_idx to find the
193 // real vertex index.
194 if (primitive->indices != NULL) {
195 const void* p_idx = (const char*)data->indices.start +
196 vertex_idx * data->indices.stride_bytes;
197 switch (data->indices.type) {
198 case cgltf_component_type_r_8:
199 vertex_idx = *((int8_t*)p_idx);
200 break;
201 case cgltf_component_type_r_8u:
202 vertex_idx = *((uint8_t*)p_idx);
203 break;
204 case cgltf_component_type_r_16:
205 vertex_idx = *((int16_t*)p_idx);
206 break;
207 case cgltf_component_type_r_16u:
208 vertex_idx = *((uint16_t*)p_idx);
209 break;
210 case cgltf_component_type_r_32u:
211 vertex_idx = *((uint32_t*)p_idx);
212 break;
213 default:
214 assert(false);
215 break;
216 }
217 }
218
219 return vertex_idx;
220}
221
222static const void* get_vertex(const Buffer* buffer, cgltf_size index) {
223 // Stride is the offset in bytes between vertex attributes.
224 const void* vertex = (const char*)buffer->start + buffer->stride_bytes * index;
225 assert(vertex < buffer->end);
226 return vertex;
227}
228
229static const void* get_position(const SMikkUserData* data, cgltf_size index) {
230 return get_vertex(&data->positions, index);
231}
232
233static const void* get_normal(const SMikkUserData* data, cgltf_size index) {
234 return get_vertex(&data->normals, index);
235}
236
237static const void* get_texcoord(const SMikkUserData* data, cgltf_size index) {
238 return get_vertex(&data->texcoords, index);
239}
240
241static float* get_tangent(void* buffer, cgltf_size index) {
242 // Tangents are tightly packed.
243 return (float*)(buffer) + NUM_TANGENT_COMPONENTS * index;
244}
245
246static int SMikk_get_num_faces(const SMikkTSpaceContext* pContext) {
247 SMikkUserData* data = (SMikkUserData*)pContext->m_pUserData;
248 const cgltf_primitive* primitive = data->primitive;
249
250 // Find the number of effective vertices (vertices or indices) in the mesh
251 // primitive.
252 //
253 // https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#meshes
254 //
255 // "All attribute accessors for a given primitive MUST have the same count.
256 // When indices property is not defined, attribute accessors' count indicates
257 // the number of vertices to render; when indices property is defined, it
258 // indicates the upper (exclusive) bound on the index values in the indices
259 // accessor, i.e., all index values MUST be less than attribute accessors'
260 // count."
261 const cgltf_size num_verts = (primitive->indices != NULL)
262 ? primitive->indices->count
263 : primitive->attributes[0].data->count; // all attribute accessors share the same count
264
265 // Determine the number of faces given the number of vertices.
266 //
267 // We use the fact that glTF only supports triangles for faces.
268 // https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#meshes
269 switch (primitive->type) {
270 case cgltf_primitive_type_triangles:
271 return (int)num_verts / 3;
272 case cgltf_primitive_type_triangle_strip:
273 case cgltf_primitive_type_triangle_fan:
274 return (int)num_verts - 2;
275 default:
276 return 0;
277 }
278}
279
280int SMikk_get_num_vertices_of_face(const SMikkTSpaceContext* pContext,
281 const int iFace) {
282 // Triangles are the only faces supported by glTF.
283 // https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#meshes
284 return 3;
285}
286
287void SMikk_get_position(const SMikkTSpaceContext* pContext, float fvPosOut[],
288 const int iFace, const int iVert) {
289 const SMikkUserData* data = (SMikkUserData*)pContext->m_pUserData;
290 const cgltf_primitive* primitive = data->primitive;
291
292 const cgltf_size idx = get_vertex_index(data, iFace, iVert);
293 const float* coord = get_position(data, idx);
294 fvPosOut[0] = *coord++;
295 fvPosOut[1] = *coord++;
296 fvPosOut[2] = *coord;
297 DLOG("Position (face: %d, vert: %d): %f, %f, %f; idx: %lu\n", iFace, iVert,
298 fvPosOut[0], fvPosOut[1], fvPosOut[2], idx);
299}
300
301void SMikk_get_normal(const SMikkTSpaceContext* pContext, float fvNormOut[],
302 const int iFace, const int iVert) {
303 const SMikkUserData* data = (SMikkUserData*)pContext->m_pUserData;
304 const cgltf_primitive* primitive = data->primitive;
305
306 const cgltf_size idx = get_vertex_index(data, iFace, iVert);
307 const float* coord = get_normal(data, idx);
308 fvNormOut[0] = *coord++;
309 fvNormOut[1] = *coord++;
310 fvNormOut[2] = *coord;
311 DLOG("Normal (face: %d, vert: %d): %f, %f, %f\n", iFace, iVert, fvNormOut[0],
312 fvNormOut[1], fvNormOut[2]);
313}
314
315void SMikk_get_texcoord(const SMikkTSpaceContext* pContext, float fvTexcOut[],
316 const int iFace, const int iVert) {
317 const SMikkUserData* data = (SMikkUserData*)pContext->m_pUserData;
318 const cgltf_primitive* primitive = data->primitive;
319
320 const cgltf_size idx = get_vertex_index(data, iFace, iVert);
321 const void* coord = get_texcoord(data, idx);
322 switch (data->texcoords.type) {
323 case cgltf_component_type_r_8: {
324 const int8_t* c = coord;
325 fvTexcOut[0] = normalize_i8(*c++);
326 fvTexcOut[1] = normalize_i8(*c);
327 break;
328 }
329 case cgltf_component_type_r_8u: {
330 const uint8_t* c = coord;
331 fvTexcOut[0] = normalize_u8(*c++);
332 fvTexcOut[1] = normalize_u8(*c);
333 break;
334 }
335 case cgltf_component_type_r_16: {
336 const int16_t* c = coord;
337 fvTexcOut[0] = normalize_i16(*c++);
338 fvTexcOut[1] = normalize_i16(*c);
339 break;
340 }
341 case cgltf_component_type_r_16u: {
342 const uint16_t* c = coord;
343 fvTexcOut[0] = normalize_u16(*c++);
344 fvTexcOut[1] = normalize_u16(*c);
345 break;
346 }
347 case cgltf_component_type_r_32u: {
348 const uint32_t* c = coord;
349 fvTexcOut[0] = normalize_u32(*c++);
350 fvTexcOut[1] = normalize_u32(*c);
351 break;
352 }
353 case cgltf_component_type_r_32f: {
354 const float* c = coord;
355 fvTexcOut[0] = *c++;
356 fvTexcOut[1] = *c;
357 break;
358 }
359 default:
360 assert(false);
361 break;
362 }
363 DLOG("Texcoord (face: %d, vert: %d): %f, %f\n", iFace, iVert, fvTexcOut[0],
364 fvTexcOut[1]);
365}
366
367void SMikk_set_TSpace_basic(const SMikkTSpaceContext* pContext,
368 const float fvTangent[], const float fSign,
369 const int iFace, const int iVert) {
370 SMikkUserData* data = (SMikkUserData*)pContext->m_pUserData;
371 const cgltf_primitive* primitive = data->primitive;
372
373 const cgltf_size idx = get_vertex_index(data, iFace, iVert);
374 float* coord = get_tangent(data->tangents, idx);
375 *coord++ = fvTangent[0];
376 *coord++ = fvTangent[1];
377 *coord++ = fvTangent[2];
378 *coord = fSign;
379 DLOG("Tangent (face: %d, vert: %d): %f, %f, %f; sign: %f\n", iFace, iVert,
380 fvTangent[0], fvTangent[1], fvTangent[2], fSign);
381}
382
383// -----------------------------------------------------------------------------
384
385static bool has_normal_map(const cgltf_primitive* primitive) {
386 return (primitive->material != NULL) &&
387 (primitive->material->normal_texture.texture != NULL);
388}
389
390static const cgltf_attribute* find_attribute(const cgltf_primitive* primitive,
391 cgltf_attribute_type type) {
392 for (cgltf_size i = 0; i < primitive->attributes_count; ++i) {
393 const cgltf_attribute* attrib = &primitive->attributes[i];
394 if (attrib->type == type) {
395 return attrib;
396 }
397 }
398 return NULL;
399}
400
401static bool has_attribute(const cgltf_primitive* primitive,
402 cgltf_attribute_type type) {
403 return find_attribute(primitive, type) != NULL;
404}
405
406static bool has_positions3d(const cgltf_primitive* primitive) {
407 const cgltf_attribute* attrib =
408 find_attribute(primitive, cgltf_attribute_type_position);
409 if (attrib) {
410 return attrib->data->type == cgltf_type_vec3;
411 }
412 return false;
413}
414
415static bool has_normals(const cgltf_primitive* primitive) {
416 return has_attribute(primitive, cgltf_attribute_type_normal);
417}
418
419static bool has_texcoords(const cgltf_primitive* primitive) {
420 return has_attribute(primitive, cgltf_attribute_type_texcoord);
421}
422
423static bool has_tangents(const cgltf_primitive* primitive) {
424 return has_attribute(primitive, cgltf_attribute_type_tangent);
425}
426
427static bool has_indices(const cgltf_primitive* primitive) {
428 return primitive->indices != 0;
429}
430
431static cgltfTangentBuffer compute_tangents(const cgltf_options* options,
432 const cgltf_data* data,
433 cgltf_primitive* primitive) {
434 cgltfTangentBuffer buffer = {0};
435 SMikkUserData user = {0};
436 cgltf_size num_verts = 0;
437
438 user.primitive = primitive;
439
440 if (primitive->indices != NULL) {
441 const cgltf_accessor* accessor = primitive->indices;
442 const cgltf_buffer_view* view = accessor->buffer_view;
443 const cgltf_size offset_bytes = accessor->offset + view->offset;
444 const void* buffer_data = (const char*)view->buffer->data + offset_bytes;
445 const void* buffer_end = (const char*)view->buffer->data + view->offset + view->size;
446
447 user.indices.start = buffer_data;
448 user.indices.end = buffer_end;
449 // Indices are tightly packed; the stride is simply the element size.
450 user.indices.stride_bytes =
451 default_stride(accessor->type, accessor->component_type);
452 user.indices.type = accessor->component_type;
453 }
454
455 for (cgltf_size i = 0; i < primitive->attributes_count; ++i) {
456 const cgltf_attribute* attrib = &primitive->attributes[i];
457
458 if ((attrib->type == cgltf_attribute_type_position) ||
459 (attrib->type == cgltf_attribute_type_normal) ||
460 (attrib->type == cgltf_attribute_type_texcoord)) {
461 const cgltf_accessor* accessor = attrib->data;
462 const cgltf_buffer_view* view = accessor->buffer_view;
463 const cgltf_buffer* buffer = view->buffer;
464 const cgltf_size offset_bytes = accessor->offset + view->offset;
465 const cgltf_size stride_bytes =
466 view->stride > 0
467 ? view->stride
468 : default_stride(accessor->type, accessor->component_type);
469 // const cgltf_size size_bytes = view->size;
470 const void* buffer_data = (const char*)view->buffer->data + offset_bytes;
471 const void* buffer_end = (const char*)view->buffer->data + view->offset + view->size;
472
473 Buffer* attrib_buffer = 0;
474
475 if (attrib->type == cgltf_attribute_type_position) {
476 // glTF currently mandates vec3 for positions. Caller should ensure
477 // this.
478 assert(accessor->type == cgltf_type_vec3);
479 num_verts = attrib->data->count;
480 attrib_buffer = &user.positions;
481 } else if (attrib->type == cgltf_attribute_type_normal) {
482 attrib_buffer = &user.normals;
483 } else if (attrib->type == cgltf_attribute_type_texcoord) {
484 attrib_buffer = &user.texcoords;
485 }
486
487 attrib_buffer->start = buffer_data;
488 attrib_buffer->end = buffer_end;
489 attrib_buffer->stride_bytes = stride_bytes;
490 attrib_buffer->type = accessor->component_type;
491 }
492 }
493
494 assert(user.positions.start);
495 assert(user.positions.end);
496 assert(user.normals.start);
497 assert(user.normals.end);
498 assert(user.texcoords.start);
499 assert(user.texcoords.end);
500 assert(num_verts > 0);
501
502 const cgltf_size tangents_size_bytes =
503 num_verts * NUM_TANGENT_COMPONENTS * sizeof(float);
504
505 user.tangents = CGLTF_OPTIONS_MALLOC(tangents_size_bytes);
506 if (!user.tangents) {
507 return buffer;
508 }
509
510 SMikkTSpaceInterface interface = (SMikkTSpaceInterface){
511 .m_getNumFaces = SMikk_get_num_faces,
512 .m_getNumVerticesOfFace = SMikk_get_num_vertices_of_face,
513 .m_getPosition = SMikk_get_position,
514 .m_getNormal = SMikk_get_normal,
515 .m_getTexCoord = SMikk_get_texcoord,
516 .m_setTSpaceBasic = SMikk_set_TSpace_basic,
517 };
518 const SMikkTSpaceContext context = (SMikkTSpaceContext){
519 .m_pInterface = &interface,
520 .m_pUserData = &user,
521 };
522 if (!genTangSpaceDefault(&context)) {
523 return buffer;
524 }
525
526 buffer.data = user.tangents;
527 buffer.size_bytes = tangents_size_bytes;
528 buffer.primitive = primitive;
529
530 return buffer;
531}
532
533static void process_primitive(const cgltf_options* options,
534 const cgltf_data* data,
535 cgltf_primitive* primitive,
536 cgltfTangentBuffer* tangent_buffers,
537 cgltf_size* num_tangent_buffers) {
538 DLOG("Processing primitive\n");
539 cgltf_size cur_buffer = 0;
540 // TODO: MikkTSpace should not be used with models with vertex indices. One
541 // workaround is to unindex the mesh, compute tangents, and then re-index it.
542 if (((primitive->type == cgltf_primitive_type_triangle_fan) ||
543 (primitive->type == cgltf_primitive_type_triangle_strip) ||
544 (primitive->type == cgltf_primitive_type_triangles)) &&
545 has_normal_map(primitive) && !has_tangents(primitive) &&
546 has_positions3d(primitive) && has_normals(primitive) &&
547 has_texcoords(primitive) && !has_indices(primitive)) {
548 cur_buffer = (*num_tangent_buffers)++; // claim this primitive's output slot
549 if (tangent_buffers) {
550 DLOG("Model with normal map missing tangents detected\n");
551 tangent_buffers[cur_buffer] = compute_tangents(options, data, primitive);
552 if (tangent_buffers[cur_buffer].data) {
553 DLOG("Tangents computed\n");
554 }
555
556 }
557 }
558}
559
560cgltf_result cgltf_compute_tangents(const cgltf_options* input_options,
561 const cgltf_data* data,
562 cgltfTangentBuffer** tangent_buffers,
563 cgltf_size* num_tangent_buffers) {
564 if ((input_options == NULL) || (data == NULL)) {
565 return cgltf_result_invalid_options;
566 }
567
568 DLOG("cgltf_compute_tangents\n");
569
570 cgltf_options options = *input_options;
571 if (options.memory.alloc == NULL) {
572 options.memory.alloc = &cgltf_default_alloc;
573 }
574 if (options.memory.free == NULL) {
575 options.memory.free = &cgltf_default_free;
576 }
577
578 // First pass: compute the number of tangent buffers to be created.
579 *num_tangent_buffers = 0;
580 for (cgltf_size mesh_idx = 0; mesh_idx < data->meshes_count; ++mesh_idx) {
581 const cgltf_mesh* mesh = &data->meshes[mesh_idx];
582
583 for (cgltf_size prim_idx = 0; prim_idx < mesh->primitives_count;
584 ++prim_idx) {
585 // Pass in null for the tangent buffers to just compute the number of
586 // buffers.
587 process_primitive(&options, data, &mesh->primitives[prim_idx], 0,
588 num_tangent_buffers);
589 }
590 }
591 DLOG("Number of primitives to be patched: %lu\n", *num_tangent_buffers);
592
593 // Second pass: compute the tangents.
594 if (*num_tangent_buffers > 0) {
595 *tangent_buffers =
596 options.memory.alloc(options.memory.user_data,
597 *num_tangent_buffers * sizeof(cgltfTangentBuffer));
598 if (!*tangent_buffers) {
599 return cgltf_result_out_of_memory;
600 }
601
602 cgltf_size tangent_buffers_computed = 0;
603
604 for (cgltf_size mesh_idx = 0; mesh_idx < data->meshes_count; ++mesh_idx) {
605 const cgltf_mesh* mesh = &data->meshes[mesh_idx];
606
607 for (cgltf_size prim_idx = 0; prim_idx < mesh->primitives_count;
608 ++prim_idx) {
609 process_primitive(&options, data, &mesh->primitives[prim_idx],
610 *tangent_buffers, &tangent_buffers_computed);
611 }
612 }
613
614 assert(tangent_buffers_computed == *num_tangent_buffers);
615 }
616
617 return cgltf_result_success;
618}
diff --git a/contrib/cgltf-tangents/cgltf_tangents.h b/contrib/cgltf-tangents/cgltf_tangents.h
new file mode 100644
index 0000000..79e3502
--- /dev/null
+++ b/contrib/cgltf-tangents/cgltf_tangents.h
@@ -0,0 +1,67 @@
1/*
2Copyright 2022 Marc Sunet
3
4Redistribution and use in source and binary forms, with or without modification,
5are permitted provided that the following conditions are met:
6
71. Redistributions of source code must retain the above copyright notice, this
8list of conditions and the following disclaimer.
9
102. Redistributions in binary form must reproduce the above copyright notice,
11this list of conditions and the following disclaimer in the documentation and/or
12other materials provided with the distribution.
13
14THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
15ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
18ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24*/
25#ifndef CGLTF_TANGENTS_H_INCLUDED__
26#define CGLTF_TANGENTS_H_INCLUDED__
27
28#include <cgltf.h>
29
30/// A buffer that holds tangent vectors.
31///
32/// Tangent vectors are tightly packed in the array.
33///
34/// Tangent vectors have 4 coordinates: (X,Y,Z) for the vector, W for the sign.
35/// The usual rules of MikkTSpace apply, namely that the bitangent should be
36/// computed as:
37///
38/// bitangent = tangent.w * cross(normal, tangent.xyz);
39///
40/// Refer to the MikkTSpace documentation for more details.
41///
42/// The primitive pointer points to the mesh primitive for which the tangents in
43/// this buffer were computed. When your application loads mesh primitives, it
44/// can scan the cgltfTangentBuffer array output by cgltf_compute_tangents() to
45/// see whether tangents were computed for the mesh primitive.
46typedef struct cgltfTangentBuffer {
47 void* data; // X-coordinate of the first tangent vector.
48 cgltf_size size_bytes; // Total size of data in bytes.
49 cgltf_primitive* primitive; // The primitive these tangents belong to.
50} cgltfTangentBuffer;
51
52/// Compute tangent vectors for normal-mapped mesh primitives missing them.
53///
54/// cgltf_options can be zeroed out but must be non-null.
55///
56/// cgltf_data is the scene previously loaded by cgltf.
57///
58/// out_tangent_buffers is an output array of tangent buffers, one buffer per
59/// mesh primitive for which tangents were computed.
60///
61/// out_num_tangent_buffers is the number of tangent buffers in the output
62/// array.
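62///
62/// Example (a sketch assuming zeroed default options and a `data` pointer
62/// previously obtained from cgltf; error handling omitted):
62///
62///   cgltf_options options = {0};
62///   cgltfTangentBuffer* tangent_buffers = NULL;
62///   cgltf_size num_tangent_buffers = 0;
62///   if (cgltf_compute_tangents(&options, data, &tangent_buffers,
62///                              &num_tangent_buffers) == cgltf_result_success) {
62///     /* one entry in tangent_buffers per patched mesh primitive */
62///   }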
63cgltf_result cgltf_compute_tangents(const cgltf_options*, const cgltf_data*,
64 cgltfTangentBuffer** out_tangent_buffers,
65 cgltf_size* out_num_tangent_buffers);
66
67#endif // CGLTF_TANGENTS_H_INCLUDED__
diff --git a/contrib/cgltf-tangents/test/CMakeLists.txt b/contrib/cgltf-tangents/test/CMakeLists.txt
new file mode 100644
index 0000000..422c950
--- /dev/null
+++ b/contrib/cgltf-tangents/test/CMakeLists.txt
@@ -0,0 +1,11 @@
1cmake_minimum_required(VERSION 3.0)
2
3project (cgltf-test)
4
5add_executable(cgltf-test
6 main.c)
7
8target_link_libraries(cgltf-test
9 cgltf
10 cgltf-tangents
11 -lm)
diff --git a/contrib/cgltf-tangents/test/main.c b/contrib/cgltf-tangents/test/main.c
new file mode 100644
index 0000000..0d70008
--- /dev/null
+++ b/contrib/cgltf-tangents/test/main.c
@@ -0,0 +1,86 @@
1/*
2Copyright 2022 Marc Sunet
3
4Redistribution and use in source and binary forms, with or without modification,
5are permitted provided that the following conditions are met:
6
71. Redistributions of source code must retain the above copyright notice, this
8list of conditions and the following disclaimer.
9
102. Redistributions in binary form must reproduce the above copyright notice,
11this list of conditions and the following disclaimer in the documentation and/or
12other materials provided with the distribution.
13
14THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
15ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
18ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24*/
25#include <cgltf_tangents.h>
26#define CGLTF_IMPLEMENTATION
27#include <cgltf.h>
28
29#include <stdio.h>
30
31void print_tangent_buffer(const cgltfTangentBuffer* buffer, int max_vectors) {
32 printf("Tangent buffer for primitive (%p) (%lu bytes):\n", buffer->primitive,
33 buffer->size_bytes);
34
35 const float* xyzw = (const float*)buffer->data;
36 const float* end = (const float*)((const char*)buffer->data + buffer->size_bytes);
37
38 for (int i = 0; i < max_vectors && xyzw < end; ++i, xyzw += 4) {
39 printf("(%3.2f, %3.2f, %3.2f, sign: %3.2f)\n", *xyzw, *(xyzw + 1),
40 *(xyzw + 2), *(xyzw + 3));
41 }
42 printf("--------------------\n");
43}
44
45void usage(const char* argv0) {
46 fprintf(stderr, "Usage: %s <glTF file path>\n", argv0);
47}
48
49int main(int argc, const char** argv) {
50 cgltf_options options = {0};
51 cgltf_data* data = NULL;
52
53 if (argc != 2) {
54 usage(argv[0]);
55 return 0;
56 }
57
58 const char* filepath = argv[1];
59
60 cgltf_result result = cgltf_parse_file(&options, filepath, &data);
61 if (result != cgltf_result_success) {
62 cgltf_free(data);
63 return 1;
64 }
65
66 // Must call cgltf_load_buffers() to load buffer data.
67 result = cgltf_load_buffers(&options, data, filepath);
68 if (result != cgltf_result_success) {
69 cgltf_free(data);
70 return 2;
71 }
72
73 cgltfTangentBuffer* tangent_buffers = 0;
74 cgltf_size num_tangent_buffers = 0;
75 cgltf_compute_tangents(&options, data, &tangent_buffers,
76 &num_tangent_buffers);
77
78 // cgltf scene not needed beyond this point.
79 cgltf_free(data);
80
81 for (cgltf_size i = 0; i < num_tangent_buffers; ++i) {
82 print_tangent_buffer(&tangent_buffers[i], 10);
83 }
84 free(tangent_buffers); // with zeroed options, allocations come from malloc()
85 return 0;
86}
diff --git a/contrib/cgltf/CMakeLists.txt b/contrib/cgltf/CMakeLists.txt
new file mode 100644
index 0000000..0ac840a
--- /dev/null
+++ b/contrib/cgltf/CMakeLists.txt
@@ -0,0 +1,8 @@
1cmake_minimum_required(VERSION 3.16)
2
3project(cgltf)
4
5add_library(cgltf INTERFACE)
6
7target_include_directories(cgltf INTERFACE
8 ${CMAKE_CURRENT_SOURCE_DIR})
diff --git a/contrib/cgltf/LICENSE b/contrib/cgltf/LICENSE
new file mode 100644
index 0000000..0afe8c7
--- /dev/null
+++ b/contrib/cgltf/LICENSE
@@ -0,0 +1,7 @@
1Copyright (c) 2018 Johannes Kuhlmann
2
3Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4
5The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
7THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/contrib/cgltf/README.md b/contrib/cgltf/README.md
new file mode 100644
index 0000000..3b49d52
--- /dev/null
+++ b/contrib/cgltf/README.md
@@ -0,0 +1,154 @@
1# :diamond_shape_with_a_dot_inside: cgltf
2**Single-file/stb-style C glTF loader and writer**
3
4[![Build Status](https://travis-ci.org/jkuhlmann/cgltf.svg?branch=master)](https://travis-ci.org/jkuhlmann/cgltf)
5
6Used in: [bgfx](https://github.com/bkaradzic/bgfx), [Filament](https://github.com/google/filament), [meshoptimizer](https://github.com/zeux/meshoptimizer), [raylib](https://github.com/raysan5/raylib), and more!
7
8## Usage: Loading
9Loading from file:
10```c
11#define CGLTF_IMPLEMENTATION
12#include "cgltf.h"
13
14cgltf_options options = {0};
15cgltf_data* data = NULL;
16cgltf_result result = cgltf_parse_file(&options, "scene.gltf", &data);
17if (result == cgltf_result_success)
18{
19 /* TODO make awesome stuff */
20 cgltf_free(data);
21}
22```
23
24Loading from memory:
25```c
26#define CGLTF_IMPLEMENTATION
27#include "cgltf.h"
28
29void* buf; /* Pointer to glb or gltf file data */
30size_t size; /* Size of the file data */
31
32cgltf_options options = {0};
33cgltf_data* data = NULL;
34cgltf_result result = cgltf_parse(&options, buf, size, &data);
35if (result == cgltf_result_success)
36{
37 /* TODO make awesome stuff */
38 cgltf_free(data);
39}
40```
41
42Note that cgltf does not load the contents of extra files such as buffers or images into memory by default. You'll need to read these files yourself using URIs from `data.buffers[]` or `data.images[]` respectively.
43For buffer data, you can alternatively call `cgltf_load_buffers`, which will use `FILE*` APIs to open and read buffer files.
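
For example, to pull in external buffer data right after a successful parse (a sketch reusing `options`, `data`, and the file path from the snippet above):

```c
cgltf_result buffers_result = cgltf_load_buffers(&options, data, "scene.gltf");
if (buffers_result != cgltf_result_success)
{
    /* TODO handle error */
}
```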
44
45**For more in-depth documentation and a description of the public interface refer to the top of the `cgltf.h` file.**
46
47## Usage: Writing
48When writing glTF data, you need a valid `cgltf_data` structure that represents a valid glTF document. You can construct such a structure yourself or load it using the loader functions described above. The writer functions do not deallocate any memory. So, you either have to do it manually or call `cgltf_free()` if you got the data by loading it from a glTF document.
49
50Writing to file:
51```c
52#define CGLTF_IMPLEMENTATION
53#define CGLTF_WRITE_IMPLEMENTATION
54#include "cgltf_write.h"
55
56cgltf_options options = {0};
57cgltf_data* data = /* TODO must be valid data */;
58cgltf_result result = cgltf_write_file(&options, "out.gltf", data);
59if (result != cgltf_result_success)
60{
61 /* TODO handle error */
62}
63```
64
65Writing to memory:
66```c
67#define CGLTF_IMPLEMENTATION
68#define CGLTF_WRITE_IMPLEMENTATION
69#include "cgltf_write.h"
70cgltf_options options = {0};
71cgltf_data* data = /* TODO must be valid data */;
72
73cgltf_size size = cgltf_write(&options, NULL, 0, data);
74
75char* buf = malloc(size);
76
77cgltf_size written = cgltf_write(&options, buf, size, data);
78if (written != size)
79{
80 /* TODO handle error */
81}
82```
83
84Note that cgltf does not write the contents of extra files such as buffers or images. You'll need to write this data yourself.
85
86Writing does not yet support "extras" data.
87
88**For more in-depth documentation and a description of the public interface refer to the top of the `cgltf_write.h` file.**
89
90
91## Features
92cgltf supports core glTF 2.0:
93- glb (binary files) and gltf (JSON files)
94- meshes (including accessors, buffer views, buffers)
95- materials (including textures, samplers, images)
96- scenes and nodes
97- skins
98- animations
99- cameras
100- morph targets
101- extras data
102
103cgltf also supports some glTF extensions:
104- KHR_draco_mesh_compression (requires a library like [Google's Draco](https://github.com/google/draco) for decompression though)
105- KHR_lights_punctual
106- KHR_materials_clearcoat
107- KHR_materials_ior
108- KHR_materials_pbrSpecularGlossiness
109- KHR_materials_specular
110- KHR_materials_transmission
111- KHR_materials_unlit
112- KHR_texture_transform
113
114cgltf does **not** yet support unlisted extensions. However, unlisted extensions can be accessed via the "extensions" member on objects.
115
116## Building
117The easiest approach is to integrate the `cgltf.h` header file into your project. If you are unfamiliar with single-file C libraries (also known as stb-style libraries), this is how it goes:
118
1191. Include `cgltf.h` where you need the functionality.
1201. Have exactly one source file that defines `CGLTF_IMPLEMENTATION` before including `cgltf.h`.
1211. Use the cgltf functions as described above.
122
123Support for writing can be found in a separate file called `cgltf_write.h` (which includes `cgltf.h`). Building it works analogously using the `CGLTF_WRITE_IMPLEMENTATION` define.
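
Concretely, a project usually has exactly one source file along these lines (a sketch; the file name is arbitrary), while every other file includes the header without the define:

```c
/* cgltf_impl.c - the one translation unit that provides the implementation */
#define CGLTF_IMPLEMENTATION
#include "cgltf.h"
```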
124
125## Contributing
126Everyone is welcome to contribute to the library. If you find any problems, you can submit them using [GitHub's issue system](https://github.com/jkuhlmann/cgltf/issues). If you want to contribute code, you should fork the project and then send a pull request.
127
128
129## Dependencies
130None.
131
132C headers being used by implementation:
133```
134#include <stddef.h>
135#include <stdint.h>
136#include <string.h>
137#include <stdlib.h>
138#include <stdio.h>
139#include <limits.h>
140```
141
142Note that this library has a copy of the [JSMN JSON parser](https://github.com/zserge/jsmn) embedded in its source.
143
144## Testing
145There is a Python script in the `test/` folder that retrieves the glTF 2.0 sample files from the [glTF-Sample-Models repository](https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/2.0) and runs the library against all `.gltf` and `.glb` files.
146
147Here's one way to build and run the test:
148
149 cd test ; mkdir build ; cd build ; cmake .. -DCMAKE_BUILD_TYPE=Debug
150 make -j
151 cd ..
152 ./test_all.py
153
154There is also an llvm-fuzz test in `fuzz/`. See http://llvm.org/docs/LibFuzzer.html for more information.
diff --git a/contrib/cgltf/cgltf.h b/contrib/cgltf/cgltf.h
new file mode 100644
index 0000000..077cf36
--- /dev/null
+++ b/contrib/cgltf/cgltf.h
@@ -0,0 +1,5746 @@
1/**
2 * cgltf - a single-file glTF 2.0 parser written in C99.
3 *
4 * Version: 1.7
5 *
6 * Website: https://github.com/jkuhlmann/cgltf
7 *
8 * Distributed under the MIT License, see notice at the end of this file.
9 *
10 * Building:
11 * Include this file where you need the struct and function
12 * declarations. Have exactly one source file where you define
13 * `CGLTF_IMPLEMENTATION` before including this file to get the
14 * function definitions.
15 *
16 * Reference:
17 * `cgltf_result cgltf_parse(const cgltf_options*, const void*,
18 * cgltf_size, cgltf_data**)` parses both glTF and GLB data. If
19 * this function returns `cgltf_result_success`, you have to call
20 * `cgltf_free()` on the created `cgltf_data*` variable.
21 * Note that contents of external files for buffers and images are not
22 * automatically loaded. You'll need to read these files yourself using
23 * URIs in the `cgltf_data` structure.
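 *
 * For example (a sketch; `blob` and `blob_size` are assumed to hold a
 * complete glTF or GLB payload already read into memory):
 *
 *     cgltf_options options = {0};
 *     cgltf_data* data = NULL;
 *     if (cgltf_parse(&options, blob, blob_size, &data) == cgltf_result_success)
 *     {
 *         .. use data, then ..
 *         cgltf_free(data);
 *     }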
24 *
25 * `cgltf_options` is the struct passed to `cgltf_parse()` to control
26 * parts of the parsing process. You can use it to force the file type
27 * and provide memory allocation as well as file operation callbacks.
28 * Should be zero-initialized to trigger default behavior.
29 *
30 * `cgltf_data` is the struct allocated and filled by `cgltf_parse()`.
31 * It generally mirrors the glTF format as described by the spec (see
32 * https://github.com/KhronosGroup/glTF/tree/master/specification/2.0).
33 *
34 * `void cgltf_free(cgltf_data*)` frees the allocated `cgltf_data`
35 * variable.
36 *
37 * `cgltf_result cgltf_load_buffers(const cgltf_options*, cgltf_data*,
38 * const char* gltf_path)` can be optionally called to open and read buffer
39 * files using the `FILE*` APIs. The `gltf_path` argument is the path to
40 * the original glTF file, which allows the parser to resolve the path to
41 * buffer files.
42 *
43 * `cgltf_result cgltf_load_buffer_base64(const cgltf_options* options,
44 * cgltf_size size, const char* base64, void** out_data)` decodes
45 * base64-encoded data content. Used internally by `cgltf_load_buffers()`
46 * and may be useful if you're not dealing with normal files.
47 *
48 * `cgltf_result cgltf_parse_file(const cgltf_options* options, const
49 * char* path, cgltf_data** out_data)` can be used to open the given
50 * file using `FILE*` APIs and parse the data using `cgltf_parse()`.
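 *
 * A minimal end-to-end sketch (illustrative; "scene.gltf" is a placeholder
 * path, error handling elided):
 *
 *     cgltf_options options = {0};
 *     cgltf_data* data = NULL;
 *     if (cgltf_parse_file(&options, "scene.gltf", &data) == cgltf_result_success)
 *     {
 *         if (cgltf_load_buffers(&options, data, "scene.gltf") == cgltf_result_success)
 *         {
 *             .. use data ..
 *         }
 *         cgltf_free(data);
 *     }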
51 *
52 * `cgltf_result cgltf_validate(cgltf_data*)` can be used to do additional
53 * checks to make sure the parsed glTF data is valid.
54 *
55 * `cgltf_node_transform_local` converts the translation / rotation / scale properties of a node
56 * into a mat4.
57 *
58 * `cgltf_node_transform_world` calls `cgltf_node_transform_local` on every ancestor in order
59 * to compute the root-to-node transformation.
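 *
 * For example (a sketch, assuming `node` points into a parsed document):
 *
 *     cgltf_float world[16];
 *     cgltf_node_transform_world(node, world);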
60 *
61 * `cgltf_accessor_unpack_floats` reads in the data from an accessor, applies sparse data (if any),
62 * and converts them to floating point. Assumes that `cgltf_load_buffers` has already been called.
63 * By passing null for the output pointer, users can find out how many floats are required in the
64 * output buffer.
65 *
 66 * `cgltf_num_components` is a tiny utility that tells you the dimensionality of
67 * a certain accessor type. This can be used before `cgltf_accessor_unpack_floats` to help allocate
68 * the necessary amount of memory.
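 *
 * A sketch of the intended pairing (assumes `cgltf_load_buffers` has run and
 * `accessor` is valid; plain `malloc`/`free` used for brevity):
 *
 *     cgltf_size float_count = cgltf_accessor_unpack_floats(accessor, NULL, 0);
 *     cgltf_float* out = (cgltf_float*)malloc(float_count * sizeof(cgltf_float));
 *     cgltf_accessor_unpack_floats(accessor, out, float_count);
 *     .. use out, then ..
 *     free(out);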
69 *
70 * `cgltf_accessor_read_float` reads a certain element from a non-sparse accessor and converts it to
71 * floating point, assuming that `cgltf_load_buffers` has already been called. The passed-in element
72 * size is the number of floats in the output buffer, which should be in the range [1, 16]. Returns
73 * false if the passed-in element_size is too small, or if the accessor is sparse.
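 *
 * For example, reading one vec3 element (a sketch; `accessor` and the
 * element index `i` are assumed valid):
 *
 *     cgltf_float v[3];
 *     if (cgltf_accessor_read_float(accessor, i, v, 3))
 *     {
 *         .. v[0..2] hold element i ..
 *     }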
74 *
75 * `cgltf_accessor_read_uint` is similar to its floating-point counterpart, but limited to reading
76 * vector types and does not support matrix types. The passed-in element size is the number of uints
77 * in the output buffer, which should be in the range [1, 4]. Returns false if the passed-in
78 * element_size is too small, or if the accessor is sparse.
79 *
80 * `cgltf_accessor_read_index` is similar to its floating-point counterpart, but it returns size_t
81 * and only works with single-component data types.
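 *
 * For example, iterating a primitive's index buffer (a sketch; `prim` is
 * assumed to be a `cgltf_primitive*` with non-NULL `indices` and loaded
 * buffers):
 *
 *     for (cgltf_size i = 0; i < prim->indices->count; ++i)
 *     {
 *         cgltf_size index = cgltf_accessor_read_index(prim->indices, i);
 *         .. index ..
 *     }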
82 *
83 * `cgltf_result cgltf_copy_extras_json(const cgltf_data*, const cgltf_extras*,
84 * char* dest, cgltf_size* dest_size)` allows users to retrieve the "extras" data that
85 * can be attached to many glTF objects (which can be arbitrary JSON data). The
86 * `cgltf_extras` struct stores the offsets of the start and end of the extras JSON data
87 * as it appears in the complete glTF JSON data. This function copies the extras data
88 * into the provided buffer. If `dest` is NULL, the length of the data is written into
89 * `dest_size`. You can then parse this data using your own JSON parser
 90 * or, if you've included the cgltf implementation, using the integrated JSMN JSON parser.
91 */
92#ifndef CGLTF_H_INCLUDED__
93#define CGLTF_H_INCLUDED__
94
95#include <stddef.h>
96
97#ifdef __cplusplus
98extern "C" {
99#endif
100
101typedef size_t cgltf_size;
102typedef float cgltf_float;
103typedef int cgltf_int;
104typedef unsigned int cgltf_uint;
105typedef int cgltf_bool;
106
107typedef enum cgltf_file_type
108{
109 cgltf_file_type_invalid,
110 cgltf_file_type_gltf,
111 cgltf_file_type_glb,
112} cgltf_file_type;
113
114typedef enum cgltf_result
115{
116 cgltf_result_success,
117 cgltf_result_data_too_short,
118 cgltf_result_unknown_format,
119 cgltf_result_invalid_json,
120 cgltf_result_invalid_gltf,
121 cgltf_result_invalid_options,
122 cgltf_result_file_not_found,
123 cgltf_result_io_error,
124 cgltf_result_out_of_memory,
125 cgltf_result_legacy_gltf,
126} cgltf_result;
127
128typedef struct cgltf_memory_options
129{
130 void* (*alloc)(void* user, cgltf_size size);
131 void (*free) (void* user, void* ptr);
132 void* user_data;
133} cgltf_memory_options;
134
135typedef struct cgltf_file_options
136{
137 cgltf_result(*read)(const struct cgltf_memory_options* memory_options, const struct cgltf_file_options* file_options, const char* path, cgltf_size* size, void** data);
138 void (*release)(const struct cgltf_memory_options* memory_options, const struct cgltf_file_options* file_options, void* data);
139 void* user_data;
140} cgltf_file_options;
141
142typedef struct cgltf_options
143{
144 cgltf_file_type type; /* invalid == auto detect */
145 cgltf_size json_token_count; /* 0 == auto */
146 cgltf_memory_options memory;
147 cgltf_file_options file;
148} cgltf_options;
149
150typedef enum cgltf_buffer_view_type
151{
152 cgltf_buffer_view_type_invalid,
153 cgltf_buffer_view_type_indices,
154 cgltf_buffer_view_type_vertices,
155} cgltf_buffer_view_type;
156
157typedef enum cgltf_attribute_type
158{
159 cgltf_attribute_type_invalid,
160 cgltf_attribute_type_position,
161 cgltf_attribute_type_normal,
162 cgltf_attribute_type_tangent,
163 cgltf_attribute_type_texcoord,
164 cgltf_attribute_type_color,
165 cgltf_attribute_type_joints,
166 cgltf_attribute_type_weights,
167} cgltf_attribute_type;
168
169typedef enum cgltf_component_type
170{
171 cgltf_component_type_invalid,
172 cgltf_component_type_r_8, /* BYTE */
173 cgltf_component_type_r_8u, /* UNSIGNED_BYTE */
174 cgltf_component_type_r_16, /* SHORT */
175 cgltf_component_type_r_16u, /* UNSIGNED_SHORT */
176 cgltf_component_type_r_32u, /* UNSIGNED_INT */
177 cgltf_component_type_r_32f, /* FLOAT */
178} cgltf_component_type;
179
180typedef enum cgltf_type
181{
182 cgltf_type_invalid,
183 cgltf_type_scalar,
184 cgltf_type_vec2,
185 cgltf_type_vec3,
186 cgltf_type_vec4,
187 cgltf_type_mat2,
188 cgltf_type_mat3,
189 cgltf_type_mat4,
190} cgltf_type;
191
192typedef enum cgltf_primitive_type
193{
194 cgltf_primitive_type_points,
195 cgltf_primitive_type_lines,
196 cgltf_primitive_type_line_loop,
197 cgltf_primitive_type_line_strip,
198 cgltf_primitive_type_triangles,
199 cgltf_primitive_type_triangle_strip,
200 cgltf_primitive_type_triangle_fan,
201} cgltf_primitive_type;
202
203typedef enum cgltf_alpha_mode
204{
205 cgltf_alpha_mode_opaque,
206 cgltf_alpha_mode_mask,
207 cgltf_alpha_mode_blend,
208} cgltf_alpha_mode;
209
210typedef enum cgltf_animation_path_type {
211 cgltf_animation_path_type_invalid,
212 cgltf_animation_path_type_translation,
213 cgltf_animation_path_type_rotation,
214 cgltf_animation_path_type_scale,
215 cgltf_animation_path_type_weights,
216} cgltf_animation_path_type;
217
218typedef enum cgltf_interpolation_type {
219 cgltf_interpolation_type_linear,
220 cgltf_interpolation_type_step,
221 cgltf_interpolation_type_cubic_spline,
222} cgltf_interpolation_type;
223
224typedef enum cgltf_camera_type {
225 cgltf_camera_type_invalid,
226 cgltf_camera_type_perspective,
227 cgltf_camera_type_orthographic,
228} cgltf_camera_type;
229
230typedef enum cgltf_light_type {
231 cgltf_light_type_invalid,
232 cgltf_light_type_directional,
233 cgltf_light_type_point,
234 cgltf_light_type_spot,
235} cgltf_light_type;
236
237typedef struct cgltf_extras {
238 cgltf_size start_offset;
239 cgltf_size end_offset;
240} cgltf_extras;
241
242typedef struct cgltf_extension {
243 char* name;
244 char* data;
245} cgltf_extension;
246
247typedef struct cgltf_buffer
248{
249 cgltf_size size;
250 char* uri;
251 void* data; /* loaded by cgltf_load_buffers */
252 cgltf_extras extras;
253 cgltf_size extensions_count;
254 cgltf_extension* extensions;
255} cgltf_buffer;
256
257typedef struct cgltf_buffer_view
258{
259 cgltf_buffer* buffer;
260 cgltf_size offset;
261 cgltf_size size;
262 cgltf_size stride; /* 0 == automatically determined by accessor */
263 cgltf_buffer_view_type type;
264 cgltf_extras extras;
265 cgltf_size extensions_count;
266 cgltf_extension* extensions;
267} cgltf_buffer_view;
268
269typedef struct cgltf_accessor_sparse
270{
271 cgltf_size count;
272 cgltf_buffer_view* indices_buffer_view;
273 cgltf_size indices_byte_offset;
274 cgltf_component_type indices_component_type;
275 cgltf_buffer_view* values_buffer_view;
276 cgltf_size values_byte_offset;
277 cgltf_extras extras;
278 cgltf_extras indices_extras;
279 cgltf_extras values_extras;
280 cgltf_size extensions_count;
281 cgltf_extension* extensions;
282 cgltf_size indices_extensions_count;
283 cgltf_extension* indices_extensions;
284 cgltf_size values_extensions_count;
285 cgltf_extension* values_extensions;
286} cgltf_accessor_sparse;
287
288typedef struct cgltf_accessor
289{
290 cgltf_component_type component_type;
291 cgltf_bool normalized;
292 cgltf_type type;
293 cgltf_size offset;
294 cgltf_size count;
295 cgltf_size stride;
296 cgltf_buffer_view* buffer_view;
297 cgltf_bool has_min;
298 cgltf_float min[16];
299 cgltf_bool has_max;
300 cgltf_float max[16];
301 cgltf_bool is_sparse;
302 cgltf_accessor_sparse sparse;
303 cgltf_extras extras;
304 cgltf_size extensions_count;
305 cgltf_extension* extensions;
306} cgltf_accessor;
307
308typedef struct cgltf_attribute
309{
310 char* name;
311 cgltf_attribute_type type;
312 cgltf_int index;
313 cgltf_accessor* data;
314} cgltf_attribute;
315
316typedef struct cgltf_image
317{
318 char* name;
319 char* uri;
320 cgltf_buffer_view* buffer_view;
321 char* mime_type;
322 cgltf_extras extras;
323 cgltf_size extensions_count;
324 cgltf_extension* extensions;
325} cgltf_image;
326
327typedef struct cgltf_sampler
328{
329 cgltf_int mag_filter;
330 cgltf_int min_filter;
331 cgltf_int wrap_s;
332 cgltf_int wrap_t;
333 cgltf_extras extras;
334 cgltf_size extensions_count;
335 cgltf_extension* extensions;
336} cgltf_sampler;
337
338typedef struct cgltf_texture
339{
340 char* name;
341 cgltf_image* image;
342 cgltf_sampler* sampler;
343 cgltf_extras extras;
344 cgltf_size extensions_count;
345 cgltf_extension* extensions;
346} cgltf_texture;
347
348typedef struct cgltf_texture_transform
349{
350 cgltf_float offset[2];
351 cgltf_float rotation;
352 cgltf_float scale[2];
353 cgltf_int texcoord;
354} cgltf_texture_transform;
355
356typedef struct cgltf_texture_view
357{
358 cgltf_texture* texture;
359 cgltf_int texcoord;
360 cgltf_float scale; /* equivalent to strength for occlusion_texture */
361 cgltf_bool has_transform;
362 cgltf_texture_transform transform;
363 cgltf_extras extras;
364 cgltf_size extensions_count;
365 cgltf_extension* extensions;
366} cgltf_texture_view;
367
368typedef struct cgltf_pbr_metallic_roughness
369{
370 cgltf_texture_view base_color_texture;
371 cgltf_texture_view metallic_roughness_texture;
372
373 cgltf_float base_color_factor[4];
374 cgltf_float metallic_factor;
375 cgltf_float roughness_factor;
376
377 cgltf_extras extras;
378} cgltf_pbr_metallic_roughness;
379
380typedef struct cgltf_pbr_specular_glossiness
381{
382 cgltf_texture_view diffuse_texture;
383 cgltf_texture_view specular_glossiness_texture;
384
385 cgltf_float diffuse_factor[4];
386 cgltf_float specular_factor[3];
387 cgltf_float glossiness_factor;
388} cgltf_pbr_specular_glossiness;
389
390typedef struct cgltf_clearcoat
391{
392 cgltf_texture_view clearcoat_texture;
393 cgltf_texture_view clearcoat_roughness_texture;
394 cgltf_texture_view clearcoat_normal_texture;
395
396 cgltf_float clearcoat_factor;
397 cgltf_float clearcoat_roughness_factor;
398} cgltf_clearcoat;
399
400typedef struct cgltf_transmission
401{
402 cgltf_texture_view transmission_texture;
403 cgltf_float transmission_factor;
404} cgltf_transmission;
405
406typedef struct cgltf_ior
407{
408 cgltf_float ior;
409} cgltf_ior;
410
411typedef struct cgltf_specular
412{
413 cgltf_texture_view specular_texture;
414 cgltf_float specular_color_factor[3];
415 cgltf_float specular_factor;
416} cgltf_specular;
417
418typedef struct cgltf_material
419{
420 char* name;
421 cgltf_bool has_pbr_metallic_roughness;
422 cgltf_bool has_pbr_specular_glossiness;
423 cgltf_bool has_clearcoat;
424 cgltf_bool has_transmission;
425 cgltf_bool has_ior;
426 cgltf_bool has_specular;
427 cgltf_pbr_metallic_roughness pbr_metallic_roughness;
428 cgltf_pbr_specular_glossiness pbr_specular_glossiness;
429 cgltf_clearcoat clearcoat;
430 cgltf_ior ior;
431 cgltf_specular specular;
432 cgltf_transmission transmission;
433 cgltf_texture_view normal_texture;
434 cgltf_texture_view occlusion_texture;
435 cgltf_texture_view emissive_texture;
436 cgltf_float emissive_factor[3];
437 cgltf_alpha_mode alpha_mode;
438 cgltf_float alpha_cutoff;
439 cgltf_bool double_sided;
440 cgltf_bool unlit;
441 cgltf_extras extras;
442 cgltf_size extensions_count;
443 cgltf_extension* extensions;
444} cgltf_material;
445
446typedef struct cgltf_morph_target {
447 cgltf_attribute* attributes;
448 cgltf_size attributes_count;
449} cgltf_morph_target;
450
451typedef struct cgltf_draco_mesh_compression {
452 cgltf_buffer_view* buffer_view;
453 cgltf_attribute* attributes;
454 cgltf_size attributes_count;
455} cgltf_draco_mesh_compression;
456
457typedef struct cgltf_primitive {
458 cgltf_primitive_type type;
459 cgltf_accessor* indices;
460 cgltf_material* material;
461 cgltf_attribute* attributes;
462 cgltf_size attributes_count;
463 cgltf_morph_target* targets;
464 cgltf_size targets_count;
465 cgltf_extras extras;
466 cgltf_bool has_draco_mesh_compression;
467 cgltf_draco_mesh_compression draco_mesh_compression;
468 cgltf_size extensions_count;
469 cgltf_extension* extensions;
470} cgltf_primitive;
471
472typedef struct cgltf_mesh {
473 char* name;
474 cgltf_primitive* primitives;
475 cgltf_size primitives_count;
476 cgltf_float* weights;
477 cgltf_size weights_count;
478 char** target_names;
479 cgltf_size target_names_count;
480 cgltf_extras extras;
481 cgltf_size extensions_count;
482 cgltf_extension* extensions;
483} cgltf_mesh;
484
485typedef struct cgltf_node cgltf_node;
486
487typedef struct cgltf_skin {
488 char* name;
489 cgltf_node** joints;
490 cgltf_size joints_count;
491 cgltf_node* skeleton;
492 cgltf_accessor* inverse_bind_matrices;
493 cgltf_extras extras;
494 cgltf_size extensions_count;
495 cgltf_extension* extensions;
496} cgltf_skin;
497
498typedef struct cgltf_camera_perspective {
499 cgltf_float aspect_ratio;
500 cgltf_float yfov;
501 cgltf_float zfar;
502 cgltf_float znear;
503 cgltf_extras extras;
504} cgltf_camera_perspective;
505
506typedef struct cgltf_camera_orthographic {
507 cgltf_float xmag;
508 cgltf_float ymag;
509 cgltf_float zfar;
510 cgltf_float znear;
511 cgltf_extras extras;
512} cgltf_camera_orthographic;
513
514typedef struct cgltf_camera {
515 char* name;
516 cgltf_camera_type type;
517 union {
518 cgltf_camera_perspective perspective;
519 cgltf_camera_orthographic orthographic;
520 } data;
521 cgltf_extras extras;
522 cgltf_size extensions_count;
523 cgltf_extension* extensions;
524} cgltf_camera;
525
526typedef struct cgltf_light {
527 char* name;
528 cgltf_float color[3];
529 cgltf_float intensity;
530 cgltf_light_type type;
531 cgltf_float range;
532 cgltf_float spot_inner_cone_angle;
533 cgltf_float spot_outer_cone_angle;
534} cgltf_light;
535
536struct cgltf_node {
537 char* name;
538 cgltf_node* parent;
539 cgltf_node** children;
540 cgltf_size children_count;
541 cgltf_skin* skin;
542 cgltf_mesh* mesh;
543 cgltf_camera* camera;
544 cgltf_light* light;
545 cgltf_float* weights;
546 cgltf_size weights_count;
547 cgltf_bool has_translation;
548 cgltf_bool has_rotation;
549 cgltf_bool has_scale;
550 cgltf_bool has_matrix;
551 cgltf_float translation[3];
552 cgltf_float rotation[4];
553 cgltf_float scale[3];
554 cgltf_float matrix[16];
555 cgltf_extras extras;
556 cgltf_size extensions_count;
557 cgltf_extension* extensions;
558};
559
560typedef struct cgltf_scene {
561 char* name;
562 cgltf_node** nodes;
563 cgltf_size nodes_count;
564 cgltf_extras extras;
565 cgltf_size extensions_count;
566 cgltf_extension* extensions;
567} cgltf_scene;
568
569typedef struct cgltf_animation_sampler {
570 cgltf_accessor* input;
571 cgltf_accessor* output;
572 cgltf_interpolation_type interpolation;
573 cgltf_extras extras;
574 cgltf_size extensions_count;
575 cgltf_extension* extensions;
576} cgltf_animation_sampler;
577
578typedef struct cgltf_animation_channel {
579 cgltf_animation_sampler* sampler;
580 cgltf_node* target_node;
581 cgltf_animation_path_type target_path;
582 cgltf_extras extras;
583 cgltf_size extensions_count;
584 cgltf_extension* extensions;
585} cgltf_animation_channel;
586
587typedef struct cgltf_animation {
588 char* name;
589 cgltf_animation_sampler* samplers;
590 cgltf_size samplers_count;
591 cgltf_animation_channel* channels;
592 cgltf_size channels_count;
593 cgltf_extras extras;
594 cgltf_size extensions_count;
595 cgltf_extension* extensions;
596} cgltf_animation;
597
598typedef struct cgltf_asset {
599 char* copyright;
600 char* generator;
601 char* version;
602 char* min_version;
603 cgltf_extras extras;
604 cgltf_size extensions_count;
605 cgltf_extension* extensions;
606} cgltf_asset;
607
608typedef struct cgltf_data
609{
610 cgltf_file_type file_type;
611 void* file_data;
612
613 cgltf_asset asset;
614
615 cgltf_mesh* meshes;
616 cgltf_size meshes_count;
617
618 cgltf_material* materials;
619 cgltf_size materials_count;
620
621 cgltf_accessor* accessors;
622 cgltf_size accessors_count;
623
624 cgltf_buffer_view* buffer_views;
625 cgltf_size buffer_views_count;
626
627 cgltf_buffer* buffers;
628 cgltf_size buffers_count;
629
630 cgltf_image* images;
631 cgltf_size images_count;
632
633 cgltf_texture* textures;
634 cgltf_size textures_count;
635
636 cgltf_sampler* samplers;
637 cgltf_size samplers_count;
638
639 cgltf_skin* skins;
640 cgltf_size skins_count;
641
642 cgltf_camera* cameras;
643 cgltf_size cameras_count;
644
645 cgltf_light* lights;
646 cgltf_size lights_count;
647
648 cgltf_node* nodes;
649 cgltf_size nodes_count;
650
651 cgltf_scene* scenes;
652 cgltf_size scenes_count;
653
654 cgltf_scene* scene;
655
656 cgltf_animation* animations;
657 cgltf_size animations_count;
658
659 cgltf_extras extras;
660
661 cgltf_size data_extensions_count;
662 cgltf_extension* data_extensions;
663
664 char** extensions_used;
665 cgltf_size extensions_used_count;
666
667 char** extensions_required;
668 cgltf_size extensions_required_count;
669
670 const char* json;
671 cgltf_size json_size;
672
673 const void* bin;
674 cgltf_size bin_size;
675
676 cgltf_memory_options memory;
677 cgltf_file_options file;
678} cgltf_data;
679
680cgltf_result cgltf_parse(
681 const cgltf_options* options,
682 const void* data,
683 cgltf_size size,
684 cgltf_data** out_data);
685
686cgltf_result cgltf_parse_file(
687 const cgltf_options* options,
688 const char* path,
689 cgltf_data** out_data);
690
691cgltf_result cgltf_load_buffers(
692 const cgltf_options* options,
693 cgltf_data* data,
694 const char* gltf_path);
695
696cgltf_result cgltf_load_buffer_base64(const cgltf_options* options, cgltf_size size, const char* base64, void** out_data);
697
698void cgltf_decode_uri(char* uri);
699
700cgltf_result cgltf_validate(cgltf_data* data);
701
702void cgltf_free(cgltf_data* data);
703
704void cgltf_node_transform_local(const cgltf_node* node, cgltf_float* out_matrix);
705void cgltf_node_transform_world(const cgltf_node* node, cgltf_float* out_matrix);
706
707cgltf_bool cgltf_accessor_read_float(const cgltf_accessor* accessor, cgltf_size index, cgltf_float* out, cgltf_size element_size);
708cgltf_bool cgltf_accessor_read_uint(const cgltf_accessor* accessor, cgltf_size index, cgltf_uint* out, cgltf_size element_size);
709cgltf_size cgltf_accessor_read_index(const cgltf_accessor* accessor, cgltf_size index);
710
711cgltf_size cgltf_num_components(cgltf_type type);
712
713cgltf_size cgltf_accessor_unpack_floats(const cgltf_accessor* accessor, cgltf_float* out, cgltf_size float_count);
714
715cgltf_result cgltf_copy_extras_json(const cgltf_data* data, const cgltf_extras* extras, char* dest, cgltf_size* dest_size);
716
717#ifdef __cplusplus
718}
719#endif
720
721#endif /* #ifndef CGLTF_H_INCLUDED__ */
722
723/*
724 *
725 * Stop now, if you are only interested in the API.
726 * Below, you find the implementation.
727 *
728 */
729
730#if defined(__INTELLISENSE__) || defined(__JETBRAINS_IDE__)
731/* This makes MSVC/CLion intellisense work. */
732#define CGLTF_IMPLEMENTATION
733#endif
734
735#ifdef CGLTF_IMPLEMENTATION
736
737#include <stdint.h> /* For uint8_t, uint32_t */
738#include <string.h> /* For strncpy */
739#include <stdio.h> /* For fopen */
740#include <limits.h> /* For UINT_MAX etc */
741
742#if !defined(CGLTF_MALLOC) || !defined(CGLTF_FREE) || !defined(CGLTF_ATOI) || !defined(CGLTF_ATOF)
743#include <stdlib.h> /* For malloc, free, atoi, atof */
744#endif
745
746/* JSMN_PARENT_LINKS is necessary to make parsing large structures linear in input size */
747#define JSMN_PARENT_LINKS
748
749/* JSMN_STRICT is necessary to reject invalid JSON documents */
750#define JSMN_STRICT
751
752/*
753 * -- jsmn.h start --
754 * Source: https://github.com/zserge/jsmn
755 * License: MIT
756 */
757typedef enum {
758 JSMN_UNDEFINED = 0,
759 JSMN_OBJECT = 1,
760 JSMN_ARRAY = 2,
761 JSMN_STRING = 3,
762 JSMN_PRIMITIVE = 4
763} jsmntype_t;
764enum jsmnerr {
765 /* Not enough tokens were provided */
766 JSMN_ERROR_NOMEM = -1,
767 /* Invalid character inside JSON string */
768 JSMN_ERROR_INVAL = -2,
769 /* The string is not a full JSON packet, more bytes expected */
770 JSMN_ERROR_PART = -3
771};
772typedef struct {
773 jsmntype_t type;
774 int start;
775 int end;
776 int size;
777#ifdef JSMN_PARENT_LINKS
778 int parent;
779#endif
780} jsmntok_t;
781typedef struct {
782 unsigned int pos; /* offset in the JSON string */
783 unsigned int toknext; /* next token to allocate */
784 int toksuper; /* superior token node, e.g parent object or array */
785} jsmn_parser;
786static void jsmn_init(jsmn_parser *parser);
787static int jsmn_parse(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, size_t num_tokens);
788/*
789 * -- jsmn.h end --
790 */
791
792
793static const cgltf_size GlbHeaderSize = 12;
794static const cgltf_size GlbChunkHeaderSize = 8;
795static const uint32_t GlbVersion = 2;
796static const uint32_t GlbMagic = 0x46546C67;
797static const uint32_t GlbMagicJsonChunk = 0x4E4F534A;
798static const uint32_t GlbMagicBinChunk = 0x004E4942;
799
800#ifndef CGLTF_MALLOC
801#define CGLTF_MALLOC(size) malloc(size)
802#endif
803#ifndef CGLTF_FREE
804#define CGLTF_FREE(ptr) free(ptr)
805#endif
806#ifndef CGLTF_ATOI
807#define CGLTF_ATOI(str) atoi(str)
808#endif
809#ifndef CGLTF_ATOF
810#define CGLTF_ATOF(str) atof(str)
811#endif
812
813static void* cgltf_default_alloc(void* user, cgltf_size size)
814{
815 (void)user;
816 return CGLTF_MALLOC(size);
817}
818
819static void cgltf_default_free(void* user, void* ptr)
820{
821 (void)user;
822 CGLTF_FREE(ptr);
823}
824
825static void* cgltf_calloc(cgltf_options* options, size_t element_size, cgltf_size count)
826{
827 if (SIZE_MAX / element_size < count)
828 {
829 return NULL;
830 }
831 void* result = options->memory.alloc(options->memory.user_data, element_size * count);
832 if (!result)
833 {
834 return NULL;
835 }
836 memset(result, 0, element_size * count);
837 return result;
838}
839
840static cgltf_result cgltf_default_file_read(const struct cgltf_memory_options* memory_options, const struct cgltf_file_options* file_options, const char* path, cgltf_size* size, void** data)
841{
842 (void)file_options;
843 void* (*memory_alloc)(void*, cgltf_size) = memory_options->alloc ? memory_options->alloc : &cgltf_default_alloc;
844 void (*memory_free)(void*, void*) = memory_options->free ? memory_options->free : &cgltf_default_free;
845
846 FILE* file = fopen(path, "rb");
847 if (!file)
848 {
849 return cgltf_result_file_not_found;
850 }
851
852 cgltf_size file_size = size ? *size : 0;
853
854 if (file_size == 0)
855 {
856 fseek(file, 0, SEEK_END);
857
858 long length = ftell(file);
859 if (length < 0)
860 {
861 fclose(file);
862 return cgltf_result_io_error;
863 }
864
865 fseek(file, 0, SEEK_SET);
866 file_size = (cgltf_size)length;
867 }
868
869 char* file_data = (char*)memory_alloc(memory_options->user_data, file_size);
870 if (!file_data)
871 {
872 fclose(file);
873 return cgltf_result_out_of_memory;
874 }
875
876 cgltf_size read_size = fread(file_data, 1, file_size, file);
877
878 fclose(file);
879
880 if (read_size != file_size)
881 {
882 memory_free(memory_options->user_data, file_data);
883 return cgltf_result_io_error;
884 }
885
886 if (size)
887 {
888 *size = file_size;
889 }
890 if (data)
891 {
892 *data = file_data;
893 }
894
895 return cgltf_result_success;
896}
897
898static void cgltf_default_file_release(const struct cgltf_memory_options* memory_options, const struct cgltf_file_options* file_options, void* data)
899{
900 (void)file_options;
901 void (*memfree)(void*, void*) = memory_options->free ? memory_options->free : &cgltf_default_free;
902 memfree(memory_options->user_data, data);
903}
904
905static cgltf_result cgltf_parse_json(cgltf_options* options, const uint8_t* json_chunk, cgltf_size size, cgltf_data** out_data);
906
907cgltf_result cgltf_parse(const cgltf_options* options, const void* data, cgltf_size size, cgltf_data** out_data)
908{
909 if (size < GlbHeaderSize)
910 {
911 return cgltf_result_data_too_short;
912 }
913
914 if (options == NULL)
915 {
916 return cgltf_result_invalid_options;
917 }
918
919 cgltf_options fixed_options = *options;
920 if (fixed_options.memory.alloc == NULL)
921 {
922 fixed_options.memory.alloc = &cgltf_default_alloc;
923 }
924 if (fixed_options.memory.free == NULL)
925 {
926 fixed_options.memory.free = &cgltf_default_free;
927 }
928
929 uint32_t tmp;
930 // Magic
931 memcpy(&tmp, data, 4);
932 if (tmp != GlbMagic)
933 {
934 if (fixed_options.type == cgltf_file_type_invalid)
935 {
936 fixed_options.type = cgltf_file_type_gltf;
937 }
938 else if (fixed_options.type == cgltf_file_type_glb)
939 {
940 return cgltf_result_unknown_format;
941 }
942 }
943
944 if (fixed_options.type == cgltf_file_type_gltf)
945 {
946 cgltf_result json_result = cgltf_parse_json(&fixed_options, (const uint8_t*)data, size, out_data);
947 if (json_result != cgltf_result_success)
948 {
949 return json_result;
950 }
951
952 (*out_data)->file_type = cgltf_file_type_gltf;
953
954 return cgltf_result_success;
955 }
956
957 const uint8_t* ptr = (const uint8_t*)data;
958 // Version
959 memcpy(&tmp, ptr + 4, 4);
960 uint32_t version = tmp;
961 if (version != GlbVersion)
962 {
963 return version < GlbVersion ? cgltf_result_legacy_gltf : cgltf_result_unknown_format;
964 }
965
966 // Total length
967 memcpy(&tmp, ptr + 8, 4);
968 if (tmp > size)
969 {
970 return cgltf_result_data_too_short;
971 }
972
973 const uint8_t* json_chunk = ptr + GlbHeaderSize;
974
975 if (GlbHeaderSize + GlbChunkHeaderSize > size)
976 {
977 return cgltf_result_data_too_short;
978 }
979
980 // JSON chunk: length
981 uint32_t json_length;
982 memcpy(&json_length, json_chunk, 4);
983 if (GlbHeaderSize + GlbChunkHeaderSize + json_length > size)
984 {
985 return cgltf_result_data_too_short;
986 }
987
988 // JSON chunk: magic
989 memcpy(&tmp, json_chunk + 4, 4);
990 if (tmp != GlbMagicJsonChunk)
991 {
992 return cgltf_result_unknown_format;
993 }
994
995 json_chunk += GlbChunkHeaderSize;
996
997 const void* bin = 0;
998 cgltf_size bin_size = 0;
999
1000 if (GlbHeaderSize + GlbChunkHeaderSize + json_length + GlbChunkHeaderSize <= size)
1001 {
1002 // We can read another chunk
1003 const uint8_t* bin_chunk = json_chunk + json_length;
1004
1005 // Bin chunk: length
1006 uint32_t bin_length;
1007 memcpy(&bin_length, bin_chunk, 4);
1008 if (GlbHeaderSize + GlbChunkHeaderSize + json_length + GlbChunkHeaderSize + bin_length > size)
1009 {
1010 return cgltf_result_data_too_short;
1011 }
1012
1013 // Bin chunk: magic
1014 memcpy(&tmp, bin_chunk + 4, 4);
1015 if (tmp != GlbMagicBinChunk)
1016 {
1017 return cgltf_result_unknown_format;
1018 }
1019
1020 bin_chunk += GlbChunkHeaderSize;
1021
1022 bin = bin_chunk;
1023 bin_size = bin_length;
1024 }
1025
1026 cgltf_result json_result = cgltf_parse_json(&fixed_options, json_chunk, json_length, out_data);
1027 if (json_result != cgltf_result_success)
1028 {
1029 return json_result;
1030 }
1031
1032 (*out_data)->file_type = cgltf_file_type_glb;
1033 (*out_data)->bin = bin;
1034 (*out_data)->bin_size = bin_size;
1035
1036 return cgltf_result_success;
1037}
1038
1039cgltf_result cgltf_parse_file(const cgltf_options* options, const char* path, cgltf_data** out_data)
1040{
1041 if (options == NULL)
1042 {
1043 return cgltf_result_invalid_options;
1044 }
1045
1046 void (*memory_free)(void*, void*) = options->memory.free ? options->memory.free : &cgltf_default_free;
1047 cgltf_result (*file_read)(const struct cgltf_memory_options*, const struct cgltf_file_options*, const char*, cgltf_size*, void**) = options->file.read ? options->file.read : &cgltf_default_file_read;
1048
1049 void* file_data = NULL;
1050 cgltf_size file_size = 0;
1051 cgltf_result result = file_read(&options->memory, &options->file, path, &file_size, &file_data);
1052 if (result != cgltf_result_success)
1053 {
1054 return result;
1055 }
1056
1057 result = cgltf_parse(options, file_data, file_size, out_data);
1058
1059 if (result != cgltf_result_success)
1060 {
1061 memory_free(options->memory.user_data, file_data);
1062 return result;
1063 }
1064
1065 (*out_data)->file_data = file_data;
1066
1067 return cgltf_result_success;
1068}
1069
1070static void cgltf_combine_paths(char* path, const char* base, const char* uri)
1071{
1072 const char* s0 = strrchr(base, '/');
1073 const char* s1 = strrchr(base, '\\');
1074 const char* slash = s0 ? (s1 && s1 > s0 ? s1 : s0) : s1;
1075
1076 if (slash)
1077 {
1078 size_t prefix = slash - base + 1;
1079
1080 strncpy(path, base, prefix);
1081 strcpy(path + prefix, uri);
1082 }
1083 else
1084 {
1085 strcpy(path, uri);
1086 }
1087}
1088
1089static cgltf_result cgltf_load_buffer_file(const cgltf_options* options, cgltf_size size, const char* uri, const char* gltf_path, void** out_data)
1090{
1091 void* (*memory_alloc)(void*, cgltf_size) = options->memory.alloc ? options->memory.alloc : &cgltf_default_alloc;
1092 void (*memory_free)(void*, void*) = options->memory.free ? options->memory.free : &cgltf_default_free;
1093 cgltf_result (*file_read)(const struct cgltf_memory_options*, const struct cgltf_file_options*, const char*, cgltf_size*, void**) = options->file.read ? options->file.read : &cgltf_default_file_read;
1094
1095 char* path = (char*)memory_alloc(options->memory.user_data, strlen(uri) + strlen(gltf_path) + 1);
1096 if (!path)
1097 {
1098 return cgltf_result_out_of_memory;
1099 }
1100
1101 cgltf_combine_paths(path, gltf_path, uri);
1102
1103 // after combining, the tail of the resulting path is a uri; decode_uri converts it into path
1104 cgltf_decode_uri(path + strlen(path) - strlen(uri));
1105
1106 void* file_data = NULL;
1107 cgltf_result result = file_read(&options->memory, &options->file, path, &size, &file_data);
1108
1109 memory_free(options->memory.user_data, path);
1110
1111 *out_data = (result == cgltf_result_success) ? file_data : NULL;
1112
1113 return result;
1114}
1115
1116cgltf_result cgltf_load_buffer_base64(const cgltf_options* options, cgltf_size size, const char* base64, void** out_data)
1117{
1118 void* (*memory_alloc)(void*, cgltf_size) = options->memory.alloc ? options->memory.alloc : &cgltf_default_alloc;
1119 void (*memory_free)(void*, void*) = options->memory.free ? options->memory.free : &cgltf_default_free;
1120
1121 unsigned char* data = (unsigned char*)memory_alloc(options->memory.user_data, size);
1122 if (!data)
1123 {
1124 return cgltf_result_out_of_memory;
1125 }
1126
1127 unsigned int buffer = 0;
1128 unsigned int buffer_bits = 0;
1129
1130 for (cgltf_size i = 0; i < size; ++i)
1131 {
1132 while (buffer_bits < 8)
1133 {
1134 char ch = *base64++;
1135
1136 int index =
1137 (unsigned)(ch - 'A') < 26 ? (ch - 'A') :
1138 (unsigned)(ch - 'a') < 26 ? (ch - 'a') + 26 :
1139 (unsigned)(ch - '0') < 10 ? (ch - '0') + 52 :
1140 ch == '+' ? 62 :
1141 ch == '/' ? 63 :
1142 -1;
1143
1144 if (index < 0)
1145 {
1146 memory_free(options->memory.user_data, data);
1147 return cgltf_result_io_error;
1148 }
1149
1150 buffer = (buffer << 6) | index;
1151 buffer_bits += 6;
1152 }
1153
1154 data[i] = (unsigned char)(buffer >> (buffer_bits - 8));
1155 buffer_bits -= 8;
1156 }
1157
1158 *out_data = data;
1159
1160 return cgltf_result_success;
1161}
1162
1163static int cgltf_unhex(char ch)
1164{
1165 return
1166 (unsigned)(ch - '0') < 10 ? (ch - '0') :
1167 (unsigned)(ch - 'A') < 6 ? (ch - 'A') + 10 :
1168 (unsigned)(ch - 'a') < 6 ? (ch - 'a') + 10 :
1169 -1;
1170}
1171
1172void cgltf_decode_uri(char* uri)
1173{
1174 char* write = uri;
1175 char* i = uri;
1176
1177 while (*i)
1178 {
1179 if (*i == '%')
1180 {
1181 int ch1 = cgltf_unhex(i[1]);
1182
1183 if (ch1 >= 0)
1184 {
1185 int ch2 = cgltf_unhex(i[2]);
1186
1187 if (ch2 >= 0)
1188 {
1189 *write++ = (char)(ch1 * 16 + ch2);
1190 i += 3;
1191 continue;
1192 }
1193 }
1194 }
1195
1196 *write++ = *i++;
1197 }
1198
1199 *write = 0;
1200}
1201
1202cgltf_result cgltf_load_buffers(const cgltf_options* options, cgltf_data* data, const char* gltf_path)
1203{
1204 if (options == NULL)
1205 {
1206 return cgltf_result_invalid_options;
1207 }
1208
1209 if (data->buffers_count && data->buffers[0].data == NULL && data->buffers[0].uri == NULL && data->bin)
1210 {
1211 if (data->bin_size < data->buffers[0].size)
1212 {
1213 return cgltf_result_data_too_short;
1214 }
1215
1216 data->buffers[0].data = (void*)data->bin;
1217 }
1218
1219 for (cgltf_size i = 0; i < data->buffers_count; ++i)
1220 {
1221 if (data->buffers[i].data)
1222 {
1223 continue;
1224 }
1225
1226 const char* uri = data->buffers[i].uri;
1227
1228 if (uri == NULL)
1229 {
1230 continue;
1231 }
1232
1233 if (strncmp(uri, "data:", 5) == 0)
1234 {
1235 const char* comma = strchr(uri, ',');
1236
1237 if (comma && comma - uri >= 7 && strncmp(comma - 7, ";base64", 7) == 0)
1238 {
1239 cgltf_result res = cgltf_load_buffer_base64(options, data->buffers[i].size, comma + 1, &data->buffers[i].data);
1240
1241 if (res != cgltf_result_success)
1242 {
1243 return res;
1244 }
1245 }
1246 else
1247 {
1248 return cgltf_result_unknown_format;
1249 }
1250 }
1251 else if (strstr(uri, "://") == NULL && gltf_path)
1252 {
1253 cgltf_result res = cgltf_load_buffer_file(options, data->buffers[i].size, uri, gltf_path, &data->buffers[i].data);
1254
1255 if (res != cgltf_result_success)
1256 {
1257 return res;
1258 }
1259 }
1260 else
1261 {
1262 return cgltf_result_unknown_format;
1263 }
1264 }
1265
1266 return cgltf_result_success;
1267}
1268
1269static cgltf_size cgltf_calc_size(cgltf_type type, cgltf_component_type component_type);
1270
1271static cgltf_size cgltf_calc_index_bound(cgltf_buffer_view* buffer_view, cgltf_size offset, cgltf_component_type component_type, cgltf_size count)
1272{
1273 char* data = (char*)buffer_view->buffer->data + offset + buffer_view->offset;
1274 cgltf_size bound = 0;
1275
1276 switch (component_type)
1277 {
1278 case cgltf_component_type_r_8u:
1279 for (size_t i = 0; i < count; ++i)
1280 {
1281 cgltf_size v = ((unsigned char*)data)[i];
1282 bound = bound > v ? bound : v;
1283 }
1284 break;
1285
1286 case cgltf_component_type_r_16u:
1287 for (size_t i = 0; i < count; ++i)
1288 {
1289 cgltf_size v = ((unsigned short*)data)[i];
1290 bound = bound > v ? bound : v;
1291 }
1292 break;
1293
1294 case cgltf_component_type_r_32u:
1295 for (size_t i = 0; i < count; ++i)
1296 {
1297 cgltf_size v = ((unsigned int*)data)[i];
1298 bound = bound > v ? bound : v;
1299 }
1300 break;
1301
1302 default:
1303 ;
1304 }
1305
1306 return bound;
1307}
1308
1309cgltf_result cgltf_validate(cgltf_data* data)
1310{
1311 for (cgltf_size i = 0; i < data->accessors_count; ++i)
1312 {
1313 cgltf_accessor* accessor = &data->accessors[i];
1314
1315 cgltf_size element_size = cgltf_calc_size(accessor->type, accessor->component_type);
1316
1317 if (accessor->buffer_view)
1318 {
1319 cgltf_size req_size = accessor->offset + accessor->stride * (accessor->count - 1) + element_size;
1320
1321 if (accessor->buffer_view->size < req_size)
1322 {
1323 return cgltf_result_data_too_short;
1324 }
1325 }
1326
1327 if (accessor->is_sparse)
1328 {
1329 cgltf_accessor_sparse* sparse = &accessor->sparse;
1330
1331 cgltf_size indices_component_size = cgltf_calc_size(cgltf_type_scalar, sparse->indices_component_type);
1332 cgltf_size indices_req_size = sparse->indices_byte_offset + indices_component_size * sparse->count;
1333 cgltf_size values_req_size = sparse->values_byte_offset + element_size * sparse->count;
1334
1335 if (sparse->indices_buffer_view->size < indices_req_size ||
1336 sparse->values_buffer_view->size < values_req_size)
1337 {
1338 return cgltf_result_data_too_short;
1339 }
1340
1341 if (sparse->indices_component_type != cgltf_component_type_r_8u &&
1342 sparse->indices_component_type != cgltf_component_type_r_16u &&
1343 sparse->indices_component_type != cgltf_component_type_r_32u)
1344 {
1345 return cgltf_result_invalid_gltf;
1346 }
1347
1348 if (sparse->indices_buffer_view->buffer->data)
1349 {
1350 cgltf_size index_bound = cgltf_calc_index_bound(sparse->indices_buffer_view, sparse->indices_byte_offset, sparse->indices_component_type, sparse->count);
1351
1352 if (index_bound >= accessor->count)
1353 {
1354 return cgltf_result_data_too_short;
1355 }
1356 }
1357 }
1358 }
1359
1360 for (cgltf_size i = 0; i < data->buffer_views_count; ++i)
1361 {
1362 cgltf_size req_size = data->buffer_views[i].offset + data->buffer_views[i].size;
1363
1364 if (data->buffer_views[i].buffer && data->buffer_views[i].buffer->size < req_size)
1365 {
1366 return cgltf_result_data_too_short;
1367 }
1368 }
1369
1370 for (cgltf_size i = 0; i < data->meshes_count; ++i)
1371 {
1372 if (data->meshes[i].weights)
1373 {
1374 if (data->meshes[i].primitives_count && data->meshes[i].primitives[0].targets_count != data->meshes[i].weights_count)
1375 {
1376 return cgltf_result_invalid_gltf;
1377 }
1378 }
1379
1380 if (data->meshes[i].target_names)
1381 {
1382 if (data->meshes[i].primitives_count && data->meshes[i].primitives[0].targets_count != data->meshes[i].target_names_count)
1383 {
1384 return cgltf_result_invalid_gltf;
1385 }
1386 }
1387
1388 for (cgltf_size j = 0; j < data->meshes[i].primitives_count; ++j)
1389 {
1390 if (data->meshes[i].primitives[j].targets_count != data->meshes[i].primitives[0].targets_count)
1391 {
1392 return cgltf_result_invalid_gltf;
1393 }
1394
1395 if (data->meshes[i].primitives[j].attributes_count)
1396 {
1397 cgltf_accessor* first = data->meshes[i].primitives[j].attributes[0].data;
1398
1399 for (cgltf_size k = 0; k < data->meshes[i].primitives[j].attributes_count; ++k)
1400 {
1401 if (data->meshes[i].primitives[j].attributes[k].data->count != first->count)
1402 {
1403 return cgltf_result_invalid_gltf;
1404 }
1405 }
1406
1407 for (cgltf_size k = 0; k < data->meshes[i].primitives[j].targets_count; ++k)
1408 {
1409 for (cgltf_size m = 0; m < data->meshes[i].primitives[j].targets[k].attributes_count; ++m)
1410 {
1411 if (data->meshes[i].primitives[j].targets[k].attributes[m].data->count != first->count)
1412 {
1413 return cgltf_result_invalid_gltf;
1414 }
1415 }
1416 }
1417
1418 cgltf_accessor* indices = data->meshes[i].primitives[j].indices;
1419
1420 if (indices &&
1421 indices->component_type != cgltf_component_type_r_8u &&
1422 indices->component_type != cgltf_component_type_r_16u &&
1423 indices->component_type != cgltf_component_type_r_32u)
1424 {
1425 return cgltf_result_invalid_gltf;
1426 }
1427
1428 if (indices && indices->buffer_view && indices->buffer_view->buffer->data)
1429 {
1430 cgltf_size index_bound = cgltf_calc_index_bound(indices->buffer_view, indices->offset, indices->component_type, indices->count);
1431
1432 if (index_bound >= first->count)
1433 {
1434 return cgltf_result_data_too_short;
1435 }
1436 }
1437 }
1438 }
1439 }
1440
1441 for (cgltf_size i = 0; i < data->nodes_count; ++i)
1442 {
1443 if (data->nodes[i].weights && data->nodes[i].mesh)
1444 {
1445 if (data->nodes[i].mesh->primitives_count && data->nodes[i].mesh->primitives[0].targets_count != data->nodes[i].weights_count)
1446 {
1447 return cgltf_result_invalid_gltf;
1448 }
1449 }
1450 }
1451
1452 for (cgltf_size i = 0; i < data->nodes_count; ++i)
1453 {
1454 cgltf_node* p1 = data->nodes[i].parent;
1455 cgltf_node* p2 = p1 ? p1->parent : NULL;
1456
1457 while (p1 && p2)
1458 {
1459 if (p1 == p2)
1460 {
1461 return cgltf_result_invalid_gltf;
1462 }
1463
1464 p1 = p1->parent;
1465 p2 = p2->parent ? p2->parent->parent : NULL;
1466 }
1467 }
1468
1469 for (cgltf_size i = 0; i < data->scenes_count; ++i)
1470 {
1471 for (cgltf_size j = 0; j < data->scenes[i].nodes_count; ++j)
1472 {
1473 if (data->scenes[i].nodes[j]->parent)
1474 {
1475 return cgltf_result_invalid_gltf;
1476 }
1477 }
1478 }
1479
1480 for (cgltf_size i = 0; i < data->animations_count; ++i)
1481 {
1482 for (cgltf_size j = 0; j < data->animations[i].channels_count; ++j)
1483 {
1484 cgltf_animation_channel* channel = &data->animations[i].channels[j];
1485
1486 if (!channel->target_node)
1487 {
1488 continue;
1489 }
1490
1491 cgltf_size components = 1;
1492
1493 if (channel->target_path == cgltf_animation_path_type_weights)
1494 {
1495 if (!channel->target_node->mesh || !channel->target_node->mesh->primitives_count)
1496 {
1497 return cgltf_result_invalid_gltf;
1498 }
1499
1500 components = channel->target_node->mesh->primitives[0].targets_count;
1501 }
1502
1503 cgltf_size values = channel->sampler->interpolation == cgltf_interpolation_type_cubic_spline ? 3 : 1;
1504
1505 if (channel->sampler->input->count * components * values != channel->sampler->output->count)
1506 {
1507 return cgltf_result_data_too_short;
1508 }
1509 }
1510 }
1511
1512 return cgltf_result_success;
1513}
1514
1515cgltf_result cgltf_copy_extras_json(const cgltf_data* data, const cgltf_extras* extras, char* dest, cgltf_size* dest_size)
1516{
1517 cgltf_size json_size = extras->end_offset - extras->start_offset;
1518
1519 if (!dest)
1520 {
1521 if (dest_size)
1522 {
1523 *dest_size = json_size + 1;
1524 return cgltf_result_success;
1525 }
1526 return cgltf_result_invalid_options;
1527 }
1528
1529 if (*dest_size + 1 < json_size)
1530 {
1531 strncpy(dest, data->json + extras->start_offset, *dest_size - 1);
1532 dest[*dest_size - 1] = 0;
1533 }
1534 else
1535 {
1536 strncpy(dest, data->json + extras->start_offset, json_size);
1537 dest[json_size] = 0;
1538 }
1539
1540 return cgltf_result_success;
1541}
1542
1543void cgltf_free_extensions(cgltf_data* data, cgltf_extension* extensions, cgltf_size extensions_count)
1544{
1545 for (cgltf_size i = 0; i < extensions_count; ++i)
1546 {
1547 data->memory.free(data->memory.user_data, extensions[i].name);
1548 data->memory.free(data->memory.user_data, extensions[i].data);
1549 }
1550 data->memory.free(data->memory.user_data, extensions);
1551}
1552
1553void cgltf_free(cgltf_data* data)
1554{
1555 if (!data)
1556 {
1557 return;
1558 }
1559
1560 void (*file_release)(const struct cgltf_memory_options*, const struct cgltf_file_options*, void* data) = data->file.release ? data->file.release : cgltf_default_file_release;
1561
1562 data->memory.free(data->memory.user_data, data->asset.copyright);
1563 data->memory.free(data->memory.user_data, data->asset.generator);
1564 data->memory.free(data->memory.user_data, data->asset.version);
1565 data->memory.free(data->memory.user_data, data->asset.min_version);
1566
1567 cgltf_free_extensions(data, data->asset.extensions, data->asset.extensions_count);
1568
1569 for (cgltf_size i = 0; i < data->accessors_count; ++i)
1570 {
1571 if(data->accessors[i].is_sparse)
1572 {
1573 cgltf_free_extensions(data, data->accessors[i].sparse.extensions, data->accessors[i].sparse.extensions_count);
1574 cgltf_free_extensions(data, data->accessors[i].sparse.indices_extensions, data->accessors[i].sparse.indices_extensions_count);
1575 cgltf_free_extensions(data, data->accessors[i].sparse.values_extensions, data->accessors[i].sparse.values_extensions_count);
1576 }
1577 cgltf_free_extensions(data, data->accessors[i].extensions, data->accessors[i].extensions_count);
1578 }
1579 data->memory.free(data->memory.user_data, data->accessors);
1580
1581 for (cgltf_size i = 0; i < data->buffer_views_count; ++i)
1582 {
1583 cgltf_free_extensions(data, data->buffer_views[i].extensions, data->buffer_views[i].extensions_count);
1584 }
1585 data->memory.free(data->memory.user_data, data->buffer_views);
1586
1587 for (cgltf_size i = 0; i < data->buffers_count; ++i)
1588 {
1589 if (data->buffers[i].data != data->bin)
1590 {
1591 file_release(&data->memory, &data->file, data->buffers[i].data);
1592 }
1593 data->memory.free(data->memory.user_data, data->buffers[i].uri);
1594
1595 cgltf_free_extensions(data, data->buffers[i].extensions, data->buffers[i].extensions_count);
1596 }
1597
1598 data->memory.free(data->memory.user_data, data->buffers);
1599
1600 for (cgltf_size i = 0; i < data->meshes_count; ++i)
1601 {
1602 data->memory.free(data->memory.user_data, data->meshes[i].name);
1603
1604 for (cgltf_size j = 0; j < data->meshes[i].primitives_count; ++j)
1605 {
1606 for (cgltf_size k = 0; k < data->meshes[i].primitives[j].attributes_count; ++k)
1607 {
1608 data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].attributes[k].name);
1609 }
1610
1611 data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].attributes);
1612
1613 for (cgltf_size k = 0; k < data->meshes[i].primitives[j].targets_count; ++k)
1614 {
1615 for (cgltf_size m = 0; m < data->meshes[i].primitives[j].targets[k].attributes_count; ++m)
1616 {
1617 data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].targets[k].attributes[m].name);
1618 }
1619
1620 data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].targets[k].attributes);
1621 }
1622
1623 data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].targets);
1624
1625 if (data->meshes[i].primitives[j].has_draco_mesh_compression)
1626 {
1627 for (cgltf_size k = 0; k < data->meshes[i].primitives[j].draco_mesh_compression.attributes_count; ++k)
1628 {
1629 data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].draco_mesh_compression.attributes[k].name);
1630 }
1631
1632 data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].draco_mesh_compression.attributes);
1633 }
1634
1635 cgltf_free_extensions(data, data->meshes[i].primitives[j].extensions, data->meshes[i].primitives[j].extensions_count);
1636 }
1637
1638 data->memory.free(data->memory.user_data, data->meshes[i].primitives);
1639 data->memory.free(data->memory.user_data, data->meshes[i].weights);
1640
1641 for (cgltf_size j = 0; j < data->meshes[i].target_names_count; ++j)
1642 {
1643 data->memory.free(data->memory.user_data, data->meshes[i].target_names[j]);
1644 }
1645
1646 cgltf_free_extensions(data, data->meshes[i].extensions, data->meshes[i].extensions_count);
1647
1648 data->memory.free(data->memory.user_data, data->meshes[i].target_names);
1649 }
1650
1651 data->memory.free(data->memory.user_data, data->meshes);
1652
1653 for (cgltf_size i = 0; i < data->materials_count; ++i)
1654 {
1655 data->memory.free(data->memory.user_data, data->materials[i].name);
1656
1657 if(data->materials[i].has_pbr_metallic_roughness)
1658 {
1659 cgltf_free_extensions(data, data->materials[i].pbr_metallic_roughness.metallic_roughness_texture.extensions, data->materials[i].pbr_metallic_roughness.metallic_roughness_texture.extensions_count);
1660 cgltf_free_extensions(data, data->materials[i].pbr_metallic_roughness.base_color_texture.extensions, data->materials[i].pbr_metallic_roughness.base_color_texture.extensions_count);
1661 }
1662 if(data->materials[i].has_pbr_specular_glossiness)
1663 {
1664 cgltf_free_extensions(data, data->materials[i].pbr_specular_glossiness.diffuse_texture.extensions, data->materials[i].pbr_specular_glossiness.diffuse_texture.extensions_count);
1665 cgltf_free_extensions(data, data->materials[i].pbr_specular_glossiness.specular_glossiness_texture.extensions, data->materials[i].pbr_specular_glossiness.specular_glossiness_texture.extensions_count);
1666 }
1667 if(data->materials[i].has_clearcoat)
1668 {
1669 cgltf_free_extensions(data, data->materials[i].clearcoat.clearcoat_texture.extensions, data->materials[i].clearcoat.clearcoat_texture.extensions_count);
1670 cgltf_free_extensions(data, data->materials[i].clearcoat.clearcoat_roughness_texture.extensions, data->materials[i].clearcoat.clearcoat_roughness_texture.extensions_count);
1671 cgltf_free_extensions(data, data->materials[i].clearcoat.clearcoat_normal_texture.extensions, data->materials[i].clearcoat.clearcoat_normal_texture.extensions_count);
1672 }
1673 if(data->materials[i].has_specular)
1674 {
1675 cgltf_free_extensions(data, data->materials[i].specular.specular_texture.extensions, data->materials[i].specular.specular_texture.extensions_count);
1676 }
1677 if(data->materials[i].has_transmission)
1678 {
1679 cgltf_free_extensions(data, data->materials[i].transmission.transmission_texture.extensions, data->materials[i].transmission.transmission_texture.extensions_count);
1680 }
1681
1682 cgltf_free_extensions(data, data->materials[i].normal_texture.extensions, data->materials[i].normal_texture.extensions_count);
1683 cgltf_free_extensions(data, data->materials[i].occlusion_texture.extensions, data->materials[i].occlusion_texture.extensions_count);
1684 cgltf_free_extensions(data, data->materials[i].emissive_texture.extensions, data->materials[i].emissive_texture.extensions_count);
1685
1686 cgltf_free_extensions(data, data->materials[i].extensions, data->materials[i].extensions_count);
1687 }
1688
1689 data->memory.free(data->memory.user_data, data->materials);
1690
1691 for (cgltf_size i = 0; i < data->images_count; ++i)
1692 {
1693 data->memory.free(data->memory.user_data, data->images[i].name);
1694 data->memory.free(data->memory.user_data, data->images[i].uri);
1695 data->memory.free(data->memory.user_data, data->images[i].mime_type);
1696
1697 cgltf_free_extensions(data, data->images[i].extensions, data->images[i].extensions_count);
1698 }
1699
1700 data->memory.free(data->memory.user_data, data->images);
1701
1702 for (cgltf_size i = 0; i < data->textures_count; ++i)
1703 {
1704 data->memory.free(data->memory.user_data, data->textures[i].name);
1705 cgltf_free_extensions(data, data->textures[i].extensions, data->textures[i].extensions_count);
1706 }
1707
1708 data->memory.free(data->memory.user_data, data->textures);
1709
1710 for (cgltf_size i = 0; i < data->samplers_count; ++i)
1711 {
1712 cgltf_free_extensions(data, data->samplers[i].extensions, data->samplers[i].extensions_count);
1713 }
1714
1715 data->memory.free(data->memory.user_data, data->samplers);
1716
1717 for (cgltf_size i = 0; i < data->skins_count; ++i)
1718 {
1719 data->memory.free(data->memory.user_data, data->skins[i].name);
1720 data->memory.free(data->memory.user_data, data->skins[i].joints);
1721
1722 cgltf_free_extensions(data, data->skins[i].extensions, data->skins[i].extensions_count);
1723 }
1724
1725 data->memory.free(data->memory.user_data, data->skins);
1726
1727 for (cgltf_size i = 0; i < data->cameras_count; ++i)
1728 {
1729 data->memory.free(data->memory.user_data, data->cameras[i].name);
1730 cgltf_free_extensions(data, data->cameras[i].extensions, data->cameras[i].extensions_count);
1731 }
1732
1733 data->memory.free(data->memory.user_data, data->cameras);
1734
1735 for (cgltf_size i = 0; i < data->lights_count; ++i)
1736 {
1737 data->memory.free(data->memory.user_data, data->lights[i].name);
1738 }
1739
1740 data->memory.free(data->memory.user_data, data->lights);
1741
1742 for (cgltf_size i = 0; i < data->nodes_count; ++i)
1743 {
1744 data->memory.free(data->memory.user_data, data->nodes[i].name);
1745 data->memory.free(data->memory.user_data, data->nodes[i].children);
1746 data->memory.free(data->memory.user_data, data->nodes[i].weights);
1747 cgltf_free_extensions(data, data->nodes[i].extensions, data->nodes[i].extensions_count);
1748 }
1749
1750 data->memory.free(data->memory.user_data, data->nodes);
1751
1752 for (cgltf_size i = 0; i < data->scenes_count; ++i)
1753 {
1754 data->memory.free(data->memory.user_data, data->scenes[i].name);
1755 data->memory.free(data->memory.user_data, data->scenes[i].nodes);
1756
1757 cgltf_free_extensions(data, data->scenes[i].extensions, data->scenes[i].extensions_count);
1758 }
1759
1760 data->memory.free(data->memory.user_data, data->scenes);
1761
1762 for (cgltf_size i = 0; i < data->animations_count; ++i)
1763 {
1764 data->memory.free(data->memory.user_data, data->animations[i].name);
1765 for (cgltf_size j = 0; j < data->animations[i].samplers_count; ++j)
1766 {
1767 cgltf_free_extensions(data, data->animations[i].samplers[j].extensions, data->animations[i].samplers[j].extensions_count);
1768 }
1769 data->memory.free(data->memory.user_data, data->animations[i].samplers);
1770
1771 for (cgltf_size j = 0; j < data->animations[i].channels_count; ++j)
1772 {
1773 cgltf_free_extensions(data, data->animations[i].channels[j].extensions, data->animations[i].channels[j].extensions_count);
1774 }
1775 data->memory.free(data->memory.user_data, data->animations[i].channels);
1776
1777 cgltf_free_extensions(data, data->animations[i].extensions, data->animations[i].extensions_count);
1778 }
1779
1780 data->memory.free(data->memory.user_data, data->animations);
1781
1782 cgltf_free_extensions(data, data->data_extensions, data->data_extensions_count);
1783
1784 for (cgltf_size i = 0; i < data->extensions_used_count; ++i)
1785 {
1786 data->memory.free(data->memory.user_data, data->extensions_used[i]);
1787 }
1788
1789 data->memory.free(data->memory.user_data, data->extensions_used);
1790
1791 for (cgltf_size i = 0; i < data->extensions_required_count; ++i)
1792 {
1793 data->memory.free(data->memory.user_data, data->extensions_required[i]);
1794 }
1795
1796 data->memory.free(data->memory.user_data, data->extensions_required);
1797
1798 file_release(&data->memory, &data->file, data->file_data);
1799
1800 data->memory.free(data->memory.user_data, data);
1801}
1802
1803void cgltf_node_transform_local(const cgltf_node* node, cgltf_float* out_matrix)
1804{
1805 cgltf_float* lm = out_matrix;
1806
1807 if (node->has_matrix)
1808 {
1809 memcpy(lm, node->matrix, sizeof(float) * 16);
1810 }
1811 else
1812 {
1813 float tx = node->translation[0];
1814 float ty = node->translation[1];
1815 float tz = node->translation[2];
1816
1817 float qx = node->rotation[0];
1818 float qy = node->rotation[1];
1819 float qz = node->rotation[2];
1820 float qw = node->rotation[3];
1821
1822 float sx = node->scale[0];
1823 float sy = node->scale[1];
1824 float sz = node->scale[2];
1825
1826 lm[0] = (1 - 2 * qy*qy - 2 * qz*qz) * sx;
1827 lm[1] = (2 * qx*qy + 2 * qz*qw) * sx;
1828 lm[2] = (2 * qx*qz - 2 * qy*qw) * sx;
1829 lm[3] = 0.f;
1830
1831 lm[4] = (2 * qx*qy - 2 * qz*qw) * sy;
1832 lm[5] = (1 - 2 * qx*qx - 2 * qz*qz) * sy;
1833 lm[6] = (2 * qy*qz + 2 * qx*qw) * sy;
1834 lm[7] = 0.f;
1835
1836 lm[8] = (2 * qx*qz + 2 * qy*qw) * sz;
1837 lm[9] = (2 * qy*qz - 2 * qx*qw) * sz;
1838 lm[10] = (1 - 2 * qx*qx - 2 * qy*qy) * sz;
1839 lm[11] = 0.f;
1840
1841 lm[12] = tx;
1842 lm[13] = ty;
1843 lm[14] = tz;
1844 lm[15] = 1.f;
1845 }
1846}
1847
1848void cgltf_node_transform_world(const cgltf_node* node, cgltf_float* out_matrix)
1849{
1850 cgltf_float* lm = out_matrix;
1851 cgltf_node_transform_local(node, lm);
1852
1853 const cgltf_node* parent = node->parent;
1854
1855 while (parent)
1856 {
1857 float pm[16];
1858 cgltf_node_transform_local(parent, pm);
1859
1860 for (int i = 0; i < 4; ++i)
1861 {
1862 float l0 = lm[i * 4 + 0];
1863 float l1 = lm[i * 4 + 1];
1864 float l2 = lm[i * 4 + 2];
1865
1866 float r0 = l0 * pm[0] + l1 * pm[4] + l2 * pm[8];
1867 float r1 = l0 * pm[1] + l1 * pm[5] + l2 * pm[9];
1868 float r2 = l0 * pm[2] + l1 * pm[6] + l2 * pm[10];
1869
1870 lm[i * 4 + 0] = r0;
1871 lm[i * 4 + 1] = r1;
1872 lm[i * 4 + 2] = r2;
1873 }
1874
1875 lm[12] += pm[12];
1876 lm[13] += pm[13];
1877 lm[14] += pm[14];
1878
1879 parent = parent->parent;
1880 }
1881}
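/*
 * The loop above left-multiplies the local matrix by each ancestor's
 * local transform, so the result is parent * ... * local. Since glTF
 * node matrices are affine (bottom row 0 0 0 1), only the upper 3x3
 * block and the translation need updating. A minimal usage sketch:
 *
 *   cgltf_float world[16];
 *   cgltf_node_transform_world(node, world);
 */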
1882
1883static cgltf_size cgltf_component_read_index(const void* in, cgltf_component_type component_type)
1884{
1885 switch (component_type)
1886 {
1887 case cgltf_component_type_r_16:
1888 return *((const int16_t*) in);
1889 case cgltf_component_type_r_16u:
1890 return *((const uint16_t*) in);
1891 case cgltf_component_type_r_32u:
1892 return *((const uint32_t*) in);
1893 case cgltf_component_type_r_32f:
1894 return (cgltf_size)*((const float*) in);
1895 case cgltf_component_type_r_8:
1896 return *((const int8_t*) in);
1897 case cgltf_component_type_r_8u:
1898 return *((const uint8_t*) in);
1899 default:
1900 return 0;
1901 }
1902}
1903
1904static cgltf_float cgltf_component_read_float(const void* in, cgltf_component_type component_type, cgltf_bool normalized)
1905{
1906 if (component_type == cgltf_component_type_r_32f)
1907 {
1908 return *((const float*) in);
1909 }
1910
1911 if (normalized)
1912 {
1913 switch (component_type)
1914 {
1915 // note: the glTF spec doesn't currently define normalized conversions for 32-bit integers
1916 case cgltf_component_type_r_16:
1917 return *((const int16_t*) in) / (cgltf_float)32767;
1918 case cgltf_component_type_r_16u:
1919 return *((const uint16_t*) in) / (cgltf_float)65535;
1920 case cgltf_component_type_r_8:
1921 return *((const int8_t*) in) / (cgltf_float)127;
1922 case cgltf_component_type_r_8u:
1923 return *((const uint8_t*) in) / (cgltf_float)255;
1924 default:
1925 return 0;
1926 }
1927 }
1928
1929 return (cgltf_float)cgltf_component_read_index(in, component_type);
1930}
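/*
 * Normalized integers map to [0,1] (unsigned) or [-1,1] (signed) by
 * dividing by the type's maximum, per the glTF conversion table. Note
 * that the spec additionally clamps signed results to -1.0, which the
 * plain division can undershoot for the most negative input (e.g.
 * -32768 / 32767).
 */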
1931
1932static cgltf_size cgltf_component_size(cgltf_component_type component_type);
1933
1934static cgltf_bool cgltf_element_read_float(const uint8_t* element, cgltf_type type, cgltf_component_type component_type, cgltf_bool normalized, cgltf_float* out, cgltf_size element_size)
1935{
1936 cgltf_size num_components = cgltf_num_components(type);
1937
1938 if (element_size < num_components) {
1939 return 0;
1940 }
1941
1942 // There are three special cases for component extraction; see the #data-alignment section of the glTF 2.0 specification.
1943
1944 cgltf_size component_size = cgltf_component_size(component_type);
1945
1946 if (type == cgltf_type_mat2 && component_size == 1)
1947 {
1948 out[0] = cgltf_component_read_float(element, component_type, normalized);
1949 out[1] = cgltf_component_read_float(element + 1, component_type, normalized);
1950 out[2] = cgltf_component_read_float(element + 4, component_type, normalized);
1951 out[3] = cgltf_component_read_float(element + 5, component_type, normalized);
1952 return 1;
1953 }
1954
1955 if (type == cgltf_type_mat3 && component_size == 1)
1956 {
1957 out[0] = cgltf_component_read_float(element, component_type, normalized);
1958 out[1] = cgltf_component_read_float(element + 1, component_type, normalized);
1959 out[2] = cgltf_component_read_float(element + 2, component_type, normalized);
1960 out[3] = cgltf_component_read_float(element + 4, component_type, normalized);
1961 out[4] = cgltf_component_read_float(element + 5, component_type, normalized);
1962 out[5] = cgltf_component_read_float(element + 6, component_type, normalized);
1963 out[6] = cgltf_component_read_float(element + 8, component_type, normalized);
1964 out[7] = cgltf_component_read_float(element + 9, component_type, normalized);
1965 out[8] = cgltf_component_read_float(element + 10, component_type, normalized);
1966 return 1;
1967 }
1968
1969 if (type == cgltf_type_mat3 && component_size == 2)
1970 {
1971 out[0] = cgltf_component_read_float(element, component_type, normalized);
1972 out[1] = cgltf_component_read_float(element + 2, component_type, normalized);
1973 out[2] = cgltf_component_read_float(element + 4, component_type, normalized);
1974 out[3] = cgltf_component_read_float(element + 8, component_type, normalized);
1975 out[4] = cgltf_component_read_float(element + 10, component_type, normalized);
1976 out[5] = cgltf_component_read_float(element + 12, component_type, normalized);
1977 out[6] = cgltf_component_read_float(element + 16, component_type, normalized);
1978 out[7] = cgltf_component_read_float(element + 18, component_type, normalized);
1979 out[8] = cgltf_component_read_float(element + 20, component_type, normalized);
1980 return 1;
1981 }
1982
1983 for (cgltf_size i = 0; i < num_components; ++i)
1984 {
1985 out[i] = cgltf_component_read_float(element + component_size * i, component_type, normalized);
1986 }
1987 return 1;
1988}
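/*
 * Alignment example: the special cases above exist because each matrix
 * column must start on a 4-byte boundary. A MAT2 of byte components is
 * stored as c0r0 c0r1 pad pad c1r0 c1r1 pad pad, hence the reads at
 * offsets 0, 1, 4 and 5.
 */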
1989
1990cgltf_bool cgltf_accessor_read_float(const cgltf_accessor* accessor, cgltf_size index, cgltf_float* out, cgltf_size element_size)
1991{
1992 if (accessor->is_sparse)
1993 {
1994 return 0;
1995 }
1996 if (accessor->buffer_view == NULL)
1997 {
1998 memset(out, 0, element_size * sizeof(cgltf_float));
1999 return 1;
2000 }
2001 if (accessor->buffer_view->buffer->data == NULL)
2002 {
2003 return 0;
2004 }
2005 cgltf_size offset = accessor->offset + accessor->buffer_view->offset;
2006 const uint8_t* element = (const uint8_t*) accessor->buffer_view->buffer->data;
2007 element += offset + accessor->stride * index;
2008 return cgltf_element_read_float(element, accessor->type, accessor->component_type, accessor->normalized, out, element_size);
2009}
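/*
 * A minimal usage sketch, assuming the accessor's buffers have been
 * loaded: read one vec3 element, e.g. from a POSITION accessor.
 *
 *   cgltf_float pos[3];
 *   if (cgltf_accessor_read_float(accessor, index, pos, 3)) { ... }
 *
 * Sparse accessors are rejected here; use cgltf_accessor_unpack_floats
 * for those.
 */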
2010
2011cgltf_size cgltf_accessor_unpack_floats(const cgltf_accessor* accessor, cgltf_float* out, cgltf_size float_count)
2012{
2013 cgltf_size floats_per_element = cgltf_num_components(accessor->type);
2014 cgltf_size available_floats = accessor->count * floats_per_element;
2015 if (out == NULL)
2016 {
2017 return available_floats;
2018 }
2019
2020 float_count = available_floats < float_count ? available_floats : float_count;
2021 cgltf_size element_count = float_count / floats_per_element;
2022
2023 // First pass: convert each element in the base accessor.
2024 cgltf_float* dest = out;
2025 cgltf_accessor dense = *accessor;
2026 dense.is_sparse = 0;
2027 for (cgltf_size index = 0; index < element_count; index++, dest += floats_per_element)
2028 {
2029 if (!cgltf_accessor_read_float(&dense, index, dest, floats_per_element))
2030 {
2031 return 0;
2032 }
2033 }
2034
2035 // Second pass: write out each element in the sparse accessor.
2036 if (accessor->is_sparse)
2037 {
2038 const cgltf_accessor_sparse* sparse = &dense.sparse;
2039
2040 if (sparse->indices_buffer_view->buffer->data == NULL || sparse->values_buffer_view->buffer->data == NULL)
2041 {
2042 return 0;
2043 }
2044
2045 const uint8_t* index_data = (const uint8_t*) sparse->indices_buffer_view->buffer->data;
2046 index_data += sparse->indices_byte_offset + sparse->indices_buffer_view->offset;
2047 cgltf_size index_stride = cgltf_component_size(sparse->indices_component_type);
2048 const uint8_t* reader_head = (const uint8_t*) sparse->values_buffer_view->buffer->data;
2049 reader_head += sparse->values_byte_offset + sparse->values_buffer_view->offset;
2050 for (cgltf_size reader_index = 0; reader_index < sparse->count; reader_index++, index_data += index_stride)
2051 {
2052 cgltf_size writer_index = cgltf_component_read_index(index_data, sparse->indices_component_type);
2053 float* writer_head = out + writer_index * floats_per_element;
2054
2055 if (!cgltf_element_read_float(reader_head, dense.type, dense.component_type, dense.normalized, writer_head, floats_per_element))
2056 {
2057 return 0;
2058 }
2059
2060 reader_head += dense.stride;
2061 }
2062 }
2063
2064 return element_count * floats_per_element;
2065}
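/*
 * Typical two-step usage sketch (the allocation is illustrative): pass
 * a NULL destination to query the required float count, then unpack.
 *
 *   cgltf_size n = cgltf_accessor_unpack_floats(accessor, NULL, 0);
 *   cgltf_float* buf = (cgltf_float*)malloc(n * sizeof(cgltf_float));
 *   cgltf_accessor_unpack_floats(accessor, buf, n);
 */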
2066
2067static cgltf_uint cgltf_component_read_uint(const void* in, cgltf_component_type component_type)
2068{
2069 switch (component_type)
2070 {
2071 case cgltf_component_type_r_8:
2072 return *((const int8_t*) in);
2073
2074 case cgltf_component_type_r_8u:
2075 return *((const uint8_t*) in);
2076
2077 case cgltf_component_type_r_16:
2078 return *((const int16_t*) in);
2079
2080 case cgltf_component_type_r_16u:
2081 return *((const uint16_t*) in);
2082
2083 case cgltf_component_type_r_32u:
2084 return *((const uint32_t*) in);
2085
2086 default:
2087 return 0;
2088 }
2089}
2090
2091static cgltf_bool cgltf_element_read_uint(const uint8_t* element, cgltf_type type, cgltf_component_type component_type, cgltf_uint* out, cgltf_size element_size)
2092{
2093 cgltf_size num_components = cgltf_num_components(type);
2094
2095 if (element_size < num_components)
2096 {
2097 return 0;
2098 }
2099
2100 // Reading integer matrices is not a valid use case
2101 if (type == cgltf_type_mat2 || type == cgltf_type_mat3 || type == cgltf_type_mat4)
2102 {
2103 return 0;
2104 }
2105
2106 cgltf_size component_size = cgltf_component_size(component_type);
2107
2108 for (cgltf_size i = 0; i < num_components; ++i)
2109 {
2110 out[i] = cgltf_component_read_uint(element + component_size * i, component_type);
2111 }
2112 return 1;
2113}
2114
2115cgltf_bool cgltf_accessor_read_uint(const cgltf_accessor* accessor, cgltf_size index, cgltf_uint* out, cgltf_size element_size)
2116{
2117 if (accessor->is_sparse)
2118 {
2119 return 0;
2120 }
2121 if (accessor->buffer_view == NULL)
2122 {
2123 memset(out, 0, element_size * sizeof( cgltf_uint ));
2124 return 1;
2125 }
2126 if (accessor->buffer_view->buffer->data == NULL)
2127 {
2128 return 0;
2129 }
2130 cgltf_size offset = accessor->offset + accessor->buffer_view->offset;
2131 const uint8_t* element = (const uint8_t*) accessor->buffer_view->buffer->data;
2132 element += offset + accessor->stride * index;
2133 return cgltf_element_read_uint(element, accessor->type, accessor->component_type, out, element_size);
2134}
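/*
 * Usage sketch: integer vertex attributes such as JOINTS_0 are read
 * through this path.
 *
 *   cgltf_uint joints[4];
 *   cgltf_accessor_read_uint(accessor, index, joints, 4);
 */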
2135
2136cgltf_size cgltf_accessor_read_index(const cgltf_accessor* accessor, cgltf_size index)
2137{
2138 if (accessor->is_sparse)
2139 {
2140 return 0; // This is an error case, but we can't communicate the error through the existing interface.
2141 }
2142 if (accessor->buffer_view == NULL)
2143 {
2144 return 0;
2145 }
2146 if (accessor->buffer_view->buffer->data == NULL)
2147 {
2148 return 0; // This is an error case, but we can't communicate the error through the existing interface.
2149 }
2150
2151 cgltf_size offset = accessor->offset + accessor->buffer_view->offset;
2152 const uint8_t* element = (const uint8_t*) accessor->buffer_view->buffer->data;
2153 element += offset + accessor->stride * index;
2154 return cgltf_component_read_index(element, accessor->component_type);
2155}
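/*
 * Index-buffer sketch (the primitive pointer is illustrative): iterate
 * a triangle list through its index accessor.
 *
 *   for (cgltf_size k = 0; k < prim->indices->count; ++k)
 *   {
 *       cgltf_size v = cgltf_accessor_read_index(prim->indices, k);
 *   }
 */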
2156
2157#define CGLTF_ERROR_JSON -1
2158#define CGLTF_ERROR_NOMEM -2
2159#define CGLTF_ERROR_LEGACY -3
2160
2161#define CGLTF_CHECK_TOKTYPE(tok_, type_) if ((tok_).type != (type_)) { return CGLTF_ERROR_JSON; }
2162 #define CGLTF_CHECK_KEY(tok_) if ((tok_).type != JSMN_STRING || (tok_).size == 0) { return CGLTF_ERROR_JSON; } /* a nonzero size verifies that a value follows the key */
2163
2164 #define CGLTF_PTRINDEX(type, idx) (type*)((cgltf_size)(idx) + 1)
2165#define CGLTF_PTRFIXUP(var, data, size) if (var) { if ((cgltf_size)var > size) { return CGLTF_ERROR_JSON; } var = &data[(cgltf_size)var-1]; }
2166#define CGLTF_PTRFIXUP_REQ(var, data, size) if (!var || (cgltf_size)var > size) { return CGLTF_ERROR_JSON; } var = &data[(cgltf_size)var-1];
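/*
 * During parsing, cross-references are stored as 1-based indices cast to
 * pointer values (CGLTF_PTRINDEX), so 0 still means NULL. Once every
 * array has been allocated, the post-parse fixup pass rewrites them into
 * real addresses with CGLTF_PTRFIXUP / CGLTF_PTRFIXUP_REQ, which also
 * bounds-check the index against the target array size.
 */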
2167
2168static int cgltf_json_strcmp(jsmntok_t const* tok, const uint8_t* json_chunk, const char* str)
2169{
2170 CGLTF_CHECK_TOKTYPE(*tok, JSMN_STRING);
2171 size_t const str_len = strlen(str);
2172 size_t const name_length = tok->end - tok->start;
2173 return (str_len == name_length) ? strncmp((const char*)json_chunk + tok->start, str, str_len) : 128;
2174}
2175
2176static int cgltf_json_to_int(jsmntok_t const* tok, const uint8_t* json_chunk)
2177{
2178 CGLTF_CHECK_TOKTYPE(*tok, JSMN_PRIMITIVE);
2179 char tmp[128];
2180 int size = (cgltf_size)(tok->end - tok->start) < sizeof(tmp) ? tok->end - tok->start : (int)(sizeof(tmp) - 1);
2181 strncpy(tmp, (const char*)json_chunk + tok->start, size);
2182 tmp[size] = 0;
2183 return CGLTF_ATOI(tmp);
2184}
2185
2186static cgltf_float cgltf_json_to_float(jsmntok_t const* tok, const uint8_t* json_chunk)
2187{
2188 CGLTF_CHECK_TOKTYPE(*tok, JSMN_PRIMITIVE);
2189 char tmp[128];
2190 int size = (cgltf_size)(tok->end - tok->start) < sizeof(tmp) ? tok->end - tok->start : (int)(sizeof(tmp) - 1);
2191 strncpy(tmp, (const char*)json_chunk + tok->start, size);
2192 tmp[size] = 0;
2193 return (cgltf_float)CGLTF_ATOF(tmp);
2194}
2195
2196static cgltf_bool cgltf_json_to_bool(jsmntok_t const* tok, const uint8_t* json_chunk)
2197{
2198 int size = tok->end - tok->start;
2199 return size == 4 && memcmp(json_chunk + tok->start, "true", 4) == 0;
2200}
2201
2202static int cgltf_skip_json(jsmntok_t const* tokens, int i)
2203{
2204 int end = i + 1;
2205
2206 while (i < end)
2207 {
2208 switch (tokens[i].type)
2209 {
2210 case JSMN_OBJECT:
2211 end += tokens[i].size * 2;
2212 break;
2213
2214 case JSMN_ARRAY:
2215 end += tokens[i].size;
2216 break;
2217
2218 case JSMN_PRIMITIVE:
2219 case JSMN_STRING:
2220 break;
2221
2222 default:
2223 return -1;
2224 }
2225
2226 i++;
2227 }
2228
2229 return i;
2230}
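/*
 * jsmn emits tokens in a flat array: an object's size counts key/value
 * pairs (two tokens each) and an array's size counts elements, so the
 * walk above simply grows the end bound. For example, {"a":[1,2]}
 * occupies five tokens: object, key, array, and two primitives.
 */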
2231
2232static void cgltf_fill_float_array(float* out_array, int size, float value)
2233{
2234 for (int j = 0; j < size; ++j)
2235 {
2236 out_array[j] = value;
2237 }
2238}
2239
2240static int cgltf_parse_json_float_array(jsmntok_t const* tokens, int i, const uint8_t* json_chunk, float* out_array, int size)
2241{
2242 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_ARRAY);
2243 if (tokens[i].size != size)
2244 {
2245 return CGLTF_ERROR_JSON;
2246 }
2247 ++i;
2248 for (int j = 0; j < size; ++j)
2249 {
2250 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE);
2251 out_array[j] = cgltf_json_to_float(tokens + i, json_chunk);
2252 ++i;
2253 }
2254 return i;
2255}
2256
2257static int cgltf_parse_json_string(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, char** out_string)
2258{
2259 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_STRING);
2260 if (*out_string)
2261 {
2262 return CGLTF_ERROR_JSON;
2263 }
2264 int size = tokens[i].end - tokens[i].start;
2265 char* result = (char*)options->memory.alloc(options->memory.user_data, size + 1);
2266 if (!result)
2267 {
2268 return CGLTF_ERROR_NOMEM;
2269 }
2270 strncpy(result, (const char*)json_chunk + tokens[i].start, size);
2271 result[size] = 0;
2272 *out_string = result;
2273 return i + 1;
2274}
2275
2276static int cgltf_parse_json_array(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, size_t element_size, void** out_array, cgltf_size* out_size)
2277{
2278 (void)json_chunk;
2279 if (tokens[i].type != JSMN_ARRAY)
2280 {
2281 return tokens[i].type == JSMN_OBJECT ? CGLTF_ERROR_LEGACY : CGLTF_ERROR_JSON;
2282 }
2283 if (*out_array)
2284 {
2285 return CGLTF_ERROR_JSON;
2286 }
2287 int size = tokens[i].size;
2288 void* result = cgltf_calloc(options, element_size, size);
2289 if (!result)
2290 {
2291 return CGLTF_ERROR_NOMEM;
2292 }
2293 *out_array = result;
2294 *out_size = size;
2295 return i + 1;
2296}
2297
2298static int cgltf_parse_json_string_array(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, char*** out_array, cgltf_size* out_size)
2299{
2300 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_ARRAY);
2301 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(char*), (void**)out_array, out_size);
2302 if (i < 0)
2303 {
2304 return i;
2305 }
2306
2307 for (cgltf_size j = 0; j < *out_size; ++j)
2308 {
2309 i = cgltf_parse_json_string(options, tokens, i, json_chunk, j + (*out_array));
2310 if (i < 0)
2311 {
2312 return i;
2313 }
2314 }
2315 return i;
2316}
2317
2318static void cgltf_parse_attribute_type(const char* name, cgltf_attribute_type* out_type, int* out_index)
2319{
2320 const char* us = strchr(name, '_');
2321 size_t len = us ? (size_t)(us - name) : strlen(name);
2322
2323 if (len == 8 && strncmp(name, "POSITION", 8) == 0)
2324 {
2325 *out_type = cgltf_attribute_type_position;
2326 }
2327 else if (len == 6 && strncmp(name, "NORMAL", 6) == 0)
2328 {
2329 *out_type = cgltf_attribute_type_normal;
2330 }
2331 else if (len == 7 && strncmp(name, "TANGENT", 7) == 0)
2332 {
2333 *out_type = cgltf_attribute_type_tangent;
2334 }
2335 else if (len == 8 && strncmp(name, "TEXCOORD", 8) == 0)
2336 {
2337 *out_type = cgltf_attribute_type_texcoord;
2338 }
2339 else if (len == 5 && strncmp(name, "COLOR", 5) == 0)
2340 {
2341 *out_type = cgltf_attribute_type_color;
2342 }
2343 else if (len == 6 && strncmp(name, "JOINTS", 6) == 0)
2344 {
2345 *out_type = cgltf_attribute_type_joints;
2346 }
2347 else if (len == 7 && strncmp(name, "WEIGHTS", 7) == 0)
2348 {
2349 *out_type = cgltf_attribute_type_weights;
2350 }
2351 else
2352 {
2353 *out_type = cgltf_attribute_type_invalid;
2354 }
2355
2356 if (us && *out_type != cgltf_attribute_type_invalid)
2357 {
2358 *out_index = CGLTF_ATOI(us + 1);
2359 }
2360}
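/*
 * Examples: "TEXCOORD_0" yields (cgltf_attribute_type_texcoord, 0) and
 * "JOINTS_1" yields (cgltf_attribute_type_joints, 1). Application-
 * specific names such as "_TEMPERATURE" map to
 * cgltf_attribute_type_invalid and leave the index untouched.
 */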
2361
2362static int cgltf_parse_json_attribute_list(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_attribute** out_attributes, cgltf_size* out_attributes_count)
2363{
2364 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
2365
2366 if (*out_attributes)
2367 {
2368 return CGLTF_ERROR_JSON;
2369 }
2370
2371 *out_attributes_count = tokens[i].size;
2372 *out_attributes = (cgltf_attribute*)cgltf_calloc(options, sizeof(cgltf_attribute), *out_attributes_count);
2373 ++i;
2374
2375 if (!*out_attributes)
2376 {
2377 return CGLTF_ERROR_NOMEM;
2378 }
2379
2380 for (cgltf_size j = 0; j < *out_attributes_count; ++j)
2381 {
2382 CGLTF_CHECK_KEY(tokens[i]);
2383
2384 i = cgltf_parse_json_string(options, tokens, i, json_chunk, &(*out_attributes)[j].name);
2385 if (i < 0)
2386 {
2387 return CGLTF_ERROR_JSON;
2388 }
2389
2390 cgltf_parse_attribute_type((*out_attributes)[j].name, &(*out_attributes)[j].type, &(*out_attributes)[j].index);
2391
2392 (*out_attributes)[j].data = CGLTF_PTRINDEX(cgltf_accessor, cgltf_json_to_int(tokens + i, json_chunk));
2393 ++i;
2394 }
2395
2396 return i;
2397}
2398
2399static int cgltf_parse_json_extras(jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_extras* out_extras)
2400{
2401 (void)json_chunk;
2402 out_extras->start_offset = tokens[i].start;
2403 out_extras->end_offset = tokens[i].end;
2404 i = cgltf_skip_json(tokens, i);
2405 return i;
2406}
2407
2408static int cgltf_parse_json_unprocessed_extension(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_extension* out_extension)
2409{
2410 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_STRING);
2411 CGLTF_CHECK_TOKTYPE(tokens[i+1], JSMN_OBJECT);
2412 if (out_extension->name)
2413 {
2414 return CGLTF_ERROR_JSON;
2415 }
2416
2417 cgltf_size name_length = tokens[i].end - tokens[i].start;
2418 out_extension->name = (char*)options->memory.alloc(options->memory.user_data, name_length + 1);
2419 if (!out_extension->name)
2420 {
2421 return CGLTF_ERROR_NOMEM;
2422 }
2423 strncpy(out_extension->name, (const char*)json_chunk + tokens[i].start, name_length);
2424 out_extension->name[name_length] = 0;
2425 i++;
2426
2427 size_t start = tokens[i].start;
2428 size_t size = tokens[i].end - start;
2429 out_extension->data = (char*)options->memory.alloc(options->memory.user_data, size + 1);
2430 if (!out_extension->data)
2431 {
2432 return CGLTF_ERROR_NOMEM;
2433 }
2434 strncpy(out_extension->data, (const char*)json_chunk + start, size);
2435 out_extension->data[size] = '\0';
2436
2437 i = cgltf_skip_json(tokens, i);
2438
2439 return i;
2440}
2441
2442static int cgltf_parse_json_unprocessed_extensions(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_size* out_extensions_count, cgltf_extension** out_extensions)
2443{
2444 ++i;
2445
2446 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
2447 if (*out_extensions)
2448 {
2449 return CGLTF_ERROR_JSON;
2450 }
2451
2452 int extensions_size = tokens[i].size;
2453 *out_extensions_count = 0;
2454 *out_extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size);
2455
2456 if (!*out_extensions)
2457 {
2458 return CGLTF_ERROR_NOMEM;
2459 }
2460
2461 ++i;
2462
2463 for (int j = 0; j < extensions_size; ++j)
2464 {
2465 CGLTF_CHECK_KEY(tokens[i]);
2466
2467 cgltf_size extension_index = (*out_extensions_count)++;
2468 cgltf_extension* extension = &((*out_extensions)[extension_index]);
2469 i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, extension);
2470
2471 if (i < 0)
2472 {
2473 return i;
2474 }
2475 }
2476 return i;
2477}
2478
2479static int cgltf_parse_json_draco_mesh_compression(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_draco_mesh_compression* out_draco_mesh_compression)
2480{
2481 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
2482
2483 int size = tokens[i].size;
2484 ++i;
2485
2486 for (int j = 0; j < size; ++j)
2487 {
2488 CGLTF_CHECK_KEY(tokens[i]);
2489
2490 if (cgltf_json_strcmp(tokens + i, json_chunk, "attributes") == 0)
2491 {
2492 i = cgltf_parse_json_attribute_list(options, tokens, i + 1, json_chunk, &out_draco_mesh_compression->attributes, &out_draco_mesh_compression->attributes_count);
2493 }
2494 else if (cgltf_json_strcmp(tokens + i, json_chunk, "bufferView") == 0)
2495 {
2496 ++i;
2497 out_draco_mesh_compression->buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk));
2498 ++i;
2499 }
2500 }
2501
2502 return i;
2503}
2504
2505static int cgltf_parse_json_primitive(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_primitive* out_prim)
2506{
2507 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
2508
2509 out_prim->type = cgltf_primitive_type_triangles;
2510
2511 int size = tokens[i].size;
2512 ++i;
2513
2514 for (int j = 0; j < size; ++j)
2515 {
2516 CGLTF_CHECK_KEY(tokens[i]);
2517
2518 if (cgltf_json_strcmp(tokens+i, json_chunk, "mode") == 0)
2519 {
2520 ++i;
2521 out_prim->type
2522 = (cgltf_primitive_type)
2523 cgltf_json_to_int(tokens+i, json_chunk);
2524 ++i;
2525 }
2526 else if (cgltf_json_strcmp(tokens+i, json_chunk, "indices") == 0)
2527 {
2528 ++i;
2529 out_prim->indices = CGLTF_PTRINDEX(cgltf_accessor, cgltf_json_to_int(tokens + i, json_chunk));
2530 ++i;
2531 }
2532 else if (cgltf_json_strcmp(tokens+i, json_chunk, "material") == 0)
2533 {
2534 ++i;
2535 out_prim->material = CGLTF_PTRINDEX(cgltf_material, cgltf_json_to_int(tokens + i, json_chunk));
2536 ++i;
2537 }
2538 else if (cgltf_json_strcmp(tokens+i, json_chunk, "attributes") == 0)
2539 {
2540 i = cgltf_parse_json_attribute_list(options, tokens, i + 1, json_chunk, &out_prim->attributes, &out_prim->attributes_count);
2541 }
2542 else if (cgltf_json_strcmp(tokens+i, json_chunk, "targets") == 0)
2543 {
2544 i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_morph_target), (void**)&out_prim->targets, &out_prim->targets_count);
2545 if (i < 0)
2546 {
2547 return i;
2548 }
2549
2550 for (cgltf_size k = 0; k < out_prim->targets_count; ++k)
2551 {
2552 i = cgltf_parse_json_attribute_list(options, tokens, i, json_chunk, &out_prim->targets[k].attributes, &out_prim->targets[k].attributes_count);
2553 if (i < 0)
2554 {
2555 return i;
2556 }
2557 }
2558 }
2559 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
2560 {
2561 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_prim->extras);
2562 }
2563 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
2564 {
2565 ++i;
2566
2567 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
2568 if (out_prim->extensions)
2569 {
2570 return CGLTF_ERROR_JSON;
2571 }
2572
2573 int extensions_size = tokens[i].size;
2574 out_prim->extensions_count = 0;
2575 out_prim->extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size);
2576
2577 if (!out_prim->extensions)
2578 {
2579 return CGLTF_ERROR_NOMEM;
2580 }
2581
2582 ++i;
2583 for (int k = 0; k < extensions_size; ++k)
2584 {
2585 CGLTF_CHECK_KEY(tokens[i]);
2586
2587 if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_draco_mesh_compression") == 0)
2588 {
2589 out_prim->has_draco_mesh_compression = 1;
2590 i = cgltf_parse_json_draco_mesh_compression(options, tokens, i + 1, json_chunk, &out_prim->draco_mesh_compression);
2591 }
2592 else
2593 {
2594 i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_prim->extensions[out_prim->extensions_count++]));
2595 }
2596
2597 if (i < 0)
2598 {
2599 return i;
2600 }
2601 }
2602 }
2603 else
2604 {
2605 i = cgltf_skip_json(tokens, i+1);
2606 }
2607
2608 if (i < 0)
2609 {
2610 return i;
2611 }
2612 }
2613
2614 return i;
2615}
2616
2617static int cgltf_parse_json_mesh(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_mesh* out_mesh)
2618{
2619 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
2620
2621 int size = tokens[i].size;
2622 ++i;
2623
2624 for (int j = 0; j < size; ++j)
2625 {
2626 CGLTF_CHECK_KEY(tokens[i]);
2627
2628 if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0)
2629 {
2630 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_mesh->name);
2631 }
2632 else if (cgltf_json_strcmp(tokens+i, json_chunk, "primitives") == 0)
2633 {
2634 i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_primitive), (void**)&out_mesh->primitives, &out_mesh->primitives_count);
2635 if (i < 0)
2636 {
2637 return i;
2638 }
2639
2640 for (cgltf_size prim_index = 0; prim_index < out_mesh->primitives_count; ++prim_index)
2641 {
2642 i = cgltf_parse_json_primitive(options, tokens, i, json_chunk, &out_mesh->primitives[prim_index]);
2643 if (i < 0)
2644 {
2645 return i;
2646 }
2647 }
2648 }
2649 else if (cgltf_json_strcmp(tokens + i, json_chunk, "weights") == 0)
2650 {
2651 i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_float), (void**)&out_mesh->weights, &out_mesh->weights_count);
2652 if (i < 0)
2653 {
2654 return i;
2655 }
2656
2657 i = cgltf_parse_json_float_array(tokens, i - 1, json_chunk, out_mesh->weights, (int)out_mesh->weights_count);
2658 }
2659 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
2660 {
2661 ++i;
2662
2663 out_mesh->extras.start_offset = tokens[i].start;
2664 out_mesh->extras.end_offset = tokens[i].end;
2665
2666 if (tokens[i].type == JSMN_OBJECT)
2667 {
2668 int extras_size = tokens[i].size;
2669 ++i;
2670
2671 for (int k = 0; k < extras_size; ++k)
2672 {
2673 CGLTF_CHECK_KEY(tokens[i]);
2674
2675 if (cgltf_json_strcmp(tokens+i, json_chunk, "targetNames") == 0)
2676 {
2677 i = cgltf_parse_json_string_array(options, tokens, i + 1, json_chunk, &out_mesh->target_names, &out_mesh->target_names_count);
2678 }
2679 else
2680 {
2681 i = cgltf_skip_json(tokens, i+1);
2682 }
2683
2684 if (i < 0)
2685 {
2686 return i;
2687 }
2688 }
2689 }
2690 else
2691 {
2692 i = cgltf_skip_json(tokens, i);
2693 }
2694 }
2695 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
2696 {
2697 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_mesh->extensions_count, &out_mesh->extensions);
2698 }
2699 else
2700 {
2701 i = cgltf_skip_json(tokens, i+1);
2702 }
2703
2704 if (i < 0)
2705 {
2706 return i;
2707 }
2708 }
2709
2710 return i;
2711}
2712
2713static int cgltf_parse_json_meshes(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
2714{
2715 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_mesh), (void**)&out_data->meshes, &out_data->meshes_count);
2716 if (i < 0)
2717 {
2718 return i;
2719 }
2720
2721 for (cgltf_size j = 0; j < out_data->meshes_count; ++j)
2722 {
2723 i = cgltf_parse_json_mesh(options, tokens, i, json_chunk, &out_data->meshes[j]);
2724 if (i < 0)
2725 {
2726 return i;
2727 }
2728 }
2729 return i;
2730}
2731
2732static cgltf_component_type cgltf_json_to_component_type(jsmntok_t const* tok, const uint8_t* json_chunk)
2733{
2734 int type = cgltf_json_to_int(tok, json_chunk);
2735
2736 switch (type)
2737 {
2738 case 5120:
2739 return cgltf_component_type_r_8;
2740 case 5121:
2741 return cgltf_component_type_r_8u;
2742 case 5122:
2743 return cgltf_component_type_r_16;
2744 case 5123:
2745 return cgltf_component_type_r_16u;
2746 case 5125:
2747 return cgltf_component_type_r_32u;
2748 case 5126:
2749 return cgltf_component_type_r_32f;
2750 default:
2751 return cgltf_component_type_invalid;
2752 }
2753}
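/*
 * The numeric cases above are the OpenGL component type enums that glTF
 * inherits: 5120 GL_BYTE, 5121 GL_UNSIGNED_BYTE, 5122 GL_SHORT,
 * 5123 GL_UNSIGNED_SHORT, 5125 GL_UNSIGNED_INT, 5126 GL_FLOAT.
 */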
2754
2755static int cgltf_parse_json_accessor_sparse(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_accessor_sparse* out_sparse)
2756{
2757 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
2758
2759 int size = tokens[i].size;
2760 ++i;
2761
2762 for (int j = 0; j < size; ++j)
2763 {
2764 CGLTF_CHECK_KEY(tokens[i]);
2765
2766 if (cgltf_json_strcmp(tokens+i, json_chunk, "count") == 0)
2767 {
2768 ++i;
2769 out_sparse->count = cgltf_json_to_int(tokens + i, json_chunk);
2770 ++i;
2771 }
2772 else if (cgltf_json_strcmp(tokens+i, json_chunk, "indices") == 0)
2773 {
2774 ++i;
2775 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
2776
2777 int indices_size = tokens[i].size;
2778 ++i;
2779
2780 for (int k = 0; k < indices_size; ++k)
2781 {
2782 CGLTF_CHECK_KEY(tokens[i]);
2783
2784 if (cgltf_json_strcmp(tokens+i, json_chunk, "bufferView") == 0)
2785 {
2786 ++i;
2787 out_sparse->indices_buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk));
2788 ++i;
2789 }
2790 else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteOffset") == 0)
2791 {
2792 ++i;
2793 out_sparse->indices_byte_offset = cgltf_json_to_int(tokens + i, json_chunk);
2794 ++i;
2795 }
2796 else if (cgltf_json_strcmp(tokens+i, json_chunk, "componentType") == 0)
2797 {
2798 ++i;
2799 out_sparse->indices_component_type = cgltf_json_to_component_type(tokens + i, json_chunk);
2800 ++i;
2801 }
2802 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
2803 {
2804 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_sparse->indices_extras);
2805 }
2806 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
2807 {
2808 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sparse->indices_extensions_count, &out_sparse->indices_extensions);
2809 }
2810 else
2811 {
2812 i = cgltf_skip_json(tokens, i+1);
2813 }
2814
2815 if (i < 0)
2816 {
2817 return i;
2818 }
2819 }
2820 }
2821 else if (cgltf_json_strcmp(tokens+i, json_chunk, "values") == 0)
2822 {
2823 ++i;
2824 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
2825
2826 int values_size = tokens[i].size;
2827 ++i;
2828
2829 for (int k = 0; k < values_size; ++k)
2830 {
2831 CGLTF_CHECK_KEY(tokens[i]);
2832
2833 if (cgltf_json_strcmp(tokens+i, json_chunk, "bufferView") == 0)
2834 {
2835 ++i;
2836 out_sparse->values_buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk));
2837 ++i;
2838 }
2839 else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteOffset") == 0)
2840 {
2841 ++i;
2842 out_sparse->values_byte_offset = cgltf_json_to_int(tokens + i, json_chunk);
2843 ++i;
2844 }
2845 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
2846 {
2847 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_sparse->values_extras);
2848 }
2849 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
2850 {
2851 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sparse->values_extensions_count, &out_sparse->values_extensions);
2852 }
2853 else
2854 {
2855 i = cgltf_skip_json(tokens, i+1);
2856 }
2857
2858 if (i < 0)
2859 {
2860 return i;
2861 }
2862 }
2863 }
2864 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
2865 {
2866 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_sparse->extras);
2867 }
2868 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
2869 {
2870 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sparse->extensions_count, &out_sparse->extensions);
2871 }
2872 else
2873 {
2874 i = cgltf_skip_json(tokens, i+1);
2875 }
2876
2877 if (i < 0)
2878 {
2879 return i;
2880 }
2881 }
2882
2883 return i;
2884}
2885
2886static int cgltf_parse_json_accessor(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_accessor* out_accessor)
2887{
2888 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
2889
2890 int size = tokens[i].size;
2891 ++i;
2892
2893 for (int j = 0; j < size; ++j)
2894 {
2895 CGLTF_CHECK_KEY(tokens[i]);
2896
2897 if (cgltf_json_strcmp(tokens+i, json_chunk, "bufferView") == 0)
2898 {
2899 ++i;
2900 out_accessor->buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk));
2901 ++i;
2902 }
2903 else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteOffset") == 0)
2904 {
2905 ++i;
2906 out_accessor->offset =
2907 cgltf_json_to_int(tokens+i, json_chunk);
2908 ++i;
2909 }
2910 else if (cgltf_json_strcmp(tokens+i, json_chunk, "componentType") == 0)
2911 {
2912 ++i;
2913 out_accessor->component_type = cgltf_json_to_component_type(tokens + i, json_chunk);
2914 ++i;
2915 }
2916 else if (cgltf_json_strcmp(tokens+i, json_chunk, "normalized") == 0)
2917 {
2918 ++i;
2919 out_accessor->normalized = cgltf_json_to_bool(tokens+i, json_chunk);
2920 ++i;
2921 }
2922 else if (cgltf_json_strcmp(tokens+i, json_chunk, "count") == 0)
2923 {
2924 ++i;
2925 out_accessor->count =
2926 cgltf_json_to_int(tokens+i, json_chunk);
2927 ++i;
2928 }
2929 else if (cgltf_json_strcmp(tokens+i, json_chunk, "type") == 0)
2930 {
2931 ++i;
2932 if (cgltf_json_strcmp(tokens+i, json_chunk, "SCALAR") == 0)
2933 {
2934 out_accessor->type = cgltf_type_scalar;
2935 }
2936 else if (cgltf_json_strcmp(tokens+i, json_chunk, "VEC2") == 0)
2937 {
2938 out_accessor->type = cgltf_type_vec2;
2939 }
2940 else if (cgltf_json_strcmp(tokens+i, json_chunk, "VEC3") == 0)
2941 {
2942 out_accessor->type = cgltf_type_vec3;
2943 }
2944 else if (cgltf_json_strcmp(tokens+i, json_chunk, "VEC4") == 0)
2945 {
2946 out_accessor->type = cgltf_type_vec4;
2947 }
2948 else if (cgltf_json_strcmp(tokens+i, json_chunk, "MAT2") == 0)
2949 {
2950 out_accessor->type = cgltf_type_mat2;
2951 }
2952 else if (cgltf_json_strcmp(tokens+i, json_chunk, "MAT3") == 0)
2953 {
2954 out_accessor->type = cgltf_type_mat3;
2955 }
2956 else if (cgltf_json_strcmp(tokens+i, json_chunk, "MAT4") == 0)
2957 {
2958 out_accessor->type = cgltf_type_mat4;
2959 }
2960 ++i;
2961 }
2962 else if (cgltf_json_strcmp(tokens + i, json_chunk, "min") == 0)
2963 {
2964 ++i;
2965 out_accessor->has_min = 1;
2966 // note: the exact element count can't be validated here since the accessor type may not have been parsed yet
2967 int min_size = tokens[i].size > 16 ? 16 : tokens[i].size;
2968 i = cgltf_parse_json_float_array(tokens, i, json_chunk, out_accessor->min, min_size);
2969 }
2970 else if (cgltf_json_strcmp(tokens + i, json_chunk, "max") == 0)
2971 {
2972 ++i;
2973 out_accessor->has_max = 1;
2974 // note: the exact element count can't be validated here since the accessor type may not have been parsed yet
2975 int max_size = tokens[i].size > 16 ? 16 : tokens[i].size;
2976 i = cgltf_parse_json_float_array(tokens, i, json_chunk, out_accessor->max, max_size);
2977 }
2978 else if (cgltf_json_strcmp(tokens + i, json_chunk, "sparse") == 0)
2979 {
2980 out_accessor->is_sparse = 1;
2981 i = cgltf_parse_json_accessor_sparse(options, tokens, i + 1, json_chunk, &out_accessor->sparse);
2982 }
2983 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
2984 {
2985 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_accessor->extras);
2986 }
2987 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
2988 {
2989 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_accessor->extensions_count, &out_accessor->extensions);
2990 }
2991 else
2992 {
2993 i = cgltf_skip_json(tokens, i+1);
2994 }
2995
2996 if (i < 0)
2997 {
2998 return i;
2999 }
3000 }
3001
3002 return i;
3003}
3004
3005static int cgltf_parse_json_texture_transform(jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_texture_transform* out_texture_transform)
3006{
3007 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3008
3009 int size = tokens[i].size;
3010 ++i;
3011
3012 for (int j = 0; j < size; ++j)
3013 {
3014 CGLTF_CHECK_KEY(tokens[i]);
3015
3016 if (cgltf_json_strcmp(tokens + i, json_chunk, "offset") == 0)
3017 {
3018 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_texture_transform->offset, 2);
3019 }
3020 else if (cgltf_json_strcmp(tokens + i, json_chunk, "rotation") == 0)
3021 {
3022 ++i;
3023 out_texture_transform->rotation = cgltf_json_to_float(tokens + i, json_chunk);
3024 ++i;
3025 }
3026 else if (cgltf_json_strcmp(tokens + i, json_chunk, "scale") == 0)
3027 {
3028 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_texture_transform->scale, 2);
3029 }
3030 else if (cgltf_json_strcmp(tokens + i, json_chunk, "texCoord") == 0)
3031 {
3032 ++i;
3033 out_texture_transform->texcoord = cgltf_json_to_int(tokens + i, json_chunk);
3034 ++i;
3035 }
3036 else
3037 {
3038 i = cgltf_skip_json(tokens, i + 1);
3039 }
3040
3041 if (i < 0)
3042 {
3043 return i;
3044 }
3045 }
3046
3047 return i;
3048}
3049
3050static int cgltf_parse_json_texture_view(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_texture_view* out_texture_view)
3051{
3052 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3053
3054 out_texture_view->scale = 1.0f;
3055 cgltf_fill_float_array(out_texture_view->transform.scale, 2, 1.0f);
3056
3057 int size = tokens[i].size;
3058 ++i;
3059
3060 for (int j = 0; j < size; ++j)
3061 {
3062 CGLTF_CHECK_KEY(tokens[i]);
3063
3064 if (cgltf_json_strcmp(tokens + i, json_chunk, "index") == 0)
3065 {
3066 ++i;
3067 out_texture_view->texture = CGLTF_PTRINDEX(cgltf_texture, cgltf_json_to_int(tokens + i, json_chunk));
3068 ++i;
3069 }
3070 else if (cgltf_json_strcmp(tokens + i, json_chunk, "texCoord") == 0)
3071 {
3072 ++i;
3073 out_texture_view->texcoord = cgltf_json_to_int(tokens + i, json_chunk);
3074 ++i;
3075 }
3076 else if (cgltf_json_strcmp(tokens + i, json_chunk, "scale") == 0)
3077 {
3078 ++i;
3079 out_texture_view->scale = cgltf_json_to_float(tokens + i, json_chunk);
3080 ++i;
3081 }
3082 else if (cgltf_json_strcmp(tokens + i, json_chunk, "strength") == 0)
3083 {
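// "strength" comes from occlusionTexture and is stored in the shared scale field.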
3084 ++i;
3085 out_texture_view->scale = cgltf_json_to_float(tokens + i, json_chunk);
3086 ++i;
3087 }
3088 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
3089 {
3090 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_texture_view->extras);
3091 }
3092 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
3093 {
3094 ++i;
3095
3096 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3097 if (out_texture_view->extensions)
3098 {
3099 return CGLTF_ERROR_JSON;
3100 }
3101
3102 int extensions_size = tokens[i].size;
3103 out_texture_view->extensions_count = 0;
3104 out_texture_view->extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size);
3105
3106 if (!out_texture_view->extensions)
3107 {
3108 return CGLTF_ERROR_NOMEM;
3109 }
3110
3111 ++i;
3112
3113 for (int k = 0; k < extensions_size; ++k)
3114 {
3115 CGLTF_CHECK_KEY(tokens[i]);
3116
3117 if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_texture_transform") == 0)
3118 {
3119 out_texture_view->has_transform = 1;
3120 i = cgltf_parse_json_texture_transform(tokens, i + 1, json_chunk, &out_texture_view->transform);
3121 }
3122 else
3123 {
3124 i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_texture_view->extensions[out_texture_view->extensions_count++]));
3125 }
3126
3127 if (i < 0)
3128 {
3129 return i;
3130 }
3131 }
3132 }
3133 else
3134 {
3135 i = cgltf_skip_json(tokens, i + 1);
3136 }
3137
3138 if (i < 0)
3139 {
3140 return i;
3141 }
3142 }
3143
3144 return i;
3145}
3146
3147static int cgltf_parse_json_pbr_metallic_roughness(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_pbr_metallic_roughness* out_pbr)
3148{
3149 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3150
3151 int size = tokens[i].size;
3152 ++i;
3153
3154 for (int j = 0; j < size; ++j)
3155 {
3156 CGLTF_CHECK_KEY(tokens[i]);
3157
3158 if (cgltf_json_strcmp(tokens+i, json_chunk, "metallicFactor") == 0)
3159 {
3160 ++i;
3161 out_pbr->metallic_factor =
3162 cgltf_json_to_float(tokens + i, json_chunk);
3163 ++i;
3164 }
3165 else if (cgltf_json_strcmp(tokens+i, json_chunk, "roughnessFactor") == 0)
3166 {
3167 ++i;
3168 out_pbr->roughness_factor =
3169 cgltf_json_to_float(tokens+i, json_chunk);
3170 ++i;
3171 }
3172 else if (cgltf_json_strcmp(tokens+i, json_chunk, "baseColorFactor") == 0)
3173 {
3174 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_pbr->base_color_factor, 4);
3175 }
3176 else if (cgltf_json_strcmp(tokens+i, json_chunk, "baseColorTexture") == 0)
3177 {
3178 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk,
3179 &out_pbr->base_color_texture);
3180 }
3181 else if (cgltf_json_strcmp(tokens + i, json_chunk, "metallicRoughnessTexture") == 0)
3182 {
3183 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk,
3184 &out_pbr->metallic_roughness_texture);
3185 }
3186 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
3187 {
3188 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_pbr->extras);
3189 }
3190 else
3191 {
3192 i = cgltf_skip_json(tokens, i+1);
3193 }
3194
3195 if (i < 0)
3196 {
3197 return i;
3198 }
3199 }
3200
3201 return i;
3202}
3203
3204static int cgltf_parse_json_pbr_specular_glossiness(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_pbr_specular_glossiness* out_pbr)
3205{
3206 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3207 int size = tokens[i].size;
3208 ++i;
3209
3210 for (int j = 0; j < size; ++j)
3211 {
3212 CGLTF_CHECK_KEY(tokens[i]);
3213
3214 if (cgltf_json_strcmp(tokens+i, json_chunk, "diffuseFactor") == 0)
3215 {
3216 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_pbr->diffuse_factor, 4);
3217 }
3218 else if (cgltf_json_strcmp(tokens+i, json_chunk, "specularFactor") == 0)
3219 {
3220 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_pbr->specular_factor, 3);
3221 }
3222 else if (cgltf_json_strcmp(tokens+i, json_chunk, "glossinessFactor") == 0)
3223 {
3224 ++i;
3225 out_pbr->glossiness_factor = cgltf_json_to_float(tokens + i, json_chunk);
3226 ++i;
3227 }
3228 else if (cgltf_json_strcmp(tokens+i, json_chunk, "diffuseTexture") == 0)
3229 {
3230 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_pbr->diffuse_texture);
3231 }
3232 else if (cgltf_json_strcmp(tokens+i, json_chunk, "specularGlossinessTexture") == 0)
3233 {
3234 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_pbr->specular_glossiness_texture);
3235 }
3236 else
3237 {
3238 i = cgltf_skip_json(tokens, i+1);
3239 }
3240
3241 if (i < 0)
3242 {
3243 return i;
3244 }
3245 }
3246
3247 return i;
3248}
3249
3250static int cgltf_parse_json_clearcoat(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_clearcoat* out_clearcoat)
3251{
3252 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3253 int size = tokens[i].size;
3254 ++i;
3255
3256 for (int j = 0; j < size; ++j)
3257 {
3258 CGLTF_CHECK_KEY(tokens[i]);
3259
3260 if (cgltf_json_strcmp(tokens+i, json_chunk, "clearcoatFactor") == 0)
3261 {
3262 ++i;
3263 out_clearcoat->clearcoat_factor = cgltf_json_to_float(tokens + i, json_chunk);
3264 ++i;
3265 }
3266 else if (cgltf_json_strcmp(tokens+i, json_chunk, "clearcoatRoughnessFactor") == 0)
3267 {
3268 ++i;
3269 out_clearcoat->clearcoat_roughness_factor = cgltf_json_to_float(tokens + i, json_chunk);
3270 ++i;
3271 }
3272 else if (cgltf_json_strcmp(tokens+i, json_chunk, "clearcoatTexture") == 0)
3273 {
3274 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_clearcoat->clearcoat_texture);
3275 }
3276 else if (cgltf_json_strcmp(tokens+i, json_chunk, "clearcoatRoughnessTexture") == 0)
3277 {
3278 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_clearcoat->clearcoat_roughness_texture);
3279 }
3280 else if (cgltf_json_strcmp(tokens+i, json_chunk, "clearcoatNormalTexture") == 0)
3281 {
3282 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_clearcoat->clearcoat_normal_texture);
3283 }
3284 else
3285 {
3286 i = cgltf_skip_json(tokens, i+1);
3287 }
3288
3289 if (i < 0)
3290 {
3291 return i;
3292 }
3293 }
3294
3295 return i;
3296}
3297
3298static int cgltf_parse_json_ior(jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_ior* out_ior)
3299{
3300 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3301 int size = tokens[i].size;
3302 ++i;
3303
3304 // Default values
3305 out_ior->ior = 1.5f;
3306
3307 for (int j = 0; j < size; ++j)
3308 {
3309 CGLTF_CHECK_KEY(tokens[i]);
3310
3311 if (cgltf_json_strcmp(tokens+i, json_chunk, "ior") == 0)
3312 {
3313 ++i;
3314 out_ior->ior = cgltf_json_to_float(tokens + i, json_chunk);
3315 ++i;
3316 }
3317 else
3318 {
3319 i = cgltf_skip_json(tokens, i+1);
3320 }
3321
3322 if (i < 0)
3323 {
3324 return i;
3325 }
3326 }
3327
3328 return i;
3329}
3330
3331static int cgltf_parse_json_specular(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_specular* out_specular)
3332{
3333 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3334 int size = tokens[i].size;
3335 ++i;
3336
3337 // Default values
3338 out_specular->specular_factor = 1.0f;
3339 cgltf_fill_float_array(out_specular->specular_color_factor, 3, 1.0f);
3340
3341 for (int j = 0; j < size; ++j)
3342 {
3343 CGLTF_CHECK_KEY(tokens[i]);
3344
3345 if (cgltf_json_strcmp(tokens+i, json_chunk, "specularFactor") == 0)
3346 {
3347 ++i;
3348 out_specular->specular_factor = cgltf_json_to_float(tokens + i, json_chunk);
3349 ++i;
3350 }
3351 else if (cgltf_json_strcmp(tokens+i, json_chunk, "specularColorFactor") == 0)
3352 {
3353 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_specular->specular_color_factor, 3);
3354 }
3355 else if (cgltf_json_strcmp(tokens+i, json_chunk, "specularTexture") == 0)
3356 {
3357 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_specular->specular_texture);
3358 }
3359 else
3360 {
3361 i = cgltf_skip_json(tokens, i+1);
3362 }
3363
3364 if (i < 0)
3365 {
3366 return i;
3367 }
3368 }
3369
3370 return i;
3371}
3372
3373static int cgltf_parse_json_transmission(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_transmission* out_transmission)
3374{
3375 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3376 int size = tokens[i].size;
3377 ++i;
3378
3379 for (int j = 0; j < size; ++j)
3380 {
3381 CGLTF_CHECK_KEY(tokens[i]);
3382
3383 if (cgltf_json_strcmp(tokens+i, json_chunk, "transmissionFactor") == 0)
3384 {
3385 ++i;
3386 out_transmission->transmission_factor = cgltf_json_to_float(tokens + i, json_chunk);
3387 ++i;
3388 }
3389 else if (cgltf_json_strcmp(tokens+i, json_chunk, "transmissionTexture") == 0)
3390 {
3391 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_transmission->transmission_texture);
3392 }
3393 else
3394 {
3395 i = cgltf_skip_json(tokens, i+1);
3396 }
3397
3398 if (i < 0)
3399 {
3400 return i;
3401 }
3402 }
3403
3404 return i;
3405}
3406
3407static int cgltf_parse_json_image(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_image* out_image)
3408{
3409 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3410
3411 int size = tokens[i].size;
3412 ++i;
3413
3414 for (int j = 0; j < size; ++j)
3415 {
3416 CGLTF_CHECK_KEY(tokens[i]);
3417
3418 if (cgltf_json_strcmp(tokens + i, json_chunk, "uri") == 0)
3419 {
3420 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_image->uri);
3421 }
3422 else if (cgltf_json_strcmp(tokens+i, json_chunk, "bufferView") == 0)
3423 {
3424 ++i;
3425 out_image->buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk));
3426 ++i;
3427 }
3428 else if (cgltf_json_strcmp(tokens + i, json_chunk, "mimeType") == 0)
3429 {
3430 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_image->mime_type);
3431 }
3432 else if (cgltf_json_strcmp(tokens + i, json_chunk, "name") == 0)
3433 {
3434 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_image->name);
3435 }
3436 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
3437 {
3438 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_image->extras);
3439 }
3440 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
3441 {
3442 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_image->extensions_count, &out_image->extensions);
3443 }
3444 else
3445 {
3446 i = cgltf_skip_json(tokens, i + 1);
3447 }
3448
3449 if (i < 0)
3450 {
3451 return i;
3452 }
3453 }
3454
3455 return i;
3456}
3457
3458static int cgltf_parse_json_sampler(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_sampler* out_sampler)
3459{
3460 (void)options;
3461 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3462
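// 10497 is GL_REPEAT, the glTF default wrap mode for both axes.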
3463 out_sampler->wrap_s = 10497;
3464 out_sampler->wrap_t = 10497;
3465
3466 int size = tokens[i].size;
3467 ++i;
3468
3469 for (int j = 0; j < size; ++j)
3470 {
3471 CGLTF_CHECK_KEY(tokens[i]);
3472
3473 if (cgltf_json_strcmp(tokens + i, json_chunk, "magFilter") == 0)
3474 {
3475 ++i;
3476 out_sampler->mag_filter
3477 = cgltf_json_to_int(tokens + i, json_chunk);
3478 ++i;
3479 }
3480 else if (cgltf_json_strcmp(tokens + i, json_chunk, "minFilter") == 0)
3481 {
3482 ++i;
3483 out_sampler->min_filter
3484 = cgltf_json_to_int(tokens + i, json_chunk);
3485 ++i;
3486 }
3487 else if (cgltf_json_strcmp(tokens + i, json_chunk, "wrapS") == 0)
3488 {
3489 ++i;
3490 out_sampler->wrap_s
3491 = cgltf_json_to_int(tokens + i, json_chunk);
3492 ++i;
3493 }
3494 else if (cgltf_json_strcmp(tokens + i, json_chunk, "wrapT") == 0)
3495 {
3496 ++i;
3497 out_sampler->wrap_t
3498 = cgltf_json_to_int(tokens + i, json_chunk);
3499 ++i;
3500 }
3501 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
3502 {
3503 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_sampler->extras);
3504 }
3505 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
3506 {
3507 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sampler->extensions_count, &out_sampler->extensions);
3508 }
3509 else
3510 {
3511 i = cgltf_skip_json(tokens, i + 1);
3512 }
3513
3514 if (i < 0)
3515 {
3516 return i;
3517 }
3518 }
3519
3520 return i;
3521}
3522
3523static int cgltf_parse_json_texture(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_texture* out_texture)
3524{
3525 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3526
3527 int size = tokens[i].size;
3528 ++i;
3529
3530 for (int j = 0; j < size; ++j)
3531 {
3532 CGLTF_CHECK_KEY(tokens[i]);
3533
3534 if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0)
3535 {
3536 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_texture->name);
3537 }
3538 else if (cgltf_json_strcmp(tokens + i, json_chunk, "sampler") == 0)
3539 {
3540 ++i;
3541 out_texture->sampler = CGLTF_PTRINDEX(cgltf_sampler, cgltf_json_to_int(tokens + i, json_chunk));
3542 ++i;
3543 }
3544 else if (cgltf_json_strcmp(tokens + i, json_chunk, "source") == 0)
3545 {
3546 ++i;
3547 out_texture->image = CGLTF_PTRINDEX(cgltf_image, cgltf_json_to_int(tokens + i, json_chunk));
3548 ++i;
3549 }
3550 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
3551 {
3552 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_texture->extras);
3553 }
3554 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
3555 {
3556 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_texture->extensions_count, &out_texture->extensions);
3557 }
3558 else
3559 {
3560 i = cgltf_skip_json(tokens, i + 1);
3561 }
3562
3563 if (i < 0)
3564 {
3565 return i;
3566 }
3567 }
3568
3569 return i;
3570}
3571
3572static int cgltf_parse_json_material(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_material* out_material)
3573{
3574 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3575
3576 cgltf_fill_float_array(out_material->pbr_metallic_roughness.base_color_factor, 4, 1.0f);
3577 out_material->pbr_metallic_roughness.metallic_factor = 1.0f;
3578 out_material->pbr_metallic_roughness.roughness_factor = 1.0f;
3579
3580 cgltf_fill_float_array(out_material->pbr_specular_glossiness.diffuse_factor, 4, 1.0f);
3581 cgltf_fill_float_array(out_material->pbr_specular_glossiness.specular_factor, 3, 1.0f);
3582 out_material->pbr_specular_glossiness.glossiness_factor = 1.0f;
3583
3584 out_material->alpha_cutoff = 0.5f;
3585
3586 int size = tokens[i].size;
3587 ++i;
3588
3589 for (int j = 0; j < size; ++j)
3590 {
3591 CGLTF_CHECK_KEY(tokens[i]);
3592
3593 if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0)
3594 {
3595 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_material->name);
3596 }
3597 else if (cgltf_json_strcmp(tokens+i, json_chunk, "pbrMetallicRoughness") == 0)
3598 {
3599 out_material->has_pbr_metallic_roughness = 1;
3600 i = cgltf_parse_json_pbr_metallic_roughness(options, tokens, i + 1, json_chunk, &out_material->pbr_metallic_roughness);
3601 }
3602 else if (cgltf_json_strcmp(tokens+i, json_chunk, "emissiveFactor") == 0)
3603 {
3604 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_material->emissive_factor, 3);
3605 }
3606 else if (cgltf_json_strcmp(tokens + i, json_chunk, "normalTexture") == 0)
3607 {
3608 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk,
3609 &out_material->normal_texture);
3610 }
3611 else if (cgltf_json_strcmp(tokens + i, json_chunk, "occlusionTexture") == 0)
3612 {
3613 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk,
3614 &out_material->occlusion_texture);
3615 }
3616 else if (cgltf_json_strcmp(tokens + i, json_chunk, "emissiveTexture") == 0)
3617 {
3618 i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk,
3619 &out_material->emissive_texture);
3620 }
3621 else if (cgltf_json_strcmp(tokens + i, json_chunk, "alphaMode") == 0)
3622 {
3623 ++i;
3624 if (cgltf_json_strcmp(tokens + i, json_chunk, "OPAQUE") == 0)
3625 {
3626 out_material->alpha_mode = cgltf_alpha_mode_opaque;
3627 }
3628 else if (cgltf_json_strcmp(tokens + i, json_chunk, "MASK") == 0)
3629 {
3630 out_material->alpha_mode = cgltf_alpha_mode_mask;
3631 }
3632 else if (cgltf_json_strcmp(tokens + i, json_chunk, "BLEND") == 0)
3633 {
3634 out_material->alpha_mode = cgltf_alpha_mode_blend;
3635 }
3636 ++i;
3637 }
3638 else if (cgltf_json_strcmp(tokens + i, json_chunk, "alphaCutoff") == 0)
3639 {
3640 ++i;
3641 out_material->alpha_cutoff = cgltf_json_to_float(tokens + i, json_chunk);
3642 ++i;
3643 }
3644 else if (cgltf_json_strcmp(tokens + i, json_chunk, "doubleSided") == 0)
3645 {
3646 ++i;
3647 out_material->double_sided =
3648 cgltf_json_to_bool(tokens + i, json_chunk);
3649 ++i;
3650 }
3651 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
3652 {
3653 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_material->extras);
3654 }
3655 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
3656 {
3657 ++i;
3658
3659 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3660			if (out_material->extensions)
3661 {
3662 return CGLTF_ERROR_JSON;
3663 }
3664
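			/* One slot is reserved per extension key; extensions decoded into
			   dedicated fields below do not consume a slot, so the final
			   extensions_count may be smaller than this allocation. */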
3665 int extensions_size = tokens[i].size;
3666 ++i;
3667 out_material->extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size);
3668			out_material->extensions_count = 0;
3669
3670 if (!out_material->extensions)
3671 {
3672 return CGLTF_ERROR_NOMEM;
3673 }
3674
3675 for (int k = 0; k < extensions_size; ++k)
3676 {
3677 CGLTF_CHECK_KEY(tokens[i]);
3678
3679 if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_pbrSpecularGlossiness") == 0)
3680 {
3681 out_material->has_pbr_specular_glossiness = 1;
3682 i = cgltf_parse_json_pbr_specular_glossiness(options, tokens, i + 1, json_chunk, &out_material->pbr_specular_glossiness);
3683 }
3684 else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_unlit") == 0)
3685 {
3686 out_material->unlit = 1;
3687 i = cgltf_skip_json(tokens, i+1);
3688 }
3689 else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_clearcoat") == 0)
3690 {
3691 out_material->has_clearcoat = 1;
3692 i = cgltf_parse_json_clearcoat(options, tokens, i + 1, json_chunk, &out_material->clearcoat);
3693 }
3694 else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_ior") == 0)
3695 {
3696 out_material->has_ior = 1;
3697 i = cgltf_parse_json_ior(tokens, i + 1, json_chunk, &out_material->ior);
3698 }
3699 else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_specular") == 0)
3700 {
3701 out_material->has_specular = 1;
3702 i = cgltf_parse_json_specular(options, tokens, i + 1, json_chunk, &out_material->specular);
3703 }
3704 else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_transmission") == 0)
3705 {
3706 out_material->has_transmission = 1;
3707 i = cgltf_parse_json_transmission(options, tokens, i + 1, json_chunk, &out_material->transmission);
3708 }
3709 else
3710 {
3711 i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_material->extensions[out_material->extensions_count++]));
3712 }
3713
3714 if (i < 0)
3715 {
3716 return i;
3717 }
3718 }
3719 }
3720 else
3721 {
3722 i = cgltf_skip_json(tokens, i+1);
3723 }
3724
3725 if (i < 0)
3726 {
3727 return i;
3728 }
3729 }
3730
3731 return i;
3732}
3733
3734static int cgltf_parse_json_accessors(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
3735{
3736 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_accessor), (void**)&out_data->accessors, &out_data->accessors_count);
3737 if (i < 0)
3738 {
3739 return i;
3740 }
3741
3742 for (cgltf_size j = 0; j < out_data->accessors_count; ++j)
3743 {
3744 i = cgltf_parse_json_accessor(options, tokens, i, json_chunk, &out_data->accessors[j]);
3745 if (i < 0)
3746 {
3747 return i;
3748 }
3749 }
3750 return i;
3751}
3752
3753static int cgltf_parse_json_materials(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
3754{
3755 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_material), (void**)&out_data->materials, &out_data->materials_count);
3756 if (i < 0)
3757 {
3758 return i;
3759 }
3760
3761 for (cgltf_size j = 0; j < out_data->materials_count; ++j)
3762 {
3763 i = cgltf_parse_json_material(options, tokens, i, json_chunk, &out_data->materials[j]);
3764 if (i < 0)
3765 {
3766 return i;
3767 }
3768 }
3769 return i;
3770}
3771
3772static int cgltf_parse_json_images(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
3773{
3774 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_image), (void**)&out_data->images, &out_data->images_count);
3775 if (i < 0)
3776 {
3777 return i;
3778 }
3779
3780 for (cgltf_size j = 0; j < out_data->images_count; ++j)
3781 {
3782 i = cgltf_parse_json_image(options, tokens, i, json_chunk, &out_data->images[j]);
3783 if (i < 0)
3784 {
3785 return i;
3786 }
3787 }
3788 return i;
3789}
3790
3791static int cgltf_parse_json_textures(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
3792{
3793 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_texture), (void**)&out_data->textures, &out_data->textures_count);
3794 if (i < 0)
3795 {
3796 return i;
3797 }
3798
3799 for (cgltf_size j = 0; j < out_data->textures_count; ++j)
3800 {
3801 i = cgltf_parse_json_texture(options, tokens, i, json_chunk, &out_data->textures[j]);
3802 if (i < 0)
3803 {
3804 return i;
3805 }
3806 }
3807 return i;
3808}
3809
3810static int cgltf_parse_json_samplers(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
3811{
3812 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_sampler), (void**)&out_data->samplers, &out_data->samplers_count);
3813 if (i < 0)
3814 {
3815 return i;
3816 }
3817
3818 for (cgltf_size j = 0; j < out_data->samplers_count; ++j)
3819 {
3820 i = cgltf_parse_json_sampler(options, tokens, i, json_chunk, &out_data->samplers[j]);
3821 if (i < 0)
3822 {
3823 return i;
3824 }
3825 }
3826 return i;
3827}
3828
3829static int cgltf_parse_json_buffer_view(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_buffer_view* out_buffer_view)
3830{
3831 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3832
3833 int size = tokens[i].size;
3834 ++i;
3835
3836 for (int j = 0; j < size; ++j)
3837 {
3838 CGLTF_CHECK_KEY(tokens[i]);
3839
3840 if (cgltf_json_strcmp(tokens+i, json_chunk, "buffer") == 0)
3841 {
3842 ++i;
3843 out_buffer_view->buffer = CGLTF_PTRINDEX(cgltf_buffer, cgltf_json_to_int(tokens + i, json_chunk));
3844 ++i;
3845 }
3846 else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteOffset") == 0)
3847 {
3848 ++i;
3849 out_buffer_view->offset =
3850 cgltf_json_to_int(tokens+i, json_chunk);
3851 ++i;
3852 }
3853 else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteLength") == 0)
3854 {
3855 ++i;
3856 out_buffer_view->size =
3857 cgltf_json_to_int(tokens+i, json_chunk);
3858 ++i;
3859 }
3860 else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteStride") == 0)
3861 {
3862 ++i;
3863 out_buffer_view->stride =
3864 cgltf_json_to_int(tokens+i, json_chunk);
3865 ++i;
3866 }
3867 else if (cgltf_json_strcmp(tokens+i, json_chunk, "target") == 0)
3868 {
3869 ++i;
3870 int type = cgltf_json_to_int(tokens+i, json_chunk);
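			/* Buffer view targets are raw GL enums: 34962 is GL_ARRAY_BUFFER
			   (vertex data), 34963 is GL_ELEMENT_ARRAY_BUFFER (indices). */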
3871 switch (type)
3872 {
3873 case 34962:
3874 type = cgltf_buffer_view_type_vertices;
3875 break;
3876 case 34963:
3877 type = cgltf_buffer_view_type_indices;
3878 break;
3879 default:
3880 type = cgltf_buffer_view_type_invalid;
3881 break;
3882 }
3883 out_buffer_view->type = (cgltf_buffer_view_type)type;
3884 ++i;
3885 }
3886 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
3887 {
3888 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_buffer_view->extras);
3889 }
3890 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
3891 {
3892 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_buffer_view->extensions_count, &out_buffer_view->extensions);
3893 }
3894 else
3895 {
3896 i = cgltf_skip_json(tokens, i+1);
3897 }
3898
3899 if (i < 0)
3900 {
3901 return i;
3902 }
3903 }
3904
3905 return i;
3906}
3907
3908static int cgltf_parse_json_buffer_views(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
3909{
3910 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_buffer_view), (void**)&out_data->buffer_views, &out_data->buffer_views_count);
3911 if (i < 0)
3912 {
3913 return i;
3914 }
3915
3916 for (cgltf_size j = 0; j < out_data->buffer_views_count; ++j)
3917 {
3918 i = cgltf_parse_json_buffer_view(options, tokens, i, json_chunk, &out_data->buffer_views[j]);
3919 if (i < 0)
3920 {
3921 return i;
3922 }
3923 }
3924 return i;
3925}
3926
3927static int cgltf_parse_json_buffer(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_buffer* out_buffer)
3928{
3929 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3930
3931 int size = tokens[i].size;
3932 ++i;
3933
3934 for (int j = 0; j < size; ++j)
3935 {
3936 CGLTF_CHECK_KEY(tokens[i]);
3937
3938 if (cgltf_json_strcmp(tokens+i, json_chunk, "byteLength") == 0)
3939 {
3940 ++i;
3941 out_buffer->size =
3942 cgltf_json_to_int(tokens+i, json_chunk);
3943 ++i;
3944 }
3945 else if (cgltf_json_strcmp(tokens+i, json_chunk, "uri") == 0)
3946 {
3947 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_buffer->uri);
3948 }
3949 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
3950 {
3951 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_buffer->extras);
3952 }
3953 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
3954 {
3955 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_buffer->extensions_count, &out_buffer->extensions);
3956 }
3957 else
3958 {
3959 i = cgltf_skip_json(tokens, i+1);
3960 }
3961
3962 if (i < 0)
3963 {
3964 return i;
3965 }
3966 }
3967
3968 return i;
3969}
3970
3971static int cgltf_parse_json_buffers(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
3972{
3973 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_buffer), (void**)&out_data->buffers, &out_data->buffers_count);
3974 if (i < 0)
3975 {
3976 return i;
3977 }
3978
3979 for (cgltf_size j = 0; j < out_data->buffers_count; ++j)
3980 {
3981 i = cgltf_parse_json_buffer(options, tokens, i, json_chunk, &out_data->buffers[j]);
3982 if (i < 0)
3983 {
3984 return i;
3985 }
3986 }
3987 return i;
3988}
3989
3990static int cgltf_parse_json_skin(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_skin* out_skin)
3991{
3992 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
3993
3994 int size = tokens[i].size;
3995 ++i;
3996
3997 for (int j = 0; j < size; ++j)
3998 {
3999 CGLTF_CHECK_KEY(tokens[i]);
4000
4001 if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0)
4002 {
4003 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_skin->name);
4004 }
4005 else if (cgltf_json_strcmp(tokens+i, json_chunk, "joints") == 0)
4006 {
4007 i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_node*), (void**)&out_skin->joints, &out_skin->joints_count);
4008 if (i < 0)
4009 {
4010 return i;
4011 }
4012
4013 for (cgltf_size k = 0; k < out_skin->joints_count; ++k)
4014 {
4015 out_skin->joints[k] = CGLTF_PTRINDEX(cgltf_node, cgltf_json_to_int(tokens + i, json_chunk));
4016 ++i;
4017 }
4018 }
4019 else if (cgltf_json_strcmp(tokens+i, json_chunk, "skeleton") == 0)
4020 {
4021 ++i;
4022 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE);
4023 out_skin->skeleton = CGLTF_PTRINDEX(cgltf_node, cgltf_json_to_int(tokens + i, json_chunk));
4024 ++i;
4025 }
4026 else if (cgltf_json_strcmp(tokens+i, json_chunk, "inverseBindMatrices") == 0)
4027 {
4028 ++i;
4029 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE);
4030 out_skin->inverse_bind_matrices = CGLTF_PTRINDEX(cgltf_accessor, cgltf_json_to_int(tokens + i, json_chunk));
4031 ++i;
4032 }
4033 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
4034 {
4035 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_skin->extras);
4036 }
4037 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
4038 {
4039 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_skin->extensions_count, &out_skin->extensions);
4040 }
4041 else
4042 {
4043 i = cgltf_skip_json(tokens, i+1);
4044 }
4045
4046 if (i < 0)
4047 {
4048 return i;
4049 }
4050 }
4051
4052 return i;
4053}
4054
4055static int cgltf_parse_json_skins(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
4056{
4057 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_skin), (void**)&out_data->skins, &out_data->skins_count);
4058 if (i < 0)
4059 {
4060 return i;
4061 }
4062
4063 for (cgltf_size j = 0; j < out_data->skins_count; ++j)
4064 {
4065 i = cgltf_parse_json_skin(options, tokens, i, json_chunk, &out_data->skins[j]);
4066 if (i < 0)
4067 {
4068 return i;
4069 }
4070 }
4071 return i;
4072}
4073
4074static int cgltf_parse_json_camera(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_camera* out_camera)
4075{
4076 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4077
4078 int size = tokens[i].size;
4079 ++i;
4080
4081 for (int j = 0; j < size; ++j)
4082 {
4083 CGLTF_CHECK_KEY(tokens[i]);
4084
4085 if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0)
4086 {
4087 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_camera->name);
4088 }
4089 else if (cgltf_json_strcmp(tokens+i, json_chunk, "type") == 0)
4090 {
4091 ++i;
4092 if (cgltf_json_strcmp(tokens + i, json_chunk, "perspective") == 0)
4093 {
4094 out_camera->type = cgltf_camera_type_perspective;
4095 }
4096 else if (cgltf_json_strcmp(tokens + i, json_chunk, "orthographic") == 0)
4097 {
4098 out_camera->type = cgltf_camera_type_orthographic;
4099 }
4100 ++i;
4101 }
4102 else if (cgltf_json_strcmp(tokens+i, json_chunk, "perspective") == 0)
4103 {
4104 ++i;
4105
4106 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4107
4108 int data_size = tokens[i].size;
4109 ++i;
4110
4111 out_camera->type = cgltf_camera_type_perspective;
4112
4113 for (int k = 0; k < data_size; ++k)
4114 {
4115 CGLTF_CHECK_KEY(tokens[i]);
4116
4117 if (cgltf_json_strcmp(tokens+i, json_chunk, "aspectRatio") == 0)
4118 {
4119 ++i;
4120 out_camera->data.perspective.aspect_ratio = cgltf_json_to_float(tokens + i, json_chunk);
4121 ++i;
4122 }
4123 else if (cgltf_json_strcmp(tokens+i, json_chunk, "yfov") == 0)
4124 {
4125 ++i;
4126 out_camera->data.perspective.yfov = cgltf_json_to_float(tokens + i, json_chunk);
4127 ++i;
4128 }
4129 else if (cgltf_json_strcmp(tokens+i, json_chunk, "zfar") == 0)
4130 {
4131 ++i;
4132 out_camera->data.perspective.zfar = cgltf_json_to_float(tokens + i, json_chunk);
4133 ++i;
4134 }
4135 else if (cgltf_json_strcmp(tokens+i, json_chunk, "znear") == 0)
4136 {
4137 ++i;
4138 out_camera->data.perspective.znear = cgltf_json_to_float(tokens + i, json_chunk);
4139 ++i;
4140 }
4141 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
4142 {
4143 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_camera->data.perspective.extras);
4144 }
4145 else
4146 {
4147 i = cgltf_skip_json(tokens, i+1);
4148 }
4149
4150 if (i < 0)
4151 {
4152 return i;
4153 }
4154 }
4155 }
4156 else if (cgltf_json_strcmp(tokens+i, json_chunk, "orthographic") == 0)
4157 {
4158 ++i;
4159
4160 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4161
4162 int data_size = tokens[i].size;
4163 ++i;
4164
4165 out_camera->type = cgltf_camera_type_orthographic;
4166
4167 for (int k = 0; k < data_size; ++k)
4168 {
4169 CGLTF_CHECK_KEY(tokens[i]);
4170
4171 if (cgltf_json_strcmp(tokens+i, json_chunk, "xmag") == 0)
4172 {
4173 ++i;
4174 out_camera->data.orthographic.xmag = cgltf_json_to_float(tokens + i, json_chunk);
4175 ++i;
4176 }
4177 else if (cgltf_json_strcmp(tokens+i, json_chunk, "ymag") == 0)
4178 {
4179 ++i;
4180 out_camera->data.orthographic.ymag = cgltf_json_to_float(tokens + i, json_chunk);
4181 ++i;
4182 }
4183 else if (cgltf_json_strcmp(tokens+i, json_chunk, "zfar") == 0)
4184 {
4185 ++i;
4186 out_camera->data.orthographic.zfar = cgltf_json_to_float(tokens + i, json_chunk);
4187 ++i;
4188 }
4189 else if (cgltf_json_strcmp(tokens+i, json_chunk, "znear") == 0)
4190 {
4191 ++i;
4192 out_camera->data.orthographic.znear = cgltf_json_to_float(tokens + i, json_chunk);
4193 ++i;
4194 }
4195 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
4196 {
4197 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_camera->data.orthographic.extras);
4198 }
4199 else
4200 {
4201 i = cgltf_skip_json(tokens, i+1);
4202 }
4203
4204 if (i < 0)
4205 {
4206 return i;
4207 }
4208 }
4209 }
4210 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
4211 {
4212 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_camera->extras);
4213 }
4214 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
4215 {
4216 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_camera->extensions_count, &out_camera->extensions);
4217 }
4218 else
4219 {
4220 i = cgltf_skip_json(tokens, i+1);
4221 }
4222
4223 if (i < 0)
4224 {
4225 return i;
4226 }
4227 }
4228
4229 return i;
4230}
4231
4232static int cgltf_parse_json_cameras(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
4233{
4234 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_camera), (void**)&out_data->cameras, &out_data->cameras_count);
4235 if (i < 0)
4236 {
4237 return i;
4238 }
4239
4240 for (cgltf_size j = 0; j < out_data->cameras_count; ++j)
4241 {
4242 i = cgltf_parse_json_camera(options, tokens, i, json_chunk, &out_data->cameras[j]);
4243 if (i < 0)
4244 {
4245 return i;
4246 }
4247 }
4248 return i;
4249}
4250
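/* Parses one light from the KHR_lights_punctual extension; the lights array
 * itself is delivered through the top-level "extensions" object handled in
 * cgltf_parse_json_root below. */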
4251static int cgltf_parse_json_light(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_light* out_light)
4252{
4253 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4254
4255 int size = tokens[i].size;
4256 ++i;
4257
4258 for (int j = 0; j < size; ++j)
4259 {
4260 CGLTF_CHECK_KEY(tokens[i]);
4261
4262 if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0)
4263 {
4264 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_light->name);
4265 }
4266 else if (cgltf_json_strcmp(tokens + i, json_chunk, "color") == 0)
4267 {
4268 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_light->color, 3);
4269 }
4270 else if (cgltf_json_strcmp(tokens + i, json_chunk, "intensity") == 0)
4271 {
4272 ++i;
4273 out_light->intensity = cgltf_json_to_float(tokens + i, json_chunk);
4274 ++i;
4275 }
4276 else if (cgltf_json_strcmp(tokens+i, json_chunk, "type") == 0)
4277 {
4278 ++i;
4279 if (cgltf_json_strcmp(tokens + i, json_chunk, "directional") == 0)
4280 {
4281 out_light->type = cgltf_light_type_directional;
4282 }
4283 else if (cgltf_json_strcmp(tokens + i, json_chunk, "point") == 0)
4284 {
4285 out_light->type = cgltf_light_type_point;
4286 }
4287 else if (cgltf_json_strcmp(tokens + i, json_chunk, "spot") == 0)
4288 {
4289 out_light->type = cgltf_light_type_spot;
4290 }
4291 ++i;
4292 }
4293 else if (cgltf_json_strcmp(tokens + i, json_chunk, "range") == 0)
4294 {
4295 ++i;
4296 out_light->range = cgltf_json_to_float(tokens + i, json_chunk);
4297 ++i;
4298 }
4299 else if (cgltf_json_strcmp(tokens+i, json_chunk, "spot") == 0)
4300 {
4301 ++i;
4302
4303 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4304
4305 int data_size = tokens[i].size;
4306 ++i;
4307
4308 for (int k = 0; k < data_size; ++k)
4309 {
4310 CGLTF_CHECK_KEY(tokens[i]);
4311
4312 if (cgltf_json_strcmp(tokens+i, json_chunk, "innerConeAngle") == 0)
4313 {
4314 ++i;
4315 out_light->spot_inner_cone_angle = cgltf_json_to_float(tokens + i, json_chunk);
4316 ++i;
4317 }
4318 else if (cgltf_json_strcmp(tokens+i, json_chunk, "outerConeAngle") == 0)
4319 {
4320 ++i;
4321 out_light->spot_outer_cone_angle = cgltf_json_to_float(tokens + i, json_chunk);
4322 ++i;
4323 }
4324 else
4325 {
4326 i = cgltf_skip_json(tokens, i+1);
4327 }
4328
4329 if (i < 0)
4330 {
4331 return i;
4332 }
4333 }
4334 }
4335 else
4336 {
4337 i = cgltf_skip_json(tokens, i+1);
4338 }
4339
4340 if (i < 0)
4341 {
4342 return i;
4343 }
4344 }
4345
4346 return i;
4347}
4348
4349static int cgltf_parse_json_lights(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
4350{
4351 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_light), (void**)&out_data->lights, &out_data->lights_count);
4352 if (i < 0)
4353 {
4354 return i;
4355 }
4356
4357 for (cgltf_size j = 0; j < out_data->lights_count; ++j)
4358 {
4359 i = cgltf_parse_json_light(options, tokens, i, json_chunk, &out_data->lights[j]);
4360 if (i < 0)
4361 {
4362 return i;
4363 }
4364 }
4365 return i;
4366}
4367
4368static int cgltf_parse_json_node(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_node* out_node)
4369{
4370 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4371
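	/* Identity defaults: unit quaternion rotation, unit scale and an
	   identity matrix. */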
4372 out_node->rotation[3] = 1.0f;
4373 out_node->scale[0] = 1.0f;
4374 out_node->scale[1] = 1.0f;
4375 out_node->scale[2] = 1.0f;
4376 out_node->matrix[0] = 1.0f;
4377 out_node->matrix[5] = 1.0f;
4378 out_node->matrix[10] = 1.0f;
4379 out_node->matrix[15] = 1.0f;
4380
4381 int size = tokens[i].size;
4382 ++i;
4383
4384 for (int j = 0; j < size; ++j)
4385 {
4386 CGLTF_CHECK_KEY(tokens[i]);
4387
4388 if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0)
4389 {
4390 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_node->name);
4391 }
4392 else if (cgltf_json_strcmp(tokens+i, json_chunk, "children") == 0)
4393 {
4394 i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_node*), (void**)&out_node->children, &out_node->children_count);
4395 if (i < 0)
4396 {
4397 return i;
4398 }
4399
4400 for (cgltf_size k = 0; k < out_node->children_count; ++k)
4401 {
4402 out_node->children[k] = CGLTF_PTRINDEX(cgltf_node, cgltf_json_to_int(tokens + i, json_chunk));
4403 ++i;
4404 }
4405 }
4406 else if (cgltf_json_strcmp(tokens+i, json_chunk, "mesh") == 0)
4407 {
4408 ++i;
4409 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE);
4410 out_node->mesh = CGLTF_PTRINDEX(cgltf_mesh, cgltf_json_to_int(tokens + i, json_chunk));
4411 ++i;
4412 }
4413 else if (cgltf_json_strcmp(tokens+i, json_chunk, "skin") == 0)
4414 {
4415 ++i;
4416 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE);
4417 out_node->skin = CGLTF_PTRINDEX(cgltf_skin, cgltf_json_to_int(tokens + i, json_chunk));
4418 ++i;
4419 }
4420 else if (cgltf_json_strcmp(tokens+i, json_chunk, "camera") == 0)
4421 {
4422 ++i;
4423 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE);
4424 out_node->camera = CGLTF_PTRINDEX(cgltf_camera, cgltf_json_to_int(tokens + i, json_chunk));
4425 ++i;
4426 }
4427 else if (cgltf_json_strcmp(tokens+i, json_chunk, "translation") == 0)
4428 {
4429 out_node->has_translation = 1;
4430 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_node->translation, 3);
4431 }
4432 else if (cgltf_json_strcmp(tokens+i, json_chunk, "rotation") == 0)
4433 {
4434 out_node->has_rotation = 1;
4435 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_node->rotation, 4);
4436 }
4437 else if (cgltf_json_strcmp(tokens+i, json_chunk, "scale") == 0)
4438 {
4439 out_node->has_scale = 1;
4440 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_node->scale, 3);
4441 }
4442 else if (cgltf_json_strcmp(tokens+i, json_chunk, "matrix") == 0)
4443 {
4444 out_node->has_matrix = 1;
4445 i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_node->matrix, 16);
4446 }
4447 else if (cgltf_json_strcmp(tokens + i, json_chunk, "weights") == 0)
4448 {
4449 i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_float), (void**)&out_node->weights, &out_node->weights_count);
4450 if (i < 0)
4451 {
4452 return i;
4453 }
4454
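			/* cgltf_parse_json_array returns the index just past the array
			   token; step back one so the float parser can re-read that token
			   and fill the freshly allocated array. */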
4455 i = cgltf_parse_json_float_array(tokens, i - 1, json_chunk, out_node->weights, (int)out_node->weights_count);
4456 }
4457 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
4458 {
4459 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_node->extras);
4460 }
4461 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
4462 {
4463 ++i;
4464
4465 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4466			if (out_node->extensions)
4467 {
4468 return CGLTF_ERROR_JSON;
4469 }
4470
4471 int extensions_size = tokens[i].size;
4472			out_node->extensions_count = 0;
4473 out_node->extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size);
4474
4475 if (!out_node->extensions)
4476 {
4477 return CGLTF_ERROR_NOMEM;
4478 }
4479
4480 ++i;
4481
4482 for (int k = 0; k < extensions_size; ++k)
4483 {
4484 CGLTF_CHECK_KEY(tokens[i]);
4485
4486 if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_lights_punctual") == 0)
4487 {
4488 ++i;
4489
4490 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4491
4492 int data_size = tokens[i].size;
4493 ++i;
4494
4495 for (int m = 0; m < data_size; ++m)
4496 {
4497 CGLTF_CHECK_KEY(tokens[i]);
4498
4499 if (cgltf_json_strcmp(tokens + i, json_chunk, "light") == 0)
4500 {
4501 ++i;
4502 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE);
4503 out_node->light = CGLTF_PTRINDEX(cgltf_light, cgltf_json_to_int(tokens + i, json_chunk));
4504 ++i;
4505 }
4506 else
4507 {
4508 i = cgltf_skip_json(tokens, i + 1);
4509 }
4510
4511 if (i < 0)
4512 {
4513 return i;
4514 }
4515 }
4516 }
4517 else
4518 {
4519 i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_node->extensions[out_node->extensions_count++]));
4520 }
4521
4522 if (i < 0)
4523 {
4524 return i;
4525 }
4526 }
4527 }
4528 else
4529 {
4530 i = cgltf_skip_json(tokens, i+1);
4531 }
4532
4533 if (i < 0)
4534 {
4535 return i;
4536 }
4537 }
4538
4539 return i;
4540}
4541
4542static int cgltf_parse_json_nodes(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
4543{
4544 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_node), (void**)&out_data->nodes, &out_data->nodes_count);
4545 if (i < 0)
4546 {
4547 return i;
4548 }
4549
4550 for (cgltf_size j = 0; j < out_data->nodes_count; ++j)
4551 {
4552 i = cgltf_parse_json_node(options, tokens, i, json_chunk, &out_data->nodes[j]);
4553 if (i < 0)
4554 {
4555 return i;
4556 }
4557 }
4558 return i;
4559}
4560
4561static int cgltf_parse_json_scene(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_scene* out_scene)
4562{
4563 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4564
4565 int size = tokens[i].size;
4566 ++i;
4567
4568 for (int j = 0; j < size; ++j)
4569 {
4570 CGLTF_CHECK_KEY(tokens[i]);
4571
4572 if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0)
4573 {
4574 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_scene->name);
4575 }
4576 else if (cgltf_json_strcmp(tokens+i, json_chunk, "nodes") == 0)
4577 {
4578 i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_node*), (void**)&out_scene->nodes, &out_scene->nodes_count);
4579 if (i < 0)
4580 {
4581 return i;
4582 }
4583
4584 for (cgltf_size k = 0; k < out_scene->nodes_count; ++k)
4585 {
4586 out_scene->nodes[k] = CGLTF_PTRINDEX(cgltf_node, cgltf_json_to_int(tokens + i, json_chunk));
4587 ++i;
4588 }
4589 }
4590 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
4591 {
4592 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_scene->extras);
4593 }
4594 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
4595 {
4596 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_scene->extensions_count, &out_scene->extensions);
4597 }
4598 else
4599 {
4600 i = cgltf_skip_json(tokens, i+1);
4601 }
4602
4603 if (i < 0)
4604 {
4605 return i;
4606 }
4607 }
4608
4609 return i;
4610}
4611
4612static int cgltf_parse_json_scenes(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
4613{
4614 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_scene), (void**)&out_data->scenes, &out_data->scenes_count);
4615 if (i < 0)
4616 {
4617 return i;
4618 }
4619
4620 for (cgltf_size j = 0; j < out_data->scenes_count; ++j)
4621 {
4622 i = cgltf_parse_json_scene(options, tokens, i, json_chunk, &out_data->scenes[j]);
4623 if (i < 0)
4624 {
4625 return i;
4626 }
4627 }
4628 return i;
4629}
4630
4631static int cgltf_parse_json_animation_sampler(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_animation_sampler* out_sampler)
4632{
4633 (void)options;
4634 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4635
4636 int size = tokens[i].size;
4637 ++i;
4638
4639 for (int j = 0; j < size; ++j)
4640 {
4641 CGLTF_CHECK_KEY(tokens[i]);
4642
4643 if (cgltf_json_strcmp(tokens+i, json_chunk, "input") == 0)
4644 {
4645 ++i;
4646 out_sampler->input = CGLTF_PTRINDEX(cgltf_accessor, cgltf_json_to_int(tokens + i, json_chunk));
4647 ++i;
4648 }
4649 else if (cgltf_json_strcmp(tokens+i, json_chunk, "output") == 0)
4650 {
4651 ++i;
4652 out_sampler->output = CGLTF_PTRINDEX(cgltf_accessor, cgltf_json_to_int(tokens + i, json_chunk));
4653 ++i;
4654 }
4655 else if (cgltf_json_strcmp(tokens+i, json_chunk, "interpolation") == 0)
4656 {
4657 ++i;
4658 if (cgltf_json_strcmp(tokens + i, json_chunk, "LINEAR") == 0)
4659 {
4660 out_sampler->interpolation = cgltf_interpolation_type_linear;
4661 }
4662 else if (cgltf_json_strcmp(tokens + i, json_chunk, "STEP") == 0)
4663 {
4664 out_sampler->interpolation = cgltf_interpolation_type_step;
4665 }
4666 else if (cgltf_json_strcmp(tokens + i, json_chunk, "CUBICSPLINE") == 0)
4667 {
4668 out_sampler->interpolation = cgltf_interpolation_type_cubic_spline;
4669 }
4670 ++i;
4671 }
4672 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
4673 {
4674 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_sampler->extras);
4675 }
4676 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
4677 {
4678 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sampler->extensions_count, &out_sampler->extensions);
4679 }
4680 else
4681 {
4682 i = cgltf_skip_json(tokens, i+1);
4683 }
4684
4685 if (i < 0)
4686 {
4687 return i;
4688 }
4689 }
4690
4691 return i;
4692}
4693
4694static int cgltf_parse_json_animation_channel(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_animation_channel* out_channel)
4695{
4696 (void)options;
4697 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4698
4699 int size = tokens[i].size;
4700 ++i;
4701
4702 for (int j = 0; j < size; ++j)
4703 {
4704 CGLTF_CHECK_KEY(tokens[i]);
4705
4706 if (cgltf_json_strcmp(tokens+i, json_chunk, "sampler") == 0)
4707 {
4708 ++i;
4709 out_channel->sampler = CGLTF_PTRINDEX(cgltf_animation_sampler, cgltf_json_to_int(tokens + i, json_chunk));
4710 ++i;
4711 }
4712 else if (cgltf_json_strcmp(tokens+i, json_chunk, "target") == 0)
4713 {
4714 ++i;
4715
4716 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4717
4718 int target_size = tokens[i].size;
4719 ++i;
4720
4721 for (int k = 0; k < target_size; ++k)
4722 {
4723 CGLTF_CHECK_KEY(tokens[i]);
4724
4725 if (cgltf_json_strcmp(tokens+i, json_chunk, "node") == 0)
4726 {
4727 ++i;
4728 out_channel->target_node = CGLTF_PTRINDEX(cgltf_node, cgltf_json_to_int(tokens + i, json_chunk));
4729 ++i;
4730 }
4731 else if (cgltf_json_strcmp(tokens+i, json_chunk, "path") == 0)
4732 {
4733 ++i;
4734 if (cgltf_json_strcmp(tokens+i, json_chunk, "translation") == 0)
4735 {
4736 out_channel->target_path = cgltf_animation_path_type_translation;
4737 }
4738 else if (cgltf_json_strcmp(tokens+i, json_chunk, "rotation") == 0)
4739 {
4740 out_channel->target_path = cgltf_animation_path_type_rotation;
4741 }
4742 else if (cgltf_json_strcmp(tokens+i, json_chunk, "scale") == 0)
4743 {
4744 out_channel->target_path = cgltf_animation_path_type_scale;
4745 }
4746 else if (cgltf_json_strcmp(tokens+i, json_chunk, "weights") == 0)
4747 {
4748 out_channel->target_path = cgltf_animation_path_type_weights;
4749 }
4750 ++i;
4751 }
4752 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
4753 {
4754 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_channel->extras);
4755 }
4756 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
4757 {
4758 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_channel->extensions_count, &out_channel->extensions);
4759 }
4760 else
4761 {
4762 i = cgltf_skip_json(tokens, i+1);
4763 }
4764
4765 if (i < 0)
4766 {
4767 return i;
4768 }
4769 }
4770 }
4771 else
4772 {
4773 i = cgltf_skip_json(tokens, i+1);
4774 }
4775
4776 if (i < 0)
4777 {
4778 return i;
4779 }
4780 }
4781
4782 return i;
4783}
4784
4785static int cgltf_parse_json_animation(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_animation* out_animation)
4786{
4787 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4788
4789 int size = tokens[i].size;
4790 ++i;
4791
4792 for (int j = 0; j < size; ++j)
4793 {
4794 CGLTF_CHECK_KEY(tokens[i]);
4795
4796 if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0)
4797 {
4798 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_animation->name);
4799 }
4800 else if (cgltf_json_strcmp(tokens+i, json_chunk, "samplers") == 0)
4801 {
4802 i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_animation_sampler), (void**)&out_animation->samplers, &out_animation->samplers_count);
4803 if (i < 0)
4804 {
4805 return i;
4806 }
4807
4808 for (cgltf_size k = 0; k < out_animation->samplers_count; ++k)
4809 {
4810 i = cgltf_parse_json_animation_sampler(options, tokens, i, json_chunk, &out_animation->samplers[k]);
4811 if (i < 0)
4812 {
4813 return i;
4814 }
4815 }
4816 }
4817 else if (cgltf_json_strcmp(tokens+i, json_chunk, "channels") == 0)
4818 {
4819 i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_animation_channel), (void**)&out_animation->channels, &out_animation->channels_count);
4820 if (i < 0)
4821 {
4822 return i;
4823 }
4824
4825 for (cgltf_size k = 0; k < out_animation->channels_count; ++k)
4826 {
4827 i = cgltf_parse_json_animation_channel(options, tokens, i, json_chunk, &out_animation->channels[k]);
4828 if (i < 0)
4829 {
4830 return i;
4831 }
4832 }
4833 }
4834 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
4835 {
4836 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_animation->extras);
4837 }
4838 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
4839 {
4840 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_animation->extensions_count, &out_animation->extensions);
4841 }
4842 else
4843 {
4844 i = cgltf_skip_json(tokens, i+1);
4845 }
4846
4847 if (i < 0)
4848 {
4849 return i;
4850 }
4851 }
4852
4853 return i;
4854}
4855
4856static int cgltf_parse_json_animations(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
4857{
4858 i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_animation), (void**)&out_data->animations, &out_data->animations_count);
4859 if (i < 0)
4860 {
4861 return i;
4862 }
4863
4864 for (cgltf_size j = 0; j < out_data->animations_count; ++j)
4865 {
4866 i = cgltf_parse_json_animation(options, tokens, i, json_chunk, &out_data->animations[j]);
4867 if (i < 0)
4868 {
4869 return i;
4870 }
4871 }
4872 return i;
4873}
4874
4875static int cgltf_parse_json_asset(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_asset* out_asset)
4876{
4877 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4878
4879 int size = tokens[i].size;
4880 ++i;
4881
4882 for (int j = 0; j < size; ++j)
4883 {
4884 CGLTF_CHECK_KEY(tokens[i]);
4885
4886 if (cgltf_json_strcmp(tokens+i, json_chunk, "copyright") == 0)
4887 {
4888 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_asset->copyright);
4889 }
4890 else if (cgltf_json_strcmp(tokens+i, json_chunk, "generator") == 0)
4891 {
4892 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_asset->generator);
4893 }
4894 else if (cgltf_json_strcmp(tokens+i, json_chunk, "version") == 0)
4895 {
4896 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_asset->version);
4897 }
4898 else if (cgltf_json_strcmp(tokens+i, json_chunk, "minVersion") == 0)
4899 {
4900 i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_asset->min_version);
4901 }
4902 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0)
4903 {
4904 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_asset->extras);
4905 }
4906 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
4907 {
4908 i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_asset->extensions_count, &out_asset->extensions);
4909 }
4910 else
4911 {
4912 i = cgltf_skip_json(tokens, i+1);
4913 }
4914
4915 if (i < 0)
4916 {
4917 return i;
4918 }
4919 }
4920
4921 if (out_asset->version && CGLTF_ATOF(out_asset->version) < 2)
4922 {
4923 return CGLTF_ERROR_LEGACY;
4924 }
4925
4926 return i;
4927}
4928
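/* Public helper: the number of scalar components in one element of the given
 * accessor type (vec2 -> 2, ..., mat4 -> 16); scalar and invalid map to 1. */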
4929cgltf_size cgltf_num_components(cgltf_type type) {
4930 switch (type)
4931 {
4932 case cgltf_type_vec2:
4933 return 2;
4934 case cgltf_type_vec3:
4935 return 3;
4936 case cgltf_type_vec4:
4937 return 4;
4938 case cgltf_type_mat2:
4939 return 4;
4940 case cgltf_type_mat3:
4941 return 9;
4942 case cgltf_type_mat4:
4943 return 16;
4944 case cgltf_type_invalid:
4945 case cgltf_type_scalar:
4946 default:
4947 return 1;
4948 }
4949}
4950
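/* Size in bytes of a single component: 1 for (u)byte, 2 for (u)short,
 * 4 for unsigned int and float; invalid component types map to 0. */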
4951static cgltf_size cgltf_component_size(cgltf_component_type component_type) {
4952 switch (component_type)
4953 {
4954 case cgltf_component_type_r_8:
4955 case cgltf_component_type_r_8u:
4956 return 1;
4957 case cgltf_component_type_r_16:
4958 case cgltf_component_type_r_16u:
4959 return 2;
4960 case cgltf_component_type_r_32u:
4961 case cgltf_component_type_r_32f:
4962 return 4;
4963 case cgltf_component_type_invalid:
4964 default:
4965 return 0;
4966 }
4967}
4968
4969static cgltf_size cgltf_calc_size(cgltf_type type, cgltf_component_type component_type)
4970{
4971 cgltf_size component_size = cgltf_component_size(component_type);
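	/* Per the glTF specification, each matrix column starts on a 4-byte
	   boundary, so mat2/mat3 with 1- or 2-byte components occupy more than
	   num_components * component_size bytes. */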
4972 if (type == cgltf_type_mat2 && component_size == 1)
4973 {
4974 return 8 * component_size;
4975 }
4976 else if (type == cgltf_type_mat3 && (component_size == 1 || component_size == 2))
4977 {
4978 return 12 * component_size;
4979 }
4980 return component_size * cgltf_num_components(type);
4981}
4982
4983static int cgltf_fixup_pointers(cgltf_data* out_data);
4984
4985static int cgltf_parse_json_root(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data)
4986{
4987 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
4988
4989 int size = tokens[i].size;
4990 ++i;
4991
4992 for (int j = 0; j < size; ++j)
4993 {
4994 CGLTF_CHECK_KEY(tokens[i]);
4995
4996 if (cgltf_json_strcmp(tokens + i, json_chunk, "asset") == 0)
4997 {
4998 i = cgltf_parse_json_asset(options, tokens, i + 1, json_chunk, &out_data->asset);
4999 }
5000 else if (cgltf_json_strcmp(tokens + i, json_chunk, "meshes") == 0)
5001 {
5002 i = cgltf_parse_json_meshes(options, tokens, i + 1, json_chunk, out_data);
5003 }
5004 else if (cgltf_json_strcmp(tokens + i, json_chunk, "accessors") == 0)
5005 {
5006 i = cgltf_parse_json_accessors(options, tokens, i + 1, json_chunk, out_data);
5007 }
5008 else if (cgltf_json_strcmp(tokens + i, json_chunk, "bufferViews") == 0)
5009 {
5010 i = cgltf_parse_json_buffer_views(options, tokens, i + 1, json_chunk, out_data);
5011 }
5012 else if (cgltf_json_strcmp(tokens + i, json_chunk, "buffers") == 0)
5013 {
5014 i = cgltf_parse_json_buffers(options, tokens, i + 1, json_chunk, out_data);
5015 }
5016 else if (cgltf_json_strcmp(tokens + i, json_chunk, "materials") == 0)
5017 {
5018 i = cgltf_parse_json_materials(options, tokens, i + 1, json_chunk, out_data);
5019 }
5020 else if (cgltf_json_strcmp(tokens + i, json_chunk, "images") == 0)
5021 {
5022 i = cgltf_parse_json_images(options, tokens, i + 1, json_chunk, out_data);
5023 }
5024 else if (cgltf_json_strcmp(tokens + i, json_chunk, "textures") == 0)
5025 {
5026 i = cgltf_parse_json_textures(options, tokens, i + 1, json_chunk, out_data);
5027 }
5028 else if (cgltf_json_strcmp(tokens + i, json_chunk, "samplers") == 0)
5029 {
5030 i = cgltf_parse_json_samplers(options, tokens, i + 1, json_chunk, out_data);
5031 }
5032 else if (cgltf_json_strcmp(tokens + i, json_chunk, "skins") == 0)
5033 {
5034 i = cgltf_parse_json_skins(options, tokens, i + 1, json_chunk, out_data);
5035 }
5036 else if (cgltf_json_strcmp(tokens + i, json_chunk, "cameras") == 0)
5037 {
5038 i = cgltf_parse_json_cameras(options, tokens, i + 1, json_chunk, out_data);
5039 }
5040 else if (cgltf_json_strcmp(tokens + i, json_chunk, "nodes") == 0)
5041 {
5042 i = cgltf_parse_json_nodes(options, tokens, i + 1, json_chunk, out_data);
5043 }
5044 else if (cgltf_json_strcmp(tokens + i, json_chunk, "scenes") == 0)
5045 {
5046 i = cgltf_parse_json_scenes(options, tokens, i + 1, json_chunk, out_data);
5047 }
5048 else if (cgltf_json_strcmp(tokens + i, json_chunk, "scene") == 0)
5049 {
5050 ++i;
5051 out_data->scene = CGLTF_PTRINDEX(cgltf_scene, cgltf_json_to_int(tokens + i, json_chunk));
5052 ++i;
5053 }
5054 else if (cgltf_json_strcmp(tokens + i, json_chunk, "animations") == 0)
5055 {
5056 i = cgltf_parse_json_animations(options, tokens, i + 1, json_chunk, out_data);
5057 }
5058 else if (cgltf_json_strcmp(tokens+i, json_chunk, "extras") == 0)
5059 {
5060 i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_data->extras);
5061 }
5062 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0)
5063 {
5064 ++i;
5065
5066 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
5067			if (out_data->data_extensions)
5068 {
5069 return CGLTF_ERROR_JSON;
5070 }
5071
5072 int extensions_size = tokens[i].size;
5073 out_data->data_extensions_count = 0;
5074 out_data->data_extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size);
5075
5076 if (!out_data->data_extensions)
5077 {
5078 return CGLTF_ERROR_NOMEM;
5079 }
5080
5081 ++i;
5082
5083 for (int k = 0; k < extensions_size; ++k)
5084 {
5085 CGLTF_CHECK_KEY(tokens[i]);
5086
5087 if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_lights_punctual") == 0)
5088 {
5089 ++i;
5090
5091 CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT);
5092
5093 int data_size = tokens[i].size;
5094 ++i;
5095
5096 for (int m = 0; m < data_size; ++m)
5097 {
5098 CGLTF_CHECK_KEY(tokens[i]);
5099
5100 if (cgltf_json_strcmp(tokens + i, json_chunk, "lights") == 0)
5101 {
5102 i = cgltf_parse_json_lights(options, tokens, i + 1, json_chunk, out_data);
5103 }
5104 else
5105 {
5106 i = cgltf_skip_json(tokens, i + 1);
5107 }
5108
5109 if (i < 0)
5110 {
5111 return i;
5112 }
5113 }
5114 }
5115 else
5116 {
5117 i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_data->data_extensions[out_data->data_extensions_count++]));
5118 }
5119
5120 if (i < 0)
5121 {
5122 return i;
5123 }
5124 }
5125 }
5126 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensionsUsed") == 0)
5127 {
5128 i = cgltf_parse_json_string_array(options, tokens, i + 1, json_chunk, &out_data->extensions_used, &out_data->extensions_used_count);
5129 }
5130 else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensionsRequired") == 0)
5131 {
5132 i = cgltf_parse_json_string_array(options, tokens, i + 1, json_chunk, &out_data->extensions_required, &out_data->extensions_required_count);
5133 }
5134 else
5135 {
5136 i = cgltf_skip_json(tokens, i + 1);
5137 }
5138
5139 if (i < 0)
5140 {
5141 return i;
5142 }
5143 }
5144
5145 return i;
5146}
5147
5148cgltf_result cgltf_parse_json(cgltf_options* options, const uint8_t* json_chunk, cgltf_size size, cgltf_data** out_data)
5149{
5150 jsmn_parser parser = { 0, 0, 0 };
5151
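	/* First pass: calling jsmn_parse with a NULL token array only counts the
	   tokens required; the allocation and the second, filling pass follow. */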
5152 if (options->json_token_count == 0)
5153 {
5154 int token_count = jsmn_parse(&parser, (const char*)json_chunk, size, NULL, 0);
5155
5156 if (token_count <= 0)
5157 {
5158 return cgltf_result_invalid_json;
5159 }
5160
5161 options->json_token_count = token_count;
5162 }
5163
5164 jsmntok_t* tokens = (jsmntok_t*)options->memory.alloc(options->memory.user_data, sizeof(jsmntok_t) * (options->json_token_count + 1));
5165
5166 if (!tokens)
5167 {
5168 return cgltf_result_out_of_memory;
5169 }
5170
5171 jsmn_init(&parser);
5172
5173 int token_count = jsmn_parse(&parser, (const char*)json_chunk, size, tokens, options->json_token_count);
5174
5175 if (token_count <= 0)
5176 {
5177 options->memory.free(options->memory.user_data, tokens);
5178 return cgltf_result_invalid_json;
5179 }
5180
5181	// ensure there is always an UNDEFINED token at the end of the stream;
5182	// for invalid JSON inputs this prevents out-of-bounds reads of token data
5183 tokens[token_count].type = JSMN_UNDEFINED;
5184
5185 cgltf_data* data = (cgltf_data*)options->memory.alloc(options->memory.user_data, sizeof(cgltf_data));
5186
5187 if (!data)
5188 {
5189 options->memory.free(options->memory.user_data, tokens);
5190 return cgltf_result_out_of_memory;
5191 }
5192
5193 memset(data, 0, sizeof(cgltf_data));
5194 data->memory = options->memory;
5195 data->file = options->file;
5196
5197 int i = cgltf_parse_json_root(options, tokens, 0, json_chunk, data);
5198
5199 options->memory.free(options->memory.user_data, tokens);
5200
5201 if (i < 0)
5202 {
5203 cgltf_free(data);
5204
5205 switch (i)
5206 {
5207 case CGLTF_ERROR_NOMEM: return cgltf_result_out_of_memory;
5208 case CGLTF_ERROR_LEGACY: return cgltf_result_legacy_gltf;
5209 default: return cgltf_result_invalid_gltf;
5210 }
5211 }
5212
5213 if (cgltf_fixup_pointers(data) < 0)
5214 {
5215 cgltf_free(data);
5216 return cgltf_result_invalid_gltf;
5217 }
5218
5219 data->json = (const char*)json_chunk;
5220 data->json_size = size;
5221
5222 *out_data = data;
5223
5224 return cgltf_result_success;
5225}
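/* A minimal usage sketch, illustrative only and fenced off with #if 0 so it
 * never compiles: cgltf_parse_json uses options->memory.alloc/free directly,
 * so both callbacks must be non-NULL when it is called on its own (the
 * higher-level cgltf_parse entry point defined elsewhere in this file is what
 * normally installs defaults). The example_* names are hypothetical. */
#if 0
#include <stdlib.h> /* malloc/free for the example callbacks */

static void* example_alloc(void* user, cgltf_size size) { (void)user; return malloc(size); } /* hypothetical helper */
static void  example_free(void* user, void* ptr)        { (void)user; free(ptr); }           /* hypothetical helper */

static cgltf_result example_parse(const uint8_t* json, cgltf_size size, cgltf_data** out_data)
{
	cgltf_options options = {0};
	options.memory.alloc = example_alloc;
	options.memory.free  = example_free;
	return cgltf_parse_json(&options, json, size, out_data);
}
#endif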
5226
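/* During parsing, cross references are stored as indices biased by one (see
 * the CGLTF_PTRINDEX macro earlier in this file) so that NULL still means
 * "absent"; this pass converts them back to real pointers and range-checks
 * every index via the CGLTF_PTRFIXUP macros. */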
5227static int cgltf_fixup_pointers(cgltf_data* data)
5228{
5229 for (cgltf_size i = 0; i < data->meshes_count; ++i)
5230 {
5231 for (cgltf_size j = 0; j < data->meshes[i].primitives_count; ++j)
5232 {
5233 CGLTF_PTRFIXUP(data->meshes[i].primitives[j].indices, data->accessors, data->accessors_count);
5234 CGLTF_PTRFIXUP(data->meshes[i].primitives[j].material, data->materials, data->materials_count);
5235
5236 for (cgltf_size k = 0; k < data->meshes[i].primitives[j].attributes_count; ++k)
5237 {
5238 CGLTF_PTRFIXUP_REQ(data->meshes[i].primitives[j].attributes[k].data, data->accessors, data->accessors_count);
5239 }
5240
5241 for (cgltf_size k = 0; k < data->meshes[i].primitives[j].targets_count; ++k)
5242 {
5243 for (cgltf_size m = 0; m < data->meshes[i].primitives[j].targets[k].attributes_count; ++m)
5244 {
5245 CGLTF_PTRFIXUP_REQ(data->meshes[i].primitives[j].targets[k].attributes[m].data, data->accessors, data->accessors_count);
5246 }
5247 }
5248
5249 if (data->meshes[i].primitives[j].has_draco_mesh_compression)
5250 {
5251 CGLTF_PTRFIXUP_REQ(data->meshes[i].primitives[j].draco_mesh_compression.buffer_view, data->buffer_views, data->buffer_views_count);
5252 for (cgltf_size m = 0; m < data->meshes[i].primitives[j].draco_mesh_compression.attributes_count; ++m)
5253 {
5254 CGLTF_PTRFIXUP_REQ(data->meshes[i].primitives[j].draco_mesh_compression.attributes[m].data, data->accessors, data->accessors_count);
5255 }
5256 }
5257 }
5258 }
5259
5260 for (cgltf_size i = 0; i < data->accessors_count; ++i)
5261 {
5262 CGLTF_PTRFIXUP(data->accessors[i].buffer_view, data->buffer_views, data->buffer_views_count);
5263
5264 if (data->accessors[i].is_sparse)
5265 {
5266 CGLTF_PTRFIXUP_REQ(data->accessors[i].sparse.indices_buffer_view, data->buffer_views, data->buffer_views_count);
5267 CGLTF_PTRFIXUP_REQ(data->accessors[i].sparse.values_buffer_view, data->buffer_views, data->buffer_views_count);
5268 }
5269
5270 if (data->accessors[i].buffer_view)
5271 {
5272 data->accessors[i].stride = data->accessors[i].buffer_view->stride;
5273 }
5274
5275 if (data->accessors[i].stride == 0)
5276 {
5277 data->accessors[i].stride = cgltf_calc_size(data->accessors[i].type, data->accessors[i].component_type);
5278 }
5279 }
5280
5281 for (cgltf_size i = 0; i < data->textures_count; ++i)
5282 {
5283 CGLTF_PTRFIXUP(data->textures[i].image, data->images, data->images_count);
5284 CGLTF_PTRFIXUP(data->textures[i].sampler, data->samplers, data->samplers_count);
5285 }
5286
5287 for (cgltf_size i = 0; i < data->images_count; ++i)
5288 {
5289 CGLTF_PTRFIXUP(data->images[i].buffer_view, data->buffer_views, data->buffer_views_count);
5290 }
5291
5292 for (cgltf_size i = 0; i < data->materials_count; ++i)
5293 {
5294 CGLTF_PTRFIXUP(data->materials[i].normal_texture.texture, data->textures, data->textures_count);
5295 CGLTF_PTRFIXUP(data->materials[i].emissive_texture.texture, data->textures, data->textures_count);
5296 CGLTF_PTRFIXUP(data->materials[i].occlusion_texture.texture, data->textures, data->textures_count);
5297
5298 CGLTF_PTRFIXUP(data->materials[i].pbr_metallic_roughness.base_color_texture.texture, data->textures, data->textures_count);
5299 CGLTF_PTRFIXUP(data->materials[i].pbr_metallic_roughness.metallic_roughness_texture.texture, data->textures, data->textures_count);
5300
5301 CGLTF_PTRFIXUP(data->materials[i].pbr_specular_glossiness.diffuse_texture.texture, data->textures, data->textures_count);
5302 CGLTF_PTRFIXUP(data->materials[i].pbr_specular_glossiness.specular_glossiness_texture.texture, data->textures, data->textures_count);
5303
5304 CGLTF_PTRFIXUP(data->materials[i].clearcoat.clearcoat_texture.texture, data->textures, data->textures_count);
5305 CGLTF_PTRFIXUP(data->materials[i].clearcoat.clearcoat_roughness_texture.texture, data->textures, data->textures_count);
5306 CGLTF_PTRFIXUP(data->materials[i].clearcoat.clearcoat_normal_texture.texture, data->textures, data->textures_count);
5307
5308 CGLTF_PTRFIXUP(data->materials[i].specular.specular_texture.texture, data->textures, data->textures_count);
5309
5310 CGLTF_PTRFIXUP(data->materials[i].transmission.transmission_texture.texture, data->textures, data->textures_count);
5311 }
5312
5313 for (cgltf_size i = 0; i < data->buffer_views_count; ++i)
5314 {
5315 CGLTF_PTRFIXUP_REQ(data->buffer_views[i].buffer, data->buffers, data->buffers_count);
5316 }
5317
5318 for (cgltf_size i = 0; i < data->skins_count; ++i)
5319 {
5320 for (cgltf_size j = 0; j < data->skins[i].joints_count; ++j)
5321 {
5322 CGLTF_PTRFIXUP_REQ(data->skins[i].joints[j], data->nodes, data->nodes_count);
5323 }
5324
5325 CGLTF_PTRFIXUP(data->skins[i].skeleton, data->nodes, data->nodes_count);
5326 CGLTF_PTRFIXUP(data->skins[i].inverse_bind_matrices, data->accessors, data->accessors_count);
5327 }
5328
5329 for (cgltf_size i = 0; i < data->nodes_count; ++i)
5330 {
5331 for (cgltf_size j = 0; j < data->nodes[i].children_count; ++j)
5332 {
5333 CGLTF_PTRFIXUP_REQ(data->nodes[i].children[j], data->nodes, data->nodes_count);
5334
5335 if (data->nodes[i].children[j]->parent)
5336 {
5337 return CGLTF_ERROR_JSON;
5338 }
5339
5340 data->nodes[i].children[j]->parent = &data->nodes[i];
5341 }
5342
5343 CGLTF_PTRFIXUP(data->nodes[i].mesh, data->meshes, data->meshes_count);
5344 CGLTF_PTRFIXUP(data->nodes[i].skin, data->skins, data->skins_count);
5345 CGLTF_PTRFIXUP(data->nodes[i].camera, data->cameras, data->cameras_count);
5346 CGLTF_PTRFIXUP(data->nodes[i].light, data->lights, data->lights_count);
5347 }
5348
5349 for (cgltf_size i = 0; i < data->scenes_count; ++i)
5350 {
5351 for (cgltf_size j = 0; j < data->scenes[i].nodes_count; ++j)
5352 {
5353 CGLTF_PTRFIXUP_REQ(data->scenes[i].nodes[j], data->nodes, data->nodes_count);
5354
5355 if (data->scenes[i].nodes[j]->parent)
5356 {
5357 return CGLTF_ERROR_JSON;
5358 }
5359 }
5360 }
5361
5362 CGLTF_PTRFIXUP(data->scene, data->scenes, data->scenes_count);
5363
5364 for (cgltf_size i = 0; i < data->animations_count; ++i)
5365 {
5366 for (cgltf_size j = 0; j < data->animations[i].samplers_count; ++j)
5367 {
5368 CGLTF_PTRFIXUP_REQ(data->animations[i].samplers[j].input, data->accessors, data->accessors_count);
5369 CGLTF_PTRFIXUP_REQ(data->animations[i].samplers[j].output, data->accessors, data->accessors_count);
5370 }
5371
5372 for (cgltf_size j = 0; j < data->animations[i].channels_count; ++j)
5373 {
5374 CGLTF_PTRFIXUP_REQ(data->animations[i].channels[j].sampler, data->animations[i].samplers, data->animations[i].samplers_count);
5375 CGLTF_PTRFIXUP(data->animations[i].channels[j].target_node, data->nodes, data->nodes_count);
5376 }
5377 }
5378
5379 return 0;
5380}
5381
5382/*
5383 * -- jsmn.c start --
5384 * Source: https://github.com/zserge/jsmn
5385 * License: MIT
5386 *
5387 * Copyright (c) 2010 Serge A. Zaitsev
5388
5389 * Permission is hereby granted, free of charge, to any person obtaining a copy
5390 * of this software and associated documentation files (the "Software"), to deal
5391 * in the Software without restriction, including without limitation the rights
5392 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
5393 * copies of the Software, and to permit persons to whom the Software is
5394 * furnished to do so, subject to the following conditions:
5395
5396 * The above copyright notice and this permission notice shall be included in
5397 * all copies or substantial portions of the Software.
5398
5399 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5400 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5401 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
5402 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
5403 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
5404 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
5405 * THE SOFTWARE.
5406 */
5407
5408/**
5409 * Allocates a fresh unused token from the token pool.
5410 */
5411static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser,
5412 jsmntok_t *tokens, size_t num_tokens) {
5413 jsmntok_t *tok;
5414 if (parser->toknext >= num_tokens) {
5415 return NULL;
5416 }
5417 tok = &tokens[parser->toknext++];
5418 tok->start = tok->end = -1;
5419 tok->size = 0;
5420#ifdef JSMN_PARENT_LINKS
5421 tok->parent = -1;
5422#endif
5423 return tok;
5424}
5425
5426/**
5427 * Fills token type and boundaries.
5428 */
5429static void jsmn_fill_token(jsmntok_t *token, jsmntype_t type,
5430 int start, int end) {
5431 token->type = type;
5432 token->start = start;
5433 token->end = end;
5434 token->size = 0;
5435}
5436
5437/**
5438 * Fills next available token with JSON primitive.
5439 */
5440static int jsmn_parse_primitive(jsmn_parser *parser, const char *js,
5441 size_t len, jsmntok_t *tokens, size_t num_tokens) {
5442 jsmntok_t *token;
5443 int start;
5444
5445 start = parser->pos;
5446
5447 for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
5448 switch (js[parser->pos]) {
5449#ifndef JSMN_STRICT
5450			/* In non-strict mode a primitive may also be an object key, so ':' terminates it too */
5451 case ':':
5452#endif
5453 case '\t' : case '\r' : case '\n' : case ' ' :
5454 case ',' : case ']' : case '}' :
5455 goto found;
5456 }
5457 if (js[parser->pos] < 32 || js[parser->pos] >= 127) {
5458 parser->pos = start;
5459 return JSMN_ERROR_INVAL;
5460 }
5461 }
5462#ifdef JSMN_STRICT
5463 /* In strict mode primitive must be followed by a comma/object/array */
5464 parser->pos = start;
5465 return JSMN_ERROR_PART;
5466#endif
5467
5468found:
5469 if (tokens == NULL) {
5470 parser->pos--;
5471 return 0;
5472 }
5473 token = jsmn_alloc_token(parser, tokens, num_tokens);
5474 if (token == NULL) {
5475 parser->pos = start;
5476 return JSMN_ERROR_NOMEM;
5477 }
5478 jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos);
5479#ifdef JSMN_PARENT_LINKS
5480 token->parent = parser->toksuper;
5481#endif
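	/* Step back so the enclosing loop re-examines the character that
	   terminated the primitive. */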
5482 parser->pos--;
5483 return 0;
5484}
5485
5486/**
5487 * Fills next token with JSON string.
5488 */
5489static int jsmn_parse_string(jsmn_parser *parser, const char *js,
5490 size_t len, jsmntok_t *tokens, size_t num_tokens) {
5491 jsmntok_t *token;
5492
5493 int start = parser->pos;
5494
5495	/* Skip starting quote */
5496	parser->pos++;
5497
5498	for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
5499 char c = js[parser->pos];
5500
5501 /* Quote: end of string */
5502 if (c == '\"') {
5503 if (tokens == NULL) {
5504 return 0;
5505 }
5506 token = jsmn_alloc_token(parser, tokens, num_tokens);
5507 if (token == NULL) {
5508 parser->pos = start;
5509 return JSMN_ERROR_NOMEM;
5510 }
5511 jsmn_fill_token(token, JSMN_STRING, start+1, parser->pos);
5512#ifdef JSMN_PARENT_LINKS
5513 token->parent = parser->toksuper;
5514#endif
5515 return 0;
5516 }
5517
5518 /* Backslash: Quoted symbol expected */
5519 if (c == '\\' && parser->pos + 1 < len) {
5520 int i;
5521 parser->pos++;
5522 switch (js[parser->pos]) {
5523 /* Allowed escaped symbols */
5524 case '\"': case '/' : case '\\' : case 'b' :
5525 case 'f' : case 'r' : case 'n' : case 't' :
5526 break;
5527 /* Allows escaped symbol \uXXXX */
5528 case 'u':
5529 parser->pos++;
5530 for(i = 0; i < 4 && parser->pos < len && js[parser->pos] != '\0'; i++) {
5531 /* If it isn't a hex character we have an error */
5532 if(!((js[parser->pos] >= 48 && js[parser->pos] <= 57) || /* 0-9 */
5533 (js[parser->pos] >= 65 && js[parser->pos] <= 70) || /* A-F */
5534 (js[parser->pos] >= 97 && js[parser->pos] <= 102))) { /* a-f */
5535 parser->pos = start;
5536 return JSMN_ERROR_INVAL;
5537 }
5538 parser->pos++;
5539 }
5540 parser->pos--;
5541 break;
5542 /* Unexpected symbol */
5543 default:
5544 parser->pos = start;
5545 return JSMN_ERROR_INVAL;
5546 }
5547 }
5548 }
5549 parser->pos = start;
5550 return JSMN_ERROR_PART;
5551}
5552
5553/**
5554 * Parse JSON string and fill tokens.
5555 */
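/* A typical call sequence (an illustrative sketch, not code from this file;
 * error handling omitted): run once with tokens == NULL to count the tokens,
 * then allocate and parse for real.
 *
 *   jsmn_parser parser;
 *   jsmn_init(&parser);
 *   int count = jsmn_parse(&parser, json, strlen(json), NULL, 0);
 *   jsmntok_t* tokens = (jsmntok_t*) malloc(count * sizeof(jsmntok_t));
 *   jsmn_init(&parser);
 *   jsmn_parse(&parser, json, strlen(json), tokens, count);
 */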
5556static int jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
5557 jsmntok_t *tokens, size_t num_tokens) {
5558 int r;
5559 int i;
5560 jsmntok_t *token;
5561 int count = parser->toknext;
5562
5563 for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
5564 char c;
5565 jsmntype_t type;
5566
5567 c = js[parser->pos];
5568 switch (c) {
5569 case '{': case '[':
5570 count++;
5571 if (tokens == NULL) {
5572 break;
5573 }
5574 token = jsmn_alloc_token(parser, tokens, num_tokens);
5575 if (token == NULL)
5576 return JSMN_ERROR_NOMEM;
5577 if (parser->toksuper != -1) {
5578 tokens[parser->toksuper].size++;
5579#ifdef JSMN_PARENT_LINKS
5580 token->parent = parser->toksuper;
5581#endif
5582 }
5583 token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY);
5584 token->start = parser->pos;
5585 parser->toksuper = parser->toknext - 1;
5586 break;
5587 case '}': case ']':
5588 if (tokens == NULL)
5589 break;
5590 type = (c == '}' ? JSMN_OBJECT : JSMN_ARRAY);
5591#ifdef JSMN_PARENT_LINKS
5592 if (parser->toknext < 1) {
5593 return JSMN_ERROR_INVAL;
5594 }
5595 token = &tokens[parser->toknext - 1];
5596 for (;;) {
5597 if (token->start != -1 && token->end == -1) {
5598 if (token->type != type) {
5599 return JSMN_ERROR_INVAL;
5600 }
5601 token->end = parser->pos + 1;
5602 parser->toksuper = token->parent;
5603 break;
5604 }
5605 if (token->parent == -1) {
5606 if(token->type != type || parser->toksuper == -1) {
5607 return JSMN_ERROR_INVAL;
5608 }
5609 break;
5610 }
5611 token = &tokens[token->parent];
5612 }
5613#else
5614 for (i = parser->toknext - 1; i >= 0; i--) {
5615 token = &tokens[i];
5616 if (token->start != -1 && token->end == -1) {
5617 if (token->type != type) {
5618 return JSMN_ERROR_INVAL;
5619 }
5620 parser->toksuper = -1;
5621 token->end = parser->pos + 1;
5622 break;
5623 }
5624 }
5625 /* Error if unmatched closing bracket */
5626 if (i == -1) return JSMN_ERROR_INVAL;
5627 for (; i >= 0; i--) {
5628 token = &tokens[i];
5629 if (token->start != -1 && token->end == -1) {
5630 parser->toksuper = i;
5631 break;
5632 }
5633 }
5634#endif
5635 break;
5636 case '\"':
5637 r = jsmn_parse_string(parser, js, len, tokens, num_tokens);
5638 if (r < 0) return r;
5639 count++;
5640 if (parser->toksuper != -1 && tokens != NULL)
5641 tokens[parser->toksuper].size++;
5642 break;
5643 case '\t' : case '\r' : case '\n' : case ' ':
5644 break;
5645 case ':':
5646 parser->toksuper = parser->toknext - 1;
5647 break;
5648 case ',':
5649 if (tokens != NULL && parser->toksuper != -1 &&
5650 tokens[parser->toksuper].type != JSMN_ARRAY &&
5651 tokens[parser->toksuper].type != JSMN_OBJECT) {
5652#ifdef JSMN_PARENT_LINKS
5653 parser->toksuper = tokens[parser->toksuper].parent;
5654#else
5655 for (i = parser->toknext - 1; i >= 0; i--) {
5656 if (tokens[i].type == JSMN_ARRAY || tokens[i].type == JSMN_OBJECT) {
5657 if (tokens[i].start != -1 && tokens[i].end == -1) {
5658 parser->toksuper = i;
5659 break;
5660 }
5661 }
5662 }
5663#endif
5664 }
5665 break;
5666#ifdef JSMN_STRICT
5667 /* In strict mode primitives are: numbers and booleans */
5668 case '-': case '0': case '1' : case '2': case '3' : case '4':
5669 case '5': case '6': case '7' : case '8': case '9':
5670 case 't': case 'f': case 'n' :
5671 /* And they must not be keys of the object */
5672 if (tokens != NULL && parser->toksuper != -1) {
5673 jsmntok_t *t = &tokens[parser->toksuper];
5674 if (t->type == JSMN_OBJECT ||
5675 (t->type == JSMN_STRING && t->size != 0)) {
5676 return JSMN_ERROR_INVAL;
5677 }
5678 }
5679#else
5680 /* In non-strict mode every unquoted value is a primitive */
5681 default:
5682#endif
5683 r = jsmn_parse_primitive(parser, js, len, tokens, num_tokens);
5684 if (r < 0) return r;
5685 count++;
5686 if (parser->toksuper != -1 && tokens != NULL)
5687 tokens[parser->toksuper].size++;
5688 break;
5689
5690#ifdef JSMN_STRICT
5691 /* Unexpected char in strict mode */
5692 default:
5693 return JSMN_ERROR_INVAL;
5694#endif
5695 }
5696 }
5697
5698 if (tokens != NULL) {
5699 for (i = parser->toknext - 1; i >= 0; i--) {
5700 /* Unmatched opened object or array */
5701 if (tokens[i].start != -1 && tokens[i].end == -1) {
5702 return JSMN_ERROR_PART;
5703 }
5704 }
5705 }
5706
5707 return count;
5708}
5709
5710/**
5711 * Creates a new parser over a given buffer with an array of tokens
5712 * available.
5713 */
5714static void jsmn_init(jsmn_parser *parser) {
5715 parser->pos = 0;
5716 parser->toknext = 0;
5717 parser->toksuper = -1;
5718}
5719/*
5720 * -- jsmn.c end --
5721 */
5722
5723#endif /* #ifdef CGLTF_IMPLEMENTATION */
5724
5725/* cgltf is distributed under MIT license:
5726 *
5727 * Copyright (c) 2018 Johannes Kuhlmann
5728
5729 * Permission is hereby granted, free of charge, to any person obtaining a copy
5730 * of this software and associated documentation files (the "Software"), to deal
5731 * in the Software without restriction, including without limitation the rights
5732 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
5733 * copies of the Software, and to permit persons to whom the Software is
5734 * furnished to do so, subject to the following conditions:
5735
5736 * The above copyright notice and this permission notice shall be included in all
5737 * copies or substantial portions of the Software.
5738
5739 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5740 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5741 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
5742 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
5743 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
5744 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
5745 * SOFTWARE.
5746 */
diff --git a/contrib/cgltf/cgltf_write.h b/contrib/cgltf/cgltf_write.h
new file mode 100644
index 0000000..2096a5b
--- /dev/null
+++ b/contrib/cgltf/cgltf_write.h
@@ -0,0 +1,1173 @@
1/**
2 * cgltf_write - a single-file glTF 2.0 writer written in C99.
3 *
4 * Version: 1.7
5 *
6 * Website: https://github.com/jkuhlmann/cgltf
7 *
8 * Distributed under the MIT License, see notice at the end of this file.
9 *
10 * Building:
11 * Include this file where you need the struct and function
12 * declarations. Have exactly one source file where you define
13 * `CGLTF_WRITE_IMPLEMENTATION` before including this file to get the
14 * function definitions.
15 *
16 * Reference:
17 * `cgltf_result cgltf_write_file(const cgltf_options* options, const char*
18 * path, const cgltf_data* data)` writes JSON to the given file path. Buffer
19 * files and external images are not written out. `data` is not deallocated.
20 *
21 * `cgltf_size cgltf_write(const cgltf_options* options, char* buffer,
22 * cgltf_size size, const cgltf_data* data)` writes JSON into the given memory
23 * buffer. Returns the number of bytes written to `buffer`, including a null
24 * terminator. If buffer is null, returns the number of bytes that would have
25 * been written. `data` is not deallocated.
26 *
27 * To write custom JSON into the `extras` field, aggregate all the custom JSON
28 * into a single buffer, then set `file_data` to this buffer. By supplying
29 * start_offset and end_offset values for various objects, you can select a
30 * range of characters within the aggregated buffer.
31 */
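/* A minimal usage sketch (the output path and error handling here are
 * illustrative assumptions, not part of this header):
 *
 *   #define CGLTF_WRITE_IMPLEMENTATION
 *   #include "cgltf_write.h"
 *
 *   cgltf_options options = {0};
 *   if (cgltf_write_file(&options, "scene.gltf", data) != cgltf_result_success)
 *   {
 *       // handle the error
 *   }
 */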
32#ifndef CGLTF_WRITE_H_INCLUDED__
33#define CGLTF_WRITE_H_INCLUDED__
34
35#include "cgltf.h"
36
37#include <stddef.h>
38#include <stdbool.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44cgltf_result cgltf_write_file(const cgltf_options* options, const char* path, const cgltf_data* data);
45cgltf_size cgltf_write(const cgltf_options* options, char* buffer, cgltf_size size, const cgltf_data* data);
46
47#ifdef __cplusplus
48}
49#endif
50
51#endif /* #ifndef CGLTF_WRITE_H_INCLUDED__ */
52
53/*
54 *
55 * Stop now, if you are only interested in the API.
56 * Below, you find the implementation.
57 *
58 */
59
60#if defined(__INTELLISENSE__) || defined(__JETBRAINS_IDE__)
61/* This makes MSVC/CLion intellisense work. */
62#define CGLTF_IMPLEMENTATION
63#endif
64
65#ifdef CGLTF_WRITE_IMPLEMENTATION
66
67#include <stdio.h>
68#include <stdint.h>
69#include <stdlib.h>
70#include <string.h>
71
72#define CGLTF_EXTENSION_FLAG_TEXTURE_TRANSFORM (1 << 0)
73#define CGLTF_EXTENSION_FLAG_MATERIALS_UNLIT (1 << 1)
74#define CGLTF_EXTENSION_FLAG_SPECULAR_GLOSSINESS (1 << 2)
75#define CGLTF_EXTENSION_FLAG_LIGHTS_PUNCTUAL (1 << 3)
76#define CGLTF_EXTENSION_FLAG_DRACO_MESH_COMPRESSION (1 << 4)
77#define CGLTF_EXTENSION_FLAG_MATERIALS_CLEARCOAT (1 << 5)
78#define CGLTF_EXTENSION_FLAG_MATERIALS_IOR (1 << 6)
79#define CGLTF_EXTENSION_FLAG_MATERIALS_SPECULAR (1 << 7)
80#define CGLTF_EXTENSION_FLAG_MATERIALS_TRANSMISSION (1 << 8)
81
82typedef struct {
83 char* buffer;
84 cgltf_size buffer_size;
85 cgltf_size remaining;
86 char* cursor;
87 cgltf_size tmp;
88 cgltf_size chars_written;
89 const cgltf_data* data;
90 int depth;
91 const char* indent;
92 int needs_comma;
93 uint32_t extension_flags;
94 uint32_t required_extension_flags;
95} cgltf_write_context;
96
97#define CGLTF_MIN(a, b) (((a) < (b)) ? (a) : (b))
98
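/* Both write macros tolerate a NULL cursor: snprintf(NULL, 0, ...) only
 * measures, so a first pass with no buffer accumulates the exact byte count
 * in chars_written for a second, real pass (see cgltf_write_file below). */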
99#define CGLTF_SPRINTF(...) { \
100 context->tmp = snprintf ( context->cursor, context->remaining, __VA_ARGS__ ); \
101 context->chars_written += context->tmp; \
102 if (context->cursor) { \
103 context->cursor += context->tmp; \
104 context->remaining -= context->tmp; \
105 } }
106
107#define CGLTF_SNPRINTF(length, ...) { \
108 context->tmp = snprintf ( context->cursor, CGLTF_MIN(length + 1, context->remaining), __VA_ARGS__ ); \
109 context->chars_written += length; \
110 if (context->cursor) { \
111 context->cursor += length; \
112 context->remaining -= length; \
113 } }
114
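/* glTF cross-references objects by index into the top-level arrays; these
 * helpers recover that index from an object pointer by subtracting the base
 * pointer of its array. */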
115#define CGLTF_WRITE_IDXPROP(label, val, start) if (val) { \
116 cgltf_write_indent(context); \
117 CGLTF_SPRINTF("\"%s\": %d", label, (int) (val - start)); \
118 context->needs_comma = 1; }
119
120#define CGLTF_WRITE_IDXARRPROP(label, dim, vals, start) if (vals) { \
121 cgltf_write_indent(context); \
122 CGLTF_SPRINTF("\"%s\": [", label); \
123 for (int i = 0; i < (int)(dim); ++i) { \
124 int idx = (int) (vals[i] - start); \
125 if (i != 0) CGLTF_SPRINTF(","); \
126 CGLTF_SPRINTF(" %d", idx); \
127 } \
128 CGLTF_SPRINTF(" ]"); \
129 context->needs_comma = 1; }
130
131#define CGLTF_WRITE_TEXTURE_INFO(label, info) if (info.texture) { \
132 cgltf_write_line(context, "\"" label "\": {"); \
133 CGLTF_WRITE_IDXPROP("index", info.texture, context->data->textures); \
134 cgltf_write_intprop(context, "texCoord", info.texcoord, 0); \
135 cgltf_write_floatprop(context, "scale", info.scale, 1.0f); \
136 if (info.has_transform) { \
137 context->extension_flags |= CGLTF_EXTENSION_FLAG_TEXTURE_TRANSFORM; \
138 cgltf_write_texture_transform(context, &info.transform); \
139 } \
140 cgltf_write_extras(context, &info.extras); \
141 cgltf_write_line(context, "}"); }
142
143static void cgltf_write_indent(cgltf_write_context* context)
144{
145 if (context->needs_comma)
146 {
147 CGLTF_SPRINTF(",\n");
148 context->needs_comma = 0;
149 }
150 else
151 {
152 CGLTF_SPRINTF("\n");
153 }
154 for (int i = 0; i < context->depth; ++i)
155 {
156 CGLTF_SPRINTF("%s", context->indent);
157 }
158}
159
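/* Writes a single line of JSON, maintaining indentation depth and comma
 * placement: a line opening with ']' or '}' dedents before printing, and a
 * line ending with '[' or '{' indents whatever follows. */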
160static void cgltf_write_line(cgltf_write_context* context, const char* line)
161{
162 if (line[0] == ']' || line[0] == '}')
163 {
164 --context->depth;
165 context->needs_comma = 0;
166 }
167 cgltf_write_indent(context);
168 CGLTF_SPRINTF("%s", line);
169 cgltf_size last = (cgltf_size)(strlen(line) - 1);
170 if (line[0] == ']' || line[0] == '}')
171 {
172 context->needs_comma = 1;
173 }
174 if (line[last] == '[' || line[last] == '{')
175 {
176 ++context->depth;
177 context->needs_comma = 0;
178 }
179}
180
181static void cgltf_write_strprop(cgltf_write_context* context, const char* label, const char* val)
182{
183 if (val)
184 {
185 cgltf_write_indent(context);
186 CGLTF_SPRINTF("\"%s\": \"%s\"", label, val);
187 context->needs_comma = 1;
188 }
189}
190
191static void cgltf_write_extras(cgltf_write_context* context, const cgltf_extras* extras)
192{
193 cgltf_size length = extras->end_offset - extras->start_offset;
194 if (length > 0 && context->data->file_data)
195 {
196 char* json_string = ((char*) context->data->file_data) + extras->start_offset;
197 cgltf_write_indent(context);
198 CGLTF_SPRINTF("%s", "\"extras\": ");
199 CGLTF_SNPRINTF(length, "%s", json_string);
200 context->needs_comma = 1;
201 }
202}
203
204static void cgltf_write_stritem(cgltf_write_context* context, const char* item)
205{
206 cgltf_write_indent(context);
207 CGLTF_SPRINTF("\"%s\"", item);
208 context->needs_comma = 1;
209}
210
211static void cgltf_write_intprop(cgltf_write_context* context, const char* label, int val, int def)
212{
213 if (val != def)
214 {
215 cgltf_write_indent(context);
216 CGLTF_SPRINTF("\"%s\": %d", label, val);
217 context->needs_comma = 1;
218 }
219}
220
221static void cgltf_write_floatprop(cgltf_write_context* context, const char* label, float val, float def)
222{
223 if (val != def)
224 {
225 cgltf_write_indent(context);
226 CGLTF_SPRINTF("\"%s\": ", label);
227 CGLTF_SPRINTF("%g", val);
228 context->needs_comma = 1;
229
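		/* snprintf's %g is locale-sensitive; if the current locale emitted a
		   decimal comma, patch it back to a '.' to keep the output valid JSON. */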
230 if (context->cursor)
231 {
232 char *decimal_comma = strchr(context->cursor - context->tmp, ',');
233 if (decimal_comma)
234 {
235 *decimal_comma = '.';
236 }
237 }
238 }
239}
240
241static void cgltf_write_boolprop_optional(cgltf_write_context* context, const char* label, bool val, bool def)
242{
243 if (val != def)
244 {
245 cgltf_write_indent(context);
246 CGLTF_SPRINTF("\"%s\": %s", label, val ? "true" : "false");
247 context->needs_comma = 1;
248 }
249}
250
251static void cgltf_write_floatarrayprop(cgltf_write_context* context, const char* label, const cgltf_float* vals, cgltf_size dim)
252{
253 cgltf_write_indent(context);
254 CGLTF_SPRINTF("\"%s\": [", label);
255 for (cgltf_size i = 0; i < dim; ++i)
256 {
257 if (i != 0)
258 {
259 CGLTF_SPRINTF(", %g", vals[i]);
260 }
261 else
262 {
263 CGLTF_SPRINTF("%g", vals[i]);
264 }
265 }
266 CGLTF_SPRINTF("]");
267 context->needs_comma = 1;
268}
269
270static bool cgltf_check_floatarray(const float* vals, int dim, float val) {
271 while (dim--)
272 {
273 if (vals[dim] != val)
274 {
275 return true;
276 }
277 }
278 return false;
279}
280
281static int cgltf_int_from_component_type(cgltf_component_type ctype)
282{
283 switch (ctype)
284 {
285 case cgltf_component_type_r_8: return 5120;
286 case cgltf_component_type_r_8u: return 5121;
287 case cgltf_component_type_r_16: return 5122;
288 case cgltf_component_type_r_16u: return 5123;
289 case cgltf_component_type_r_32u: return 5125;
290 case cgltf_component_type_r_32f: return 5126;
291 default: return 0;
292 }
293}
294
295static const char* cgltf_str_from_alpha_mode(cgltf_alpha_mode alpha_mode)
296{
297 switch (alpha_mode)
298 {
299 case cgltf_alpha_mode_mask: return "MASK";
300 case cgltf_alpha_mode_blend: return "BLEND";
301 default: return NULL;
302 }
303}
304
305static const char* cgltf_str_from_type(cgltf_type type)
306{
307 switch (type)
308 {
309 case cgltf_type_scalar: return "SCALAR";
310 case cgltf_type_vec2: return "VEC2";
311 case cgltf_type_vec3: return "VEC3";
312 case cgltf_type_vec4: return "VEC4";
313 case cgltf_type_mat2: return "MAT2";
314 case cgltf_type_mat3: return "MAT3";
315 case cgltf_type_mat4: return "MAT4";
316 default: return NULL;
317 }
318}
319
320static cgltf_size cgltf_dim_from_type(cgltf_type type)
321{
322 switch (type)
323 {
324 case cgltf_type_scalar: return 1;
325 case cgltf_type_vec2: return 2;
326 case cgltf_type_vec3: return 3;
327 case cgltf_type_vec4: return 4;
328 case cgltf_type_mat2: return 4;
329 case cgltf_type_mat3: return 9;
330 case cgltf_type_mat4: return 16;
331 default: return 0;
332 }
333}
334
335static const char* cgltf_str_from_camera_type(cgltf_camera_type camera_type)
336{
337 switch (camera_type)
338 {
339 case cgltf_camera_type_perspective: return "perspective";
340 case cgltf_camera_type_orthographic: return "orthographic";
341 default: return NULL;
342 }
343}
344
345static const char* cgltf_str_from_light_type(cgltf_light_type light_type)
346{
347 switch (light_type)
348 {
349 case cgltf_light_type_directional: return "directional";
350 case cgltf_light_type_point: return "point";
351 case cgltf_light_type_spot: return "spot";
352 default: return NULL;
353 }
354}
355
356static void cgltf_write_texture_transform(cgltf_write_context* context, const cgltf_texture_transform* transform)
357{
358 cgltf_write_line(context, "\"extensions\": {");
359 cgltf_write_line(context, "\"KHR_texture_transform\": {");
360 if (cgltf_check_floatarray(transform->offset, 2, 0.0f))
361 {
362 cgltf_write_floatarrayprop(context, "offset", transform->offset, 2);
363 }
364 cgltf_write_floatprop(context, "rotation", transform->rotation, 0.0f);
365 if (cgltf_check_floatarray(transform->scale, 2, 1.0f))
366 {
367 cgltf_write_floatarrayprop(context, "scale", transform->scale, 2);
368 }
369 cgltf_write_intprop(context, "texCoord", transform->texcoord, 0);
370 cgltf_write_line(context, "}");
371 cgltf_write_line(context, "}");
372}
373
374static void cgltf_write_asset(cgltf_write_context* context, const cgltf_asset* asset)
375{
376 cgltf_write_line(context, "\"asset\": {");
377 cgltf_write_strprop(context, "copyright", asset->copyright);
378 cgltf_write_strprop(context, "generator", asset->generator);
379 cgltf_write_strprop(context, "version", asset->version);
380	cgltf_write_strprop(context, "minVersion", asset->min_version);
381 cgltf_write_extras(context, &asset->extras);
382 cgltf_write_line(context, "}");
383}
384
385static void cgltf_write_primitive(cgltf_write_context* context, const cgltf_primitive* prim)
386{
387 cgltf_write_intprop(context, "mode", (int) prim->type, 4);
388 CGLTF_WRITE_IDXPROP("indices", prim->indices, context->data->accessors);
389 CGLTF_WRITE_IDXPROP("material", prim->material, context->data->materials);
390 cgltf_write_line(context, "\"attributes\": {");
391 for (cgltf_size i = 0; i < prim->attributes_count; ++i)
392 {
393 const cgltf_attribute* attr = prim->attributes + i;
394 CGLTF_WRITE_IDXPROP(attr->name, attr->data, context->data->accessors);
395 }
396 cgltf_write_line(context, "}");
397
398 if (prim->targets_count)
399 {
400 cgltf_write_line(context, "\"targets\": [");
401 for (cgltf_size i = 0; i < prim->targets_count; ++i)
402 {
403 cgltf_write_line(context, "{");
404 for (cgltf_size j = 0; j < prim->targets[i].attributes_count; ++j)
405 {
406 const cgltf_attribute* attr = prim->targets[i].attributes + j;
407 CGLTF_WRITE_IDXPROP(attr->name, attr->data, context->data->accessors);
408 }
409 cgltf_write_line(context, "}");
410 }
411 cgltf_write_line(context, "]");
412 }
413 cgltf_write_extras(context, &prim->extras);
414
415 cgltf_bool has_extensions = prim->has_draco_mesh_compression;
416 if (has_extensions) {
417 cgltf_write_line(context, "\"extensions\": {");
418
419 if (prim->has_draco_mesh_compression) {
420 context->extension_flags |= CGLTF_EXTENSION_FLAG_DRACO_MESH_COMPRESSION;
421 if (prim->attributes_count == 0 || prim->indices == 0) {
422 context->required_extension_flags |= CGLTF_EXTENSION_FLAG_DRACO_MESH_COMPRESSION;
423 }
424
425 cgltf_write_line(context, "\"KHR_draco_mesh_compression\": {");
426 CGLTF_WRITE_IDXPROP("bufferView", prim->draco_mesh_compression.buffer_view, context->data->buffer_views);
427 cgltf_write_line(context, "\"attributes\": {");
428 for (cgltf_size i = 0; i < prim->draco_mesh_compression.attributes_count; ++i)
429 {
430 const cgltf_attribute* attr = prim->draco_mesh_compression.attributes + i;
431 CGLTF_WRITE_IDXPROP(attr->name, attr->data, context->data->accessors);
432 }
433 cgltf_write_line(context, "}");
434 cgltf_write_line(context, "}");
435 }
436
437 cgltf_write_line(context, "}");
438 }
439}
440
441static void cgltf_write_mesh(cgltf_write_context* context, const cgltf_mesh* mesh)
442{
443 cgltf_write_line(context, "{");
444 cgltf_write_strprop(context, "name", mesh->name);
445
446 cgltf_write_line(context, "\"primitives\": [");
447 for (cgltf_size i = 0; i < mesh->primitives_count; ++i)
448 {
449 cgltf_write_line(context, "{");
450 cgltf_write_primitive(context, mesh->primitives + i);
451 cgltf_write_line(context, "}");
452 }
453 cgltf_write_line(context, "]");
454
455 if (mesh->weights_count > 0)
456 {
457 cgltf_write_floatarrayprop(context, "weights", mesh->weights, mesh->weights_count);
458 }
459 cgltf_write_extras(context, &mesh->extras);
460 cgltf_write_line(context, "}");
461}
462
463static void cgltf_write_buffer_view(cgltf_write_context* context, const cgltf_buffer_view* view)
464{
465 cgltf_write_line(context, "{");
466 CGLTF_WRITE_IDXPROP("buffer", view->buffer, context->data->buffers);
467 cgltf_write_intprop(context, "byteLength", (int)view->size, -1);
468 cgltf_write_intprop(context, "byteOffset", (int)view->offset, 0);
469 cgltf_write_intprop(context, "byteStride", (int)view->stride, 0);
470 // NOTE: We skip writing "target" because the spec says its usage can be inferred.
471 cgltf_write_extras(context, &view->extras);
472 cgltf_write_line(context, "}");
473}
474
475
476static void cgltf_write_buffer(cgltf_write_context* context, const cgltf_buffer* buffer)
477{
478 cgltf_write_line(context, "{");
479 cgltf_write_strprop(context, "uri", buffer->uri);
480 cgltf_write_intprop(context, "byteLength", (int)buffer->size, -1);
481 cgltf_write_extras(context, &buffer->extras);
482 cgltf_write_line(context, "}");
483}
484
485static void cgltf_write_material(cgltf_write_context* context, const cgltf_material* material)
486{
487 cgltf_write_line(context, "{");
488 cgltf_write_strprop(context, "name", material->name);
489 cgltf_write_floatprop(context, "alphaCutoff", material->alpha_cutoff, 0.5f);
490 cgltf_write_boolprop_optional(context, "doubleSided", material->double_sided, false);
491 // cgltf_write_boolprop_optional(context, "unlit", material->unlit, false);
492
493 if (material->unlit)
494 {
495 context->extension_flags |= CGLTF_EXTENSION_FLAG_MATERIALS_UNLIT;
496 }
497
498 if (material->has_pbr_specular_glossiness)
499 {
500 context->extension_flags |= CGLTF_EXTENSION_FLAG_SPECULAR_GLOSSINESS;
501 }
502
503 if (material->has_clearcoat)
504 {
505 context->extension_flags |= CGLTF_EXTENSION_FLAG_MATERIALS_CLEARCOAT;
506 }
507
508 if (material->has_transmission)
509 {
510 context->extension_flags |= CGLTF_EXTENSION_FLAG_MATERIALS_TRANSMISSION;
511 }
512
513 if (material->has_ior)
514 {
515 context->extension_flags |= CGLTF_EXTENSION_FLAG_MATERIALS_IOR;
516 }
517
518 if (material->has_specular)
519 {
520 context->extension_flags |= CGLTF_EXTENSION_FLAG_MATERIALS_SPECULAR;
521 }
522
523 if (material->has_pbr_metallic_roughness)
524 {
525 const cgltf_pbr_metallic_roughness* params = &material->pbr_metallic_roughness;
526 cgltf_write_line(context, "\"pbrMetallicRoughness\": {");
527 CGLTF_WRITE_TEXTURE_INFO("baseColorTexture", params->base_color_texture);
528 CGLTF_WRITE_TEXTURE_INFO("metallicRoughnessTexture", params->metallic_roughness_texture);
529 cgltf_write_floatprop(context, "metallicFactor", params->metallic_factor, 1.0f);
530 cgltf_write_floatprop(context, "roughnessFactor", params->roughness_factor, 1.0f);
531 if (cgltf_check_floatarray(params->base_color_factor, 4, 1.0f))
532 {
533 cgltf_write_floatarrayprop(context, "baseColorFactor", params->base_color_factor, 4);
534 }
535 cgltf_write_extras(context, &params->extras);
536 cgltf_write_line(context, "}");
537 }
538
539 if (material->unlit || material->has_pbr_specular_glossiness || material->has_clearcoat || material->has_ior || material->has_specular || material->has_transmission)
540 {
541 cgltf_write_line(context, "\"extensions\": {");
542 if (material->has_clearcoat)
543 {
544 const cgltf_clearcoat* params = &material->clearcoat;
545 cgltf_write_line(context, "\"KHR_materials_clearcoat\": {");
546 CGLTF_WRITE_TEXTURE_INFO("clearcoatTexture", params->clearcoat_texture);
547 CGLTF_WRITE_TEXTURE_INFO("clearcoatRoughnessTexture", params->clearcoat_roughness_texture);
548 CGLTF_WRITE_TEXTURE_INFO("clearcoatNormalTexture", params->clearcoat_normal_texture);
549 cgltf_write_floatprop(context, "clearcoatFactor", params->clearcoat_factor, 0.0f);
550 cgltf_write_floatprop(context, "clearcoatRoughnessFactor", params->clearcoat_roughness_factor, 0.0f);
551 cgltf_write_line(context, "}");
552 }
553 if (material->has_ior)
554 {
555 const cgltf_ior* params = &material->ior;
556 cgltf_write_line(context, "\"KHR_materials_ior\": {");
557 cgltf_write_floatprop(context, "ior", params->ior, 1.5f);
558 cgltf_write_line(context, "}");
559 }
560 if (material->has_specular)
561 {
562 const cgltf_specular* params = &material->specular;
563 cgltf_write_line(context, "\"KHR_materials_specular\": {");
564 CGLTF_WRITE_TEXTURE_INFO("specularTexture", params->specular_texture);
565 cgltf_write_floatprop(context, "specularFactor", params->specular_factor, 1.0f);
566 if (cgltf_check_floatarray(params->specular_color_factor, 3, 1.0f))
567 {
568 cgltf_write_floatarrayprop(context, "specularColorFactor", params->specular_color_factor, 3);
569 }
570 cgltf_write_line(context, "}");
571 }
572 if (material->has_transmission)
573 {
574 const cgltf_transmission* params = &material->transmission;
575 cgltf_write_line(context, "\"KHR_materials_transmission\": {");
576 CGLTF_WRITE_TEXTURE_INFO("transmissionTexture", params->transmission_texture);
577 cgltf_write_floatprop(context, "transmissionFactor", params->transmission_factor, 0.0f);
578 cgltf_write_line(context, "}");
579 }
580 if (material->has_pbr_specular_glossiness)
581 {
582 const cgltf_pbr_specular_glossiness* params = &material->pbr_specular_glossiness;
583 cgltf_write_line(context, "\"KHR_materials_pbrSpecularGlossiness\": {");
584 CGLTF_WRITE_TEXTURE_INFO("diffuseTexture", params->diffuse_texture);
585 CGLTF_WRITE_TEXTURE_INFO("specularGlossinessTexture", params->specular_glossiness_texture);
586 if (cgltf_check_floatarray(params->diffuse_factor, 4, 1.0f))
587 {
588			cgltf_write_floatarrayprop(context, "diffuseFactor", params->diffuse_factor, 4);
589 }
590 if (cgltf_check_floatarray(params->specular_factor, 3, 1.0f))
591 {
592 cgltf_write_floatarrayprop(context, "specularFactor", params->specular_factor, 3);
593 }
594 cgltf_write_floatprop(context, "glossinessFactor", params->glossiness_factor, 1.0f);
595 cgltf_write_line(context, "}");
596 }
597 if (material->unlit)
598 {
599 cgltf_write_line(context, "\"KHR_materials_unlit\": {}");
600 }
601 cgltf_write_line(context, "}");
602 }
603
604 CGLTF_WRITE_TEXTURE_INFO("normalTexture", material->normal_texture);
605 CGLTF_WRITE_TEXTURE_INFO("occlusionTexture", material->occlusion_texture);
606 CGLTF_WRITE_TEXTURE_INFO("emissiveTexture", material->emissive_texture);
607 if (cgltf_check_floatarray(material->emissive_factor, 3, 0.0f))
608 {
609 cgltf_write_floatarrayprop(context, "emissiveFactor", material->emissive_factor, 3);
610 }
611 cgltf_write_strprop(context, "alphaMode", cgltf_str_from_alpha_mode(material->alpha_mode));
612 cgltf_write_extras(context, &material->extras);
613 cgltf_write_line(context, "}");
614}
615
616static void cgltf_write_image(cgltf_write_context* context, const cgltf_image* image)
617{
618 cgltf_write_line(context, "{");
619 cgltf_write_strprop(context, "name", image->name);
620 cgltf_write_strprop(context, "uri", image->uri);
621 CGLTF_WRITE_IDXPROP("bufferView", image->buffer_view, context->data->buffer_views);
622 cgltf_write_strprop(context, "mimeType", image->mime_type);
623 cgltf_write_extras(context, &image->extras);
624 cgltf_write_line(context, "}");
625}
626
627static void cgltf_write_texture(cgltf_write_context* context, const cgltf_texture* texture)
628{
629 cgltf_write_line(context, "{");
630 cgltf_write_strprop(context, "name", texture->name);
631 CGLTF_WRITE_IDXPROP("source", texture->image, context->data->images);
632 CGLTF_WRITE_IDXPROP("sampler", texture->sampler, context->data->samplers);
633 cgltf_write_extras(context, &texture->extras);
634 cgltf_write_line(context, "}");
635}
636
637static void cgltf_write_skin(cgltf_write_context* context, const cgltf_skin* skin)
638{
639 cgltf_write_line(context, "{");
640 CGLTF_WRITE_IDXPROP("skeleton", skin->skeleton, context->data->nodes);
641 CGLTF_WRITE_IDXPROP("inverseBindMatrices", skin->inverse_bind_matrices, context->data->accessors);
642 CGLTF_WRITE_IDXARRPROP("joints", skin->joints_count, skin->joints, context->data->nodes);
643 cgltf_write_strprop(context, "name", skin->name);
644 cgltf_write_extras(context, &skin->extras);
645 cgltf_write_line(context, "}");
646}
647
648static const char* cgltf_write_str_path_type(cgltf_animation_path_type path_type)
649{
650 switch (path_type)
651 {
652 case cgltf_animation_path_type_translation:
653 return "translation";
654 case cgltf_animation_path_type_rotation:
655 return "rotation";
656 case cgltf_animation_path_type_scale:
657 return "scale";
658 case cgltf_animation_path_type_weights:
659 return "weights";
660 case cgltf_animation_path_type_invalid:
661 break;
662 }
663 return "invalid";
664}
665
666static const char* cgltf_write_str_interpolation_type(cgltf_interpolation_type interpolation_type)
667{
668 switch (interpolation_type)
669 {
670 case cgltf_interpolation_type_linear:
671 return "LINEAR";
672 case cgltf_interpolation_type_step:
673 return "STEP";
674 case cgltf_interpolation_type_cubic_spline:
675 return "CUBICSPLINE";
676 }
677 return "invalid";
678}
679
680static void cgltf_write_path_type(cgltf_write_context* context, const char *label, cgltf_animation_path_type path_type)
681{
682 cgltf_write_strprop(context, label, cgltf_write_str_path_type(path_type));
683}
684
685static void cgltf_write_interpolation_type(cgltf_write_context* context, const char *label, cgltf_interpolation_type interpolation_type)
686{
687 cgltf_write_strprop(context, label, cgltf_write_str_interpolation_type(interpolation_type));
688}
689
690static void cgltf_write_animation_sampler(cgltf_write_context* context, const cgltf_animation_sampler* animation_sampler)
691{
692 cgltf_write_line(context, "{");
693 cgltf_write_interpolation_type(context, "interpolation", animation_sampler->interpolation);
694 CGLTF_WRITE_IDXPROP("input", animation_sampler->input, context->data->accessors);
695 CGLTF_WRITE_IDXPROP("output", animation_sampler->output, context->data->accessors);
696 cgltf_write_extras(context, &animation_sampler->extras);
697 cgltf_write_line(context, "}");
698}
699
700static void cgltf_write_animation_channel(cgltf_write_context* context, const cgltf_animation* animation, const cgltf_animation_channel* animation_channel)
701{
702 cgltf_write_line(context, "{");
703 CGLTF_WRITE_IDXPROP("sampler", animation_channel->sampler, animation->samplers);
704 cgltf_write_line(context, "\"target\": {");
705 CGLTF_WRITE_IDXPROP("node", animation_channel->target_node, context->data->nodes);
706 cgltf_write_path_type(context, "path", animation_channel->target_path);
707 cgltf_write_line(context, "}");
708 cgltf_write_extras(context, &animation_channel->extras);
709 cgltf_write_line(context, "}");
710}
711
712static void cgltf_write_animation(cgltf_write_context* context, const cgltf_animation* animation)
713{
714 cgltf_write_line(context, "{");
715 cgltf_write_strprop(context, "name", animation->name);
716
717 if (animation->samplers_count > 0)
718 {
719 cgltf_write_line(context, "\"samplers\": [");
720 for (cgltf_size i = 0; i < animation->samplers_count; ++i)
721 {
722 cgltf_write_animation_sampler(context, animation->samplers + i);
723 }
724 cgltf_write_line(context, "]");
725 }
726 if (animation->channels_count > 0)
727 {
728 cgltf_write_line(context, "\"channels\": [");
729 for (cgltf_size i = 0; i < animation->channels_count; ++i)
730 {
731 cgltf_write_animation_channel(context, animation, animation->channels + i);
732 }
733 cgltf_write_line(context, "]");
734 }
735 cgltf_write_extras(context, &animation->extras);
736 cgltf_write_line(context, "}");
737}
738
739static void cgltf_write_sampler(cgltf_write_context* context, const cgltf_sampler* sampler)
740{
741 cgltf_write_line(context, "{");
742 cgltf_write_intprop(context, "magFilter", sampler->mag_filter, 0);
743 cgltf_write_intprop(context, "minFilter", sampler->min_filter, 0);
744 cgltf_write_intprop(context, "wrapS", sampler->wrap_s, 10497);
745 cgltf_write_intprop(context, "wrapT", sampler->wrap_t, 10497);
746 cgltf_write_extras(context, &sampler->extras);
747 cgltf_write_line(context, "}");
748}
749
750static void cgltf_write_node(cgltf_write_context* context, const cgltf_node* node)
751{
752 cgltf_write_line(context, "{");
753 CGLTF_WRITE_IDXARRPROP("children", node->children_count, node->children, context->data->nodes);
754 CGLTF_WRITE_IDXPROP("mesh", node->mesh, context->data->meshes);
755 cgltf_write_strprop(context, "name", node->name);
756 if (node->has_matrix)
757 {
758 cgltf_write_floatarrayprop(context, "matrix", node->matrix, 16);
759 }
760 if (node->has_translation)
761 {
762 cgltf_write_floatarrayprop(context, "translation", node->translation, 3);
763 }
764 if (node->has_rotation)
765 {
766 cgltf_write_floatarrayprop(context, "rotation", node->rotation, 4);
767 }
768 if (node->has_scale)
769 {
770 cgltf_write_floatarrayprop(context, "scale", node->scale, 3);
771 }
772 if (node->skin)
773 {
774 CGLTF_WRITE_IDXPROP("skin", node->skin, context->data->skins);
775 }
776
777 if (node->light)
778 {
779 context->extension_flags |= CGLTF_EXTENSION_FLAG_LIGHTS_PUNCTUAL;
780 cgltf_write_line(context, "\"extensions\": {");
781 cgltf_write_line(context, "\"KHR_lights_punctual\": {");
782 CGLTF_WRITE_IDXPROP("light", node->light, context->data->lights);
783 cgltf_write_line(context, "}");
784 cgltf_write_line(context, "}");
785 }
786
787 if (node->weights_count > 0)
788 {
789 cgltf_write_floatarrayprop(context, "weights", node->weights, node->weights_count);
790 }
791
792 if (node->camera)
793 {
794 CGLTF_WRITE_IDXPROP("camera", node->camera, context->data->cameras);
795 }
796
797 cgltf_write_extras(context, &node->extras);
798 cgltf_write_line(context, "}");
799}
800
801static void cgltf_write_scene(cgltf_write_context* context, const cgltf_scene* scene)
802{
803 cgltf_write_line(context, "{");
804 cgltf_write_strprop(context, "name", scene->name);
805 CGLTF_WRITE_IDXARRPROP("nodes", scene->nodes_count, scene->nodes, context->data->nodes);
806 cgltf_write_extras(context, &scene->extras);
807 cgltf_write_line(context, "}");
808}
809
810static void cgltf_write_accessor(cgltf_write_context* context, const cgltf_accessor* accessor)
811{
812 cgltf_write_line(context, "{");
813 CGLTF_WRITE_IDXPROP("bufferView", accessor->buffer_view, context->data->buffer_views);
814 cgltf_write_intprop(context, "componentType", cgltf_int_from_component_type(accessor->component_type), 0);
815 cgltf_write_strprop(context, "type", cgltf_str_from_type(accessor->type));
816 cgltf_size dim = cgltf_dim_from_type(accessor->type);
817 cgltf_write_boolprop_optional(context, "normalized", accessor->normalized, false);
818 cgltf_write_intprop(context, "byteOffset", (int)accessor->offset, 0);
819 cgltf_write_intprop(context, "count", (int)accessor->count, -1);
820 if (accessor->has_min)
821 {
822 cgltf_write_floatarrayprop(context, "min", accessor->min, dim);
823 }
824 if (accessor->has_max)
825 {
826 cgltf_write_floatarrayprop(context, "max", accessor->max, dim);
827 }
828 if (accessor->is_sparse)
829 {
830 cgltf_write_line(context, "\"sparse\": {");
831 cgltf_write_intprop(context, "count", (int)accessor->sparse.count, 0);
832 cgltf_write_line(context, "\"indices\": {");
833 cgltf_write_intprop(context, "byteOffset", (int)accessor->sparse.indices_byte_offset, 0);
834 CGLTF_WRITE_IDXPROP("bufferView", accessor->sparse.indices_buffer_view, context->data->buffer_views);
835 cgltf_write_intprop(context, "componentType", cgltf_int_from_component_type(accessor->sparse.indices_component_type), 0);
836 cgltf_write_extras(context, &accessor->sparse.indices_extras);
837 cgltf_write_line(context, "}");
838 cgltf_write_line(context, "\"values\": {");
839 cgltf_write_intprop(context, "byteOffset", (int)accessor->sparse.values_byte_offset, 0);
840 CGLTF_WRITE_IDXPROP("bufferView", accessor->sparse.values_buffer_view, context->data->buffer_views);
841 cgltf_write_extras(context, &accessor->sparse.values_extras);
842 cgltf_write_line(context, "}");
843 cgltf_write_extras(context, &accessor->sparse.extras);
844 cgltf_write_line(context, "}");
845 }
846 cgltf_write_extras(context, &accessor->extras);
847 cgltf_write_line(context, "}");
848}
849
850static void cgltf_write_camera(cgltf_write_context* context, const cgltf_camera* camera)
851{
852 cgltf_write_line(context, "{");
853 cgltf_write_strprop(context, "type", cgltf_str_from_camera_type(camera->type));
854 if (camera->name)
855 {
856 cgltf_write_strprop(context, "name", camera->name);
857 }
858
859 if (camera->type == cgltf_camera_type_orthographic)
860 {
861 cgltf_write_line(context, "\"orthographic\": {");
862 cgltf_write_floatprop(context, "xmag", camera->data.orthographic.xmag, -1.0f);
863 cgltf_write_floatprop(context, "ymag", camera->data.orthographic.ymag, -1.0f);
864 cgltf_write_floatprop(context, "zfar", camera->data.orthographic.zfar, -1.0f);
865 cgltf_write_floatprop(context, "znear", camera->data.orthographic.znear, -1.0f);
866 cgltf_write_extras(context, &camera->data.orthographic.extras);
867 cgltf_write_line(context, "}");
868 }
869 else if (camera->type == cgltf_camera_type_perspective)
870 {
871 cgltf_write_line(context, "\"perspective\": {");
872 cgltf_write_floatprop(context, "aspectRatio", camera->data.perspective.aspect_ratio, -1.0f);
873 cgltf_write_floatprop(context, "yfov", camera->data.perspective.yfov, -1.0f);
874 cgltf_write_floatprop(context, "zfar", camera->data.perspective.zfar, -1.0f);
875 cgltf_write_floatprop(context, "znear", camera->data.perspective.znear, -1.0f);
876 cgltf_write_extras(context, &camera->data.perspective.extras);
877 cgltf_write_line(context, "}");
878 }
879 cgltf_write_extras(context, &camera->extras);
880 cgltf_write_line(context, "}");
881}
882
883static void cgltf_write_light(cgltf_write_context* context, const cgltf_light* light)
884{
885 cgltf_write_line(context, "{");
886 cgltf_write_strprop(context, "type", cgltf_str_from_light_type(light->type));
887 if (light->name)
888 {
889 cgltf_write_strprop(context, "name", light->name);
890 }
891 if (cgltf_check_floatarray(light->color, 3, 1.0f))
892 {
893 cgltf_write_floatarrayprop(context, "color", light->color, 3);
894 }
895 cgltf_write_floatprop(context, "intensity", light->intensity, 1.0f);
896 cgltf_write_floatprop(context, "range", light->range, 0.0f);
897
898 if (light->type == cgltf_light_type_spot)
899 {
900 cgltf_write_line(context, "\"spot\": {");
901 cgltf_write_floatprop(context, "innerConeAngle", light->spot_inner_cone_angle, 0.0f);
902 cgltf_write_floatprop(context, "outerConeAngle", light->spot_outer_cone_angle, 3.14159265358979323846f/4.0f);
903 cgltf_write_line(context, "}");
904 }
905 cgltf_write_line(context, "}");
906}
907
908cgltf_result cgltf_write_file(const cgltf_options* options, const char* path, const cgltf_data* data)
909{
910 cgltf_size expected = cgltf_write(options, NULL, 0, data);
911	char* buffer = (char*) malloc(expected);
	if (!buffer)
	{
		return cgltf_result_out_of_memory;
	}
912 cgltf_size actual = cgltf_write(options, buffer, expected, data);
913 if (expected != actual) {
914 fprintf(stderr, "Error: expected %zu bytes but wrote %zu bytes.\n", expected, actual);
915 }
916 FILE* file = fopen(path, "wt");
917 if (!file)
918 {
919		free(buffer); /* release the staging buffer before the early return */
		return cgltf_result_file_not_found;
920 }
921 // Note that cgltf_write() includes a null terminator, which we omit from the file content.
922 fwrite(buffer, actual - 1, 1, file);
923 fclose(file);
924 free(buffer);
925 return cgltf_result_success;
926}
927
928static void cgltf_write_extensions(cgltf_write_context* context, uint32_t extension_flags)
929{
930 if (extension_flags & CGLTF_EXTENSION_FLAG_TEXTURE_TRANSFORM) {
931 cgltf_write_stritem(context, "KHR_texture_transform");
932 }
933 if (extension_flags & CGLTF_EXTENSION_FLAG_MATERIALS_UNLIT) {
934 cgltf_write_stritem(context, "KHR_materials_unlit");
935 }
936 if (extension_flags & CGLTF_EXTENSION_FLAG_SPECULAR_GLOSSINESS) {
937 cgltf_write_stritem(context, "KHR_materials_pbrSpecularGlossiness");
938 }
939 if (extension_flags & CGLTF_EXTENSION_FLAG_LIGHTS_PUNCTUAL) {
940 cgltf_write_stritem(context, "KHR_lights_punctual");
941 }
942 if (extension_flags & CGLTF_EXTENSION_FLAG_DRACO_MESH_COMPRESSION) {
943 cgltf_write_stritem(context, "KHR_draco_mesh_compression");
944 }
945 if (extension_flags & CGLTF_EXTENSION_FLAG_MATERIALS_CLEARCOAT) {
946 cgltf_write_stritem(context, "KHR_materials_clearcoat");
947 }
948 if (extension_flags & CGLTF_EXTENSION_FLAG_MATERIALS_IOR) {
949 cgltf_write_stritem(context, "KHR_materials_ior");
950 }
951 if (extension_flags & CGLTF_EXTENSION_FLAG_MATERIALS_SPECULAR) {
952 cgltf_write_stritem(context, "KHR_materials_specular");
953 }
954 if (extension_flags & CGLTF_EXTENSION_FLAG_MATERIALS_TRANSMISSION) {
955 cgltf_write_stritem(context, "KHR_materials_transmission");
956 }
957}
958
959cgltf_size cgltf_write(const cgltf_options* options, char* buffer, cgltf_size size, const cgltf_data* data)
960{
961 (void)options;
962 cgltf_write_context ctx;
963 ctx.buffer = buffer;
964 ctx.buffer_size = size;
965 ctx.remaining = size;
966 ctx.cursor = buffer;
967 ctx.chars_written = 0;
968 ctx.data = data;
969 ctx.depth = 1;
970 ctx.indent = " ";
971 ctx.needs_comma = 0;
972 ctx.extension_flags = 0;
973 ctx.required_extension_flags = 0;
974
975 cgltf_write_context* context = &ctx;
976
977 CGLTF_SPRINTF("{");
978
979 if (data->accessors_count > 0)
980 {
981 cgltf_write_line(context, "\"accessors\": [");
982 for (cgltf_size i = 0; i < data->accessors_count; ++i)
983 {
984 cgltf_write_accessor(context, data->accessors + i);
985 }
986 cgltf_write_line(context, "]");
987 }
988
989 cgltf_write_asset(context, &data->asset);
990
991 if (data->buffer_views_count > 0)
992 {
993 cgltf_write_line(context, "\"bufferViews\": [");
994 for (cgltf_size i = 0; i < data->buffer_views_count; ++i)
995 {
996 cgltf_write_buffer_view(context, data->buffer_views + i);
997 }
998 cgltf_write_line(context, "]");
999 }
1000
1001 if (data->buffers_count > 0)
1002 {
1003 cgltf_write_line(context, "\"buffers\": [");
1004 for (cgltf_size i = 0; i < data->buffers_count; ++i)
1005 {
1006 cgltf_write_buffer(context, data->buffers + i);
1007 }
1008 cgltf_write_line(context, "]");
1009 }
1010
1011 if (data->images_count > 0)
1012 {
1013 cgltf_write_line(context, "\"images\": [");
1014 for (cgltf_size i = 0; i < data->images_count; ++i)
1015 {
1016 cgltf_write_image(context, data->images + i);
1017 }
1018 cgltf_write_line(context, "]");
1019 }
1020
1021 if (data->meshes_count > 0)
1022 {
1023 cgltf_write_line(context, "\"meshes\": [");
1024 for (cgltf_size i = 0; i < data->meshes_count; ++i)
1025 {
1026 cgltf_write_mesh(context, data->meshes + i);
1027 }
1028 cgltf_write_line(context, "]");
1029 }
1030
1031 if (data->materials_count > 0)
1032 {
1033 cgltf_write_line(context, "\"materials\": [");
1034 for (cgltf_size i = 0; i < data->materials_count; ++i)
1035 {
1036 cgltf_write_material(context, data->materials + i);
1037 }
1038 cgltf_write_line(context, "]");
1039 }
1040
1041 if (data->nodes_count > 0)
1042 {
1043 cgltf_write_line(context, "\"nodes\": [");
1044 for (cgltf_size i = 0; i < data->nodes_count; ++i)
1045 {
1046 cgltf_write_node(context, data->nodes + i);
1047 }
1048 cgltf_write_line(context, "]");
1049 }
1050
1051 if (data->samplers_count > 0)
1052 {
1053 cgltf_write_line(context, "\"samplers\": [");
1054 for (cgltf_size i = 0; i < data->samplers_count; ++i)
1055 {
1056 cgltf_write_sampler(context, data->samplers + i);
1057 }
1058 cgltf_write_line(context, "]");
1059 }
1060
1061 CGLTF_WRITE_IDXPROP("scene", data->scene, data->scenes);
1062
1063 if (data->scenes_count > 0)
1064 {
1065 cgltf_write_line(context, "\"scenes\": [");
1066 for (cgltf_size i = 0; i < data->scenes_count; ++i)
1067 {
1068 cgltf_write_scene(context, data->scenes + i);
1069 }
1070 cgltf_write_line(context, "]");
1071 }
1072
1073 if (data->textures_count > 0)
1074 {
1075 cgltf_write_line(context, "\"textures\": [");
1076 for (cgltf_size i = 0; i < data->textures_count; ++i)
1077 {
1078 cgltf_write_texture(context, data->textures + i);
1079 }
1080 cgltf_write_line(context, "]");
1081 }
1082
1083 if (data->skins_count > 0)
1084 {
1085 cgltf_write_line(context, "\"skins\": [");
1086 for (cgltf_size i = 0; i < data->skins_count; ++i)
1087 {
1088 cgltf_write_skin(context, data->skins + i);
1089 }
1090 cgltf_write_line(context, "]");
1091 }
1092
1093 if (data->animations_count > 0)
1094 {
1095 cgltf_write_line(context, "\"animations\": [");
1096 for (cgltf_size i = 0; i < data->animations_count; ++i)
1097 {
1098 cgltf_write_animation(context, data->animations + i);
1099 }
1100 cgltf_write_line(context, "]");
1101 }
1102
1103 if (data->cameras_count > 0)
1104 {
1105 cgltf_write_line(context, "\"cameras\": [");
1106 for (cgltf_size i = 0; i < data->cameras_count; ++i)
1107 {
1108 cgltf_write_camera(context, data->cameras + i);
1109 }
1110 cgltf_write_line(context, "]");
1111 }
1112
1113 if (data->lights_count > 0)
1114 {
1115 cgltf_write_line(context, "\"extensions\": {");
1116
1117 cgltf_write_line(context, "\"KHR_lights_punctual\": {");
1118 cgltf_write_line(context, "\"lights\": [");
1119 for (cgltf_size i = 0; i < data->lights_count; ++i)
1120 {
1121 cgltf_write_light(context, data->lights + i);
1122 }
1123 cgltf_write_line(context, "]");
1124 cgltf_write_line(context, "}");
1125
1126 cgltf_write_line(context, "}");
1127 }
1128
1129 if (context->extension_flags != 0) {
1130 cgltf_write_line(context, "\"extensionsUsed\": [");
1131 cgltf_write_extensions(context, context->extension_flags);
1132 cgltf_write_line(context, "]");
1133 }
1134
1135 if (context->required_extension_flags != 0) {
1136 cgltf_write_line(context, "\"extensionsRequired\": [");
1137 cgltf_write_extensions(context, context->required_extension_flags);
1138 cgltf_write_line(context, "]");
1139 }
1140
1141 cgltf_write_extras(context, &data->extras);
1142
1143 CGLTF_SPRINTF("\n}\n");
1144
1145 // snprintf does not include the null terminator in its return value, so be sure to include it
1146 // in the returned byte count.
1147 return 1 + ctx.chars_written;
1148}
1149
1150#endif /* #ifdef CGLTF_WRITE_IMPLEMENTATION */
1151
1152/* cgltf is distributed under MIT license:
1153 *
1154 * Copyright (c) 2019 Philip Rideout
1155
1156 * Permission is hereby granted, free of charge, to any person obtaining a copy
1157 * of this software and associated documentation files (the "Software"), to deal
1158 * in the Software without restriction, including without limitation the rights
1159 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1160 * copies of the Software, and to permit persons to whom the Software is
1161 * furnished to do so, subject to the following conditions:
1162
1163 * The above copyright notice and this permission notice shall be included in all
1164 * copies or substantial portions of the Software.
1165
1166 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1167 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1168 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1169 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1170 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1171 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
1172 * SOFTWARE.
1173 */
diff --git a/contrib/stb/CMakeLists.txt b/contrib/stb/CMakeLists.txt
new file mode 100644
index 0000000..8cee003
--- /dev/null
+++ b/contrib/stb/CMakeLists.txt
@@ -0,0 +1,8 @@
1cmake_minimum_required(VERSION 3.16)
2
3project(stb)
4
5add_library(stb INTERFACE)
6
7target_include_directories(stb INTERFACE
8 ${CMAKE_CURRENT_SOURCE_DIR})
diff --git a/contrib/stb/stb_image.h b/contrib/stb/stb_image.h
new file mode 100644
index 0000000..97038e6
--- /dev/null
+++ b/contrib/stb/stb_image.h
@@ -0,0 +1,7762 @@
1/* stb_image - v2.26 - public domain image loader - http://nothings.org/stb
2 no warranty implied; use at your own risk
3
4 Do this:
5 #define STB_IMAGE_IMPLEMENTATION
6 before you include this file in *one* C or C++ file to create the implementation.
7
8 // i.e. it should look like this:
9 #include ...
10 #include ...
11 #include ...
12 #define STB_IMAGE_IMPLEMENTATION
13 #include "stb_image.h"
14
15 You can #define STBI_ASSERT(x) before the #include to avoid using assert.h.
16 And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free
17
18
19 QUICK NOTES:
20 Primarily of interest to game developers and other people who can
21 avoid problematic images and only need the trivial interface
22
23 JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib)
24 PNG 1/2/4/8/16-bit-per-channel
25
26 TGA (not sure what subset, if a subset)
27 BMP non-1bpp, non-RLE
28 PSD (composited view only, no extra channels, 8/16 bit-per-channel)
29
30 GIF (*comp always reports as 4-channel)
31 HDR (radiance rgbE format)
32 PIC (Softimage PIC)
33 PNM (PPM and PGM binary only)
34
35 Animated GIF still needs a proper API, but here's one way to do it:
36 http://gist.github.com/urraka/685d9a6340b26b830d49
37
38 - decode from memory or through FILE (define STBI_NO_STDIO to remove code)
39 - decode from arbitrary I/O callbacks
40 - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON)
41
42 Full documentation under "DOCUMENTATION" below.
43
44
45LICENSE
46
47 See end of file for license information.
48
49RECENT REVISION HISTORY:
50
51 2.26 (2020-07-13) many minor fixes
52 2.25 (2020-02-02) fix warnings
53 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically
54 2.23 (2019-08-11) fix clang static analysis warning
55 2.22 (2019-03-04) gif fixes, fix warnings
56 2.21 (2019-02-25) fix typo in comment
57 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs
58 2.19 (2018-02-11) fix warning
59 2.18 (2018-01-30) fix warnings
60 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings
61 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes
62 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC
63 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs
64 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes
65 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes
66 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64
67 RGB-format JPEG; remove white matting in PSD;
68 allocate large structures on the stack;
69 correct channel count for PNG & BMP
70 2.10 (2016-01-22) avoid warning introduced in 2.09
71 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED
72
73 See end of file for full revision history.
74
75
76 ============================ Contributors =========================
77
78 Image formats Extensions, features
79 Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info)
80 Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info)
81 Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG)
82 Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks)
83 Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG)
84 Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip)
85 Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD)
86 github:urraka (animated gif) Junggon Kim (PNM comments)
87 Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA)
88 socks-the-fox (16-bit PNG)
89 Jeremy Sawicki (handle all ImageNet JPGs)
90 Optimizations & bugfixes Mikhail Morozov (1-bit BMP)
91 Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query)
92 Arseny Kapoulkine
93 John-Mark Allen
94 Carmelo J Fdez-Aguera
95
96 Bug & warning fixes
97 Marc LeBlanc David Woo Guillaume George Martins Mozeiko
98 Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski
99 Phil Jordan Dave Moore Roy Eltham
100 Hayaki Saito Nathan Reed Won Chun
101 Luke Graham Johan Duparc Nick Verigakis the Horde3D community
102 Thomas Ruf Ronny Chevalier github:rlyeh
103 Janez Zemva John Bartholomew Michal Cichon github:romigrou
104 Jonathan Blow Ken Hamada Tero Hanninen github:svdijk
105 Laurent Gomila Cort Stratton github:snagar
106 Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex
107 Cass Everitt Ryamond Barbiero github:grim210
108 Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw
109 Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus
110 Josh Tobin Matthew Gregan github:poppolopoppo
111 Julian Raschke Gregory Mullen Christian Floisand github:darealshinji
112 Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007
113 Brad Weinberger Matvey Cherevko [reserved]
114 Luca Sas Alexander Veselov Zack Middleton [reserved]
115 Ryan C. Gordon [reserved] [reserved]
116 DO NOT ADD YOUR NAME HERE
117
118 To add your name to the credits, pick a random blank space in the middle and fill it.
119 80% of merge conflicts on stb PRs are due to people adding their name at the end
120 of the credits.
121*/
122
123#ifndef STBI_INCLUDE_STB_IMAGE_H
124#define STBI_INCLUDE_STB_IMAGE_H
125
126// DOCUMENTATION
127//
128// Limitations:
129// - no 12-bit-per-channel JPEG
130// - no JPEGs with arithmetic coding
131// - GIF always returns *comp=4
132//
133// Basic usage (see HDR discussion below for HDR usage):
134// int x,y,n;
135// unsigned char *data = stbi_load(filename, &x, &y, &n, 0);
136// // ... process data if not NULL ...
137// // ... x = width, y = height, n = # 8-bit components per pixel ...
138// // ... replace '0' with '1'..'4' to force that many components per pixel
139// // ... but 'n' will always be the number that it would have been if you said 0
140//    stbi_image_free(data);
141//
142// Standard parameters:
143// int *x -- outputs image width in pixels
144// int *y -- outputs image height in pixels
145// int *channels_in_file -- outputs # of image components in image file
146// int desired_channels -- if non-zero, # of image components requested in result
147//
148// The return value from an image loader is an 'unsigned char *' which points
149// to the pixel data, or NULL on an allocation failure or if the image is
150// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels,
151// with each pixel consisting of N interleaved 8-bit components; the first
152// pixel pointed to is top-left-most in the image. There is no padding between
153// image scanlines or between pixels, regardless of format. The number of
154// components N is 'desired_channels' if desired_channels is non-zero, or
155// *channels_in_file otherwise. If desired_channels is non-zero,
156// *channels_in_file has the number of components that _would_ have been
157// output otherwise. E.g. if you set desired_channels to 4, you will always
158// get RGBA output, but you can check *channels_in_file to see if it's trivially
159// opaque because e.g. there were only 3 channels in the source image.
160//
161// An output image with N components has the following components interleaved
162// in this order in each pixel:
163//
164// N=#comp components
165// 1 grey
166// 2 grey, alpha
167// 3 red, green, blue
168// 4 red, green, blue, alpha
169//
170// If image loading fails for any reason, the return value will be NULL,
171// and *x, *y, *channels_in_file will be unchanged. The function
172// stbi_failure_reason() can be queried for an extremely brief, end-user
173// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS
174// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly
175// more user-friendly ones.
176//
177// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized.
178//
179// ===========================================================================
180//
181// UNICODE:
182//
183// If compiling for Windows and you wish to use Unicode filenames, compile
184// with
185// #define STBI_WINDOWS_UTF8
186// and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert
187// Windows wchar_t filenames to utf8.
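//
// A hedged sketch of the Windows/UTF-8 path (the wide literal is purely
// illustrative):
//
//    // built with STBI_WINDOWS_UTF8 defined
//    char name_utf8[1024];
//    if (stbi_convert_wchar_to_utf8(name_utf8, sizeof(name_utf8), L"bild.png"))
//       data = stbi_load(name_utf8, &x, &y, &n, 0);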
188//
189// ===========================================================================
190//
191// Philosophy
192//
193// stb libraries are designed with the following priorities:
194//
195// 1. easy to use
196// 2. easy to maintain
197// 3. good performance
198//
199// Sometimes I let "good performance" creep up in priority over "easy to maintain",
200// and for best performance I may provide less-easy-to-use APIs that give higher
201// performance, in addition to the easy-to-use ones. Nevertheless, it's important
202// to keep in mind that from the standpoint of you, a client of this library,
203// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all.
204//
205// Some secondary priorities arise directly from the first two, some of which
206// provide more explicit reasons why performance can't be emphasized.
207//
208// - Portable ("ease of use")
209// - Small source code footprint ("easy to maintain")
210// - No dependencies ("ease of use")
211//
212// ===========================================================================
213//
214// I/O callbacks
215//
216// I/O callbacks allow you to read from arbitrary sources, like packaged
217// files or some other source. Data read from callbacks are processed
218// through a small internal buffer (currently 128 bytes) to try to reduce
219// overhead.
220//
221// The three functions you must define are "read" (reads some bytes of data),
222// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end).
223//
224// ===========================================================================
225//
226// SIMD support
227//
228// The JPEG decoder will try to automatically use SIMD kernels on x86 when
229// supported by the compiler. For ARM Neon support, you must explicitly
230// request it.
231//
232// (The old do-it-yourself SIMD API is no longer supported in the current
233// code.)
234//
235// On x86, SSE2 will automatically be used when available based on a run-time
236// test; if not, the generic C versions are used as a fall-back. On ARM targets,
237// the typical path is to have separate builds for NEON and non-NEON devices
238// (at least this is true for iOS and Android). Therefore, the NEON support is
239// toggled by a build flag: define STBI_NEON to get NEON loops.
240//
241 // If for some reason you do not want to use any of the SIMD code, or if
242// you have issues compiling it, you can disable it entirely by
243// defining STBI_NO_SIMD.
244//
245// ===========================================================================
246//
247// HDR image support (disable by defining STBI_NO_HDR)
248//
249// stb_image supports loading HDR images in general, and currently the Radiance
250// .HDR file format specifically. You can still load any file through the existing
251// interface; if you attempt to load an HDR file, it will be automatically remapped
252// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1;
253// both of these constants can be reconfigured through this interface:
254//
255// stbi_hdr_to_ldr_gamma(2.2f);
256// stbi_hdr_to_ldr_scale(1.0f);
257//
258// (note, do not use _inverse_ constants; stbi_image will invert them
259// appropriately).
260//
261// Additionally, there is a new, parallel interface for loading files as
262// (linear) floats to preserve the full dynamic range:
263//
264// float *data = stbi_loadf(filename, &x, &y, &n, 0);
265//
266// If you load LDR images through this interface, those images will
267 // be promoted to floating point values and run through the inverse of
268 // the constants corresponding to the above:
269//
270// stbi_ldr_to_hdr_scale(1.0f);
271// stbi_ldr_to_hdr_gamma(2.2f);
272//
273// Finally, given a filename (or an open file or memory block--see header
274// file for details) containing image data, you can query for the "most
275// appropriate" interface to use (that is, whether the image is HDR or
276// not), using:
277//
278// stbi_is_hdr(char *filename);
279//
280// ===========================================================================
281//
282// iPhone PNG support:
283//
284// By default we convert iphone-formatted PNGs back to RGB, even though
285// they are internally encoded differently. You can disable this conversion
286// by calling stbi_convert_iphone_png_to_rgb(0), in which case
287// you will always just get the native iphone "format" through (which
288// is BGR stored in RGB).
289//
290// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per
291// pixel to remove any premultiplied alpha *only* if the image file explicitly
292// says there's premultiplied data (currently only happens in iPhone images,
293// and only if iPhone convert-to-rgb processing is on).
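//
// Both knobs together (illustrative; note the unpremultiply only takes
// effect while convert-to-rgb is on):
//
//    stbi_convert_iphone_png_to_rgb(1);   // default: convert back to RGB
//    stbi_set_unpremultiply_on_load(1);   // and divide out premultiplied alpha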
294//
295// ===========================================================================
296//
297// ADDITIONAL CONFIGURATION
298//
299// - You can suppress implementation of any of the decoders to reduce
300// your code footprint by #defining one or more of the following
301// symbols before creating the implementation.
302//
303// STBI_NO_JPEG
304// STBI_NO_PNG
305// STBI_NO_BMP
306// STBI_NO_PSD
307// STBI_NO_TGA
308// STBI_NO_GIF
309// STBI_NO_HDR
310// STBI_NO_PIC
311// STBI_NO_PNM (.ppm and .pgm)
312//
313// - You can request *only* certain decoders and suppress all other ones
314// (this will be more forward-compatible, as addition of new decoders
315// doesn't require you to disable them explicitly):
316//
317// STBI_ONLY_JPEG
318// STBI_ONLY_PNG
319// STBI_ONLY_BMP
320// STBI_ONLY_PSD
321// STBI_ONLY_TGA
322// STBI_ONLY_GIF
323// STBI_ONLY_HDR
324// STBI_ONLY_PIC
325// STBI_ONLY_PNM (.ppm and .pgm)
326//
327// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still
328// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB
329//
330// - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater
331// than that size (in either width or height) without further processing.
332// This is to let programs in the wild set an upper bound to prevent
333// denial-of-service attacks on untrusted data, as one could generate a
334// valid image of gigantic dimensions and force stb_image to allocate a
335// huge block of memory and spend disproportionate time decoding it. By
336// default this is set to (1 << 24), which is 16777216, but that's still
337// very big.
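//
// For example, a PNG-only build with a tighter size cap might look like
// this (a sketch, not a recommendation):
//
//    #define STBI_ONLY_PNG
//    #define STBI_MAX_DIMENSIONS (1 << 13)   /* reject anything over 8192 px */
//    #define STB_IMAGE_IMPLEMENTATION
//    #include "stb_image.h"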
338
339#ifndef STBI_NO_STDIO
340#include <stdio.h>
341#endif // STBI_NO_STDIO
342
343#define STBI_VERSION 1
344
345enum
346{
347 STBI_default = 0, // only used for desired_channels
348
349 STBI_grey = 1,
350 STBI_grey_alpha = 2,
351 STBI_rgb = 3,
352 STBI_rgb_alpha = 4
353};
354
355#include <stdlib.h>
356typedef unsigned char stbi_uc;
357typedef unsigned short stbi_us;
358
359#ifdef __cplusplus
360extern "C" {
361#endif
362
363#ifndef STBIDEF
364#ifdef STB_IMAGE_STATIC
365#define STBIDEF static
366#else
367#define STBIDEF extern
368#endif
369#endif
370
371//////////////////////////////////////////////////////////////////////////////
372//
373// PRIMARY API - works on images of any type
374//
375
376//
377// load image by filename, open file, or memory buffer
378//
379
380typedef struct
381{
382 int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. return number of bytes actually read
383 void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative
384 int (*eof) (void *user); // returns nonzero if we are at end of file/data
385} stbi_io_callbacks;
386
387////////////////////////////////////
388//
389// 8-bits-per-channel interface
390//
391
392STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels);
393STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels);
394
395#ifndef STBI_NO_STDIO
396STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
397STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
398// for stbi_load_from_file, file pointer is left pointing immediately after image
399#endif
400
401#ifndef STBI_NO_GIF
402STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp);
403#endif
404
405#ifdef STBI_WINDOWS_UTF8
406STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input);
407#endif
408
409////////////////////////////////////
410//
411// 16-bits-per-channel interface
412//
413
414STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
415STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
416
417#ifndef STBI_NO_STDIO
418STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
419STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
420#endif
421
422////////////////////////////////////
423//
424// float-per-channel interface
425//
426#ifndef STBI_NO_LINEAR
427 STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
428 STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
429
430 #ifndef STBI_NO_STDIO
431 STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
432 STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
433 #endif
434#endif
435
436#ifndef STBI_NO_HDR
437 STBIDEF void stbi_hdr_to_ldr_gamma(float gamma);
438 STBIDEF void stbi_hdr_to_ldr_scale(float scale);
439#endif // STBI_NO_HDR
440
441#ifndef STBI_NO_LINEAR
442 STBIDEF void stbi_ldr_to_hdr_gamma(float gamma);
443 STBIDEF void stbi_ldr_to_hdr_scale(float scale);
444#endif // STBI_NO_LINEAR
445
446// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR
447STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user);
448STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len);
449#ifndef STBI_NO_STDIO
450STBIDEF int stbi_is_hdr (char const *filename);
451STBIDEF int stbi_is_hdr_from_file(FILE *f);
452#endif // STBI_NO_STDIO
453
454
455// get a VERY brief reason for failure
456// on most compilers (and ALL modern mainstream compilers) this is threadsafe
457STBIDEF const char *stbi_failure_reason (void);
458
459// free the loaded image -- this is just free()
460STBIDEF void stbi_image_free (void *retval_from_stbi_load);
461
462// get image dimensions & components without fully decoding
463STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp);
464STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp);
465STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len);
466STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user);
467
468#ifndef STBI_NO_STDIO
469STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp);
470STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp);
471STBIDEF int stbi_is_16_bit (char const *filename);
472STBIDEF int stbi_is_16_bit_from_file(FILE *f);
473#endif
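
// e.g. (a sketch; requires stdio) probe dimensions without decoding pixels:
//    int w, h, n;
//    if (stbi_info("photo.jpg", &w, &h, &n))
//       printf("%d x %d, %d channels\n", w, h, n);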
474
475
476
477// for image formats that explicitly notate that they have premultiplied alpha,
478// we just return the colors as stored in the file. set this flag to force
479 // unpremultiplication. results are undefined if the unpremultiply overflows.
480STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply);
481
482// indicate whether we should process iphone images back to canonical format,
483// or just pass them through "as-is"
484STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert);
485
486// flip the image vertically, so the first pixel in the output array is the bottom left
487STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip);
488
489// as above, but only applies to images loaded on the thread that calls the function
490// this function is only available if your compiler supports thread-local variables;
491// calling it will fail to link if your compiler doesn't
492STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip);
493
494// ZLIB client - used by PNG, available for other purposes
495
496STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen);
497STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header);
498STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen);
499STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
500
501STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen);
502STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
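
// e.g. (a hedged sketch; 'zbuf'/'zlen' are your own zlib-compressed data;
// the result is allocated like any stbi buffer, so stbi_image_free works):
//    int outlen;
//    char *raw = stbi_zlib_decode_malloc(zbuf, zlen, &outlen);
//    if (raw) { /* outlen bytes of inflated data */ stbi_image_free(raw); }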
503
504
505#ifdef __cplusplus
506}
507#endif
508
509//
510//
511//// end header file /////////////////////////////////////////////////////
512#endif // STBI_INCLUDE_STB_IMAGE_H
513
514#ifdef STB_IMAGE_IMPLEMENTATION
515
516#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \
517 || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \
518 || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \
519 || defined(STBI_ONLY_ZLIB)
520 #ifndef STBI_ONLY_JPEG
521 #define STBI_NO_JPEG
522 #endif
523 #ifndef STBI_ONLY_PNG
524 #define STBI_NO_PNG
525 #endif
526 #ifndef STBI_ONLY_BMP
527 #define STBI_NO_BMP
528 #endif
529 #ifndef STBI_ONLY_PSD
530 #define STBI_NO_PSD
531 #endif
532 #ifndef STBI_ONLY_TGA
533 #define STBI_NO_TGA
534 #endif
535 #ifndef STBI_ONLY_GIF
536 #define STBI_NO_GIF
537 #endif
538 #ifndef STBI_ONLY_HDR
539 #define STBI_NO_HDR
540 #endif
541 #ifndef STBI_ONLY_PIC
542 #define STBI_NO_PIC
543 #endif
544 #ifndef STBI_ONLY_PNM
545 #define STBI_NO_PNM
546 #endif
547#endif
548
549#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
550#define STBI_NO_ZLIB
551#endif
552
553
554#include <stdarg.h>
555#include <stddef.h> // ptrdiff_t on osx
556#include <stdlib.h>
557#include <string.h>
558#include <limits.h>
559
560#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
561#include <math.h> // ldexp, pow
562#endif
563
564#ifndef STBI_NO_STDIO
565#include <stdio.h>
566#endif
567
568#ifndef STBI_ASSERT
569#include <assert.h>
570#define STBI_ASSERT(x) assert(x)
571#endif
572
573#ifdef __cplusplus
574#define STBI_EXTERN extern "C"
575#else
576#define STBI_EXTERN extern
577#endif
578
579
580#ifndef _MSC_VER
581 #ifdef __cplusplus
582 #define stbi_inline inline
583 #else
584 #define stbi_inline
585 #endif
586#else
587 #define stbi_inline __forceinline
588#endif
589
590#ifndef STBI_NO_THREAD_LOCALS
591 #if defined(__cplusplus) && __cplusplus >= 201103L
592 #define STBI_THREAD_LOCAL thread_local
593 #elif defined(__GNUC__) && __GNUC__ < 5
594 #define STBI_THREAD_LOCAL __thread
595 #elif defined(_MSC_VER)
596 #define STBI_THREAD_LOCAL __declspec(thread)
597 #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__)
598 #define STBI_THREAD_LOCAL _Thread_local
599 #endif
600
601 #ifndef STBI_THREAD_LOCAL
602 #if defined(__GNUC__)
603 #define STBI_THREAD_LOCAL __thread
604 #endif
605 #endif
606#endif
607
608#ifdef _MSC_VER
609typedef unsigned short stbi__uint16;
610typedef signed short stbi__int16;
611typedef unsigned int stbi__uint32;
612typedef signed int stbi__int32;
613#else
614#include <stdint.h>
615typedef uint16_t stbi__uint16;
616typedef int16_t stbi__int16;
617typedef uint32_t stbi__uint32;
618typedef int32_t stbi__int32;
619#endif
620
621// should produce compiler error if size is wrong
622typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1];
623
624#ifdef _MSC_VER
625#define STBI_NOTUSED(v) (void)(v)
626#else
627#define STBI_NOTUSED(v) (void)sizeof(v)
628#endif
629
630#ifdef _MSC_VER
631#define STBI_HAS_LROTL
632#endif
633
634#ifdef STBI_HAS_LROTL
635 #define stbi_lrot(x,y) _lrotl(x,y)
636#else
637 #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y))))
638#endif
639
640#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED))
641// ok
642#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED)
643// ok
644#else
645#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)."
646#endif
647
648#ifndef STBI_MALLOC
649#define STBI_MALLOC(sz) malloc(sz)
650#define STBI_REALLOC(p,newsz) realloc(p,newsz)
651#define STBI_FREE(p) free(p)
652#endif
653
654#ifndef STBI_REALLOC_SIZED
655#define STBI_REALLOC_SIZED(p,oldsz,newsz) ((void)oldsz, STBI_REALLOC(p,newsz))
656#endif
657
658// x86/x64 detection
659#if defined(__x86_64__) || defined(_M_X64)
660#define STBI__X64_TARGET
661#elif defined(__i386) || defined(_M_IX86)
662#define STBI__X86_TARGET
663#endif
664
665#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD)
666// gcc doesn't support sse2 intrinsics unless you compile with -msse2,
667// which in turn means it gets to use SSE2 everywhere. This is unfortunate,
668// but previous attempts to provide the SSE2 functions with runtime
669// detection caused numerous issues. The way architecture extensions are
670// exposed in GCC/Clang is, sadly, not really suited for one-file libs.
671// New behavior: if compiled with -msse2, we use SSE2 without any
672// detection; if not, we don't use it at all.
673#define STBI_NO_SIMD
674#endif
675
676#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD)
677// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET
678//
679// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the
680// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant.
681// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not
682// simultaneously enabling "-mstackrealign".
683//
684// See https://github.com/nothings/stb/issues/81 for more information.
685//
686// So default to no SSE2 on 32-bit MinGW. If you've read this far and added
687// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2.
688#define STBI_NO_SIMD
689#endif
690
691#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET))
692#define STBI_SSE2
693#include <emmintrin.h>
694
695#ifdef _MSC_VER
696
697#if _MSC_VER >= 1400 // not VC6
698#include <intrin.h> // __cpuid
699static int stbi__cpuid3(void)
700{
701 int info[4];
702 __cpuid(info,1);
703 return info[3];
704}
705#else
706static int stbi__cpuid3(void)
707{
708 int res;
709 __asm {
710 mov eax,1
711 cpuid
712 mov res,edx
713 }
714 return res;
715}
716#endif
717
718#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
719
720#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
721static int stbi__sse2_available(void)
722{
723 int info3 = stbi__cpuid3();
724 return ((info3 >> 26) & 1) != 0;
725}
726#endif
727
728#else // assume GCC-style if not VC++
729#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
730
731#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
732static int stbi__sse2_available(void)
733{
734 // If we're even attempting to compile this on GCC/Clang, that means
735 // -msse2 is on, which means the compiler is allowed to use SSE2
736 // instructions at will, and so are we.
737 return 1;
738}
739#endif
740
741#endif
742#endif
743
744// ARM NEON
745#if defined(STBI_NO_SIMD) && defined(STBI_NEON)
746#undef STBI_NEON
747#endif
748
749#ifdef STBI_NEON
750#include <arm_neon.h>
751// assume GCC or Clang on ARM targets
752#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
753#endif
754
755#ifndef STBI_SIMD_ALIGN
756#define STBI_SIMD_ALIGN(type, name) type name
757#endif
758
759#ifndef STBI_MAX_DIMENSIONS
760#define STBI_MAX_DIMENSIONS (1 << 24)
761#endif
762
763///////////////////////////////////////////////
764//
765// stbi__context struct and start_xxx functions
766
767// stbi__context structure is our basic context used by all images, so it
768// contains all the IO context, plus some basic image information
769typedef struct
770{
771 stbi__uint32 img_x, img_y;
772 int img_n, img_out_n;
773
774 stbi_io_callbacks io;
775 void *io_user_data;
776
777 int read_from_callbacks;
778 int buflen;
779 stbi_uc buffer_start[128];
780 int callback_already_read;
781
782 stbi_uc *img_buffer, *img_buffer_end;
783 stbi_uc *img_buffer_original, *img_buffer_original_end;
784} stbi__context;
785
786
787static void stbi__refill_buffer(stbi__context *s);
788
789// initialize a memory-decode context
790static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len)
791{
792 s->io.read = NULL;
793 s->read_from_callbacks = 0;
794 s->callback_already_read = 0;
795 s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer;
796 s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len;
797}
798
799// initialize a callback-based context
800static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user)
801{
802 s->io = *c;
803 s->io_user_data = user;
804 s->buflen = sizeof(s->buffer_start);
805 s->read_from_callbacks = 1;
806 s->callback_already_read = 0;
807 s->img_buffer = s->img_buffer_original = s->buffer_start;
808 stbi__refill_buffer(s);
809 s->img_buffer_original_end = s->img_buffer_end;
810}
811
812#ifndef STBI_NO_STDIO
813
814static int stbi__stdio_read(void *user, char *data, int size)
815{
816 return (int) fread(data,1,size,(FILE*) user);
817}
818
819static void stbi__stdio_skip(void *user, int n)
820{
821 int ch;
822 fseek((FILE*) user, n, SEEK_CUR);
823 ch = fgetc((FILE*) user); /* have to read a byte to reset feof()'s flag */
824 if (ch != EOF) {
825 ungetc(ch, (FILE *) user); /* push byte back onto stream if valid. */
826 }
827}
828
829static int stbi__stdio_eof(void *user)
830{
831 return feof((FILE*) user) || ferror((FILE *) user);
832}
833
834static stbi_io_callbacks stbi__stdio_callbacks =
835{
836 stbi__stdio_read,
837 stbi__stdio_skip,
838 stbi__stdio_eof,
839};
840
841static void stbi__start_file(stbi__context *s, FILE *f)
842{
843 stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f);
844}
845
846//static void stop_file(stbi__context *s) { }
847
848#endif // !STBI_NO_STDIO
849
850static void stbi__rewind(stbi__context *s)
851{
852 // conceptually rewind SHOULD rewind to the beginning of the stream,
853 // but we just rewind to the beginning of the initial buffer, because
854 // we only use it after doing 'test', which only ever looks at at most 92 bytes
855 s->img_buffer = s->img_buffer_original;
856 s->img_buffer_end = s->img_buffer_original_end;
857}
858
859enum
860{
861 STBI_ORDER_RGB,
862 STBI_ORDER_BGR
863};
864
865typedef struct
866{
867 int bits_per_channel;
868 int num_channels;
869 int channel_order;
870} stbi__result_info;
871
872#ifndef STBI_NO_JPEG
873static int stbi__jpeg_test(stbi__context *s);
874static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
875static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp);
876#endif
877
878#ifndef STBI_NO_PNG
879static int stbi__png_test(stbi__context *s);
880static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
881static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp);
882static int stbi__png_is16(stbi__context *s);
883#endif
884
885#ifndef STBI_NO_BMP
886static int stbi__bmp_test(stbi__context *s);
887static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
888static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp);
889#endif
890
891#ifndef STBI_NO_TGA
892static int stbi__tga_test(stbi__context *s);
893static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
894static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp);
895#endif
896
897#ifndef STBI_NO_PSD
898static int stbi__psd_test(stbi__context *s);
899static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc);
900static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp);
901static int stbi__psd_is16(stbi__context *s);
902#endif
903
904#ifndef STBI_NO_HDR
905static int stbi__hdr_test(stbi__context *s);
906static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
907static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp);
908#endif
909
910#ifndef STBI_NO_PIC
911static int stbi__pic_test(stbi__context *s);
912static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
913static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp);
914#endif
915
916#ifndef STBI_NO_GIF
917static int stbi__gif_test(stbi__context *s);
918static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
919static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp);
920static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp);
921#endif
922
923#ifndef STBI_NO_PNM
924static int stbi__pnm_test(stbi__context *s);
925static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
926static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp);
927#endif
928
929static
930#ifdef STBI_THREAD_LOCAL
931STBI_THREAD_LOCAL
932#endif
933const char *stbi__g_failure_reason;
934
935STBIDEF const char *stbi_failure_reason(void)
936{
937 return stbi__g_failure_reason;
938}
939
940#ifndef STBI_NO_FAILURE_STRINGS
941static int stbi__err(const char *str)
942{
943 stbi__g_failure_reason = str;
944 return 0;
945}
946#endif
947
948static void *stbi__malloc(size_t size)
949{
950 return STBI_MALLOC(size);
951}
952
953// stb_image uses ints pervasively, including for offset calculations.
954// therefore the largest decoded image size we can support with the
955// current code, even on 64-bit targets, is INT_MAX. this is not a
956// significant limitation for the intended use case.
957//
958// we do, however, need to make sure our size calculations don't
959// overflow. hence a few helper functions for size calculations that
960// multiply integers together, making sure that they're non-negative
961// and no overflow occurs.
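//
// e.g. an 8-bit RGBA decode of a w x h image can allocate via
// stbi__malloc_mad3(w, h, 4, 0), which returns NULL instead of wrapping.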
962
963// return 1 if the sum is valid, 0 on overflow.
964// negative terms are considered invalid.
965static int stbi__addsizes_valid(int a, int b)
966{
967 if (b < 0) return 0;
968 // now 0 <= b <= INT_MAX, hence also
969 //    0 <= INT_MAX - b <= INT_MAX.
970 // And "a + b <= INT_MAX" (which might overflow) is the
971 // same as a <= INT_MAX - b (no overflow)
972 return a <= INT_MAX - b;
973}
974
975// returns 1 if the product is valid, 0 on overflow.
976// negative factors are considered invalid.
977static int stbi__mul2sizes_valid(int a, int b)
978{
979 if (a < 0 || b < 0) return 0;
980 if (b == 0) return 1; // mul-by-0 is always safe
981 // portable way to check for no overflows in a*b
982 return a <= INT_MAX/b;
983}
984
985#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR)
986// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow
987static int stbi__mad2sizes_valid(int a, int b, int add)
988{
989 return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add);
990}
991#endif
992
993// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow
994static int stbi__mad3sizes_valid(int a, int b, int c, int add)
995{
996 return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) &&
997 stbi__addsizes_valid(a*b*c, add);
998}
999
1000// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow
1001#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
1002static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add)
1003{
1004 return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) &&
1005 stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add);
1006}
1007#endif
1008
1009#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR)
1010// mallocs with size overflow checking
1011static void *stbi__malloc_mad2(int a, int b, int add)
1012{
1013 if (!stbi__mad2sizes_valid(a, b, add)) return NULL;
1014 return stbi__malloc(a*b + add);
1015}
1016#endif
1017
1018static void *stbi__malloc_mad3(int a, int b, int c, int add)
1019{
1020 if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL;
1021 return stbi__malloc(a*b*c + add);
1022}
1023
1024#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
1025static void *stbi__malloc_mad4(int a, int b, int c, int d, int add)
1026{
1027 if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL;
1028 return stbi__malloc(a*b*c*d + add);
1029}
1030#endif
1031
1032// stbi__err - error
1033// stbi__errpf - error returning pointer to float
1034// stbi__errpuc - error returning pointer to unsigned char
1035
1036#ifdef STBI_NO_FAILURE_STRINGS
1037 #define stbi__err(x,y) 0
1038#elif defined(STBI_FAILURE_USERMSG)
1039 #define stbi__err(x,y) stbi__err(y)
1040#else
1041 #define stbi__err(x,y) stbi__err(x)
1042#endif
1043
1044#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL))
1045#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL))
1046
1047STBIDEF void stbi_image_free(void *retval_from_stbi_load)
1048{
1049 STBI_FREE(retval_from_stbi_load);
1050}
1051
1052#ifndef STBI_NO_LINEAR
1053static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp);
1054#endif
1055
1056#ifndef STBI_NO_HDR
1057static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp);
1058#endif
1059
1060static int stbi__vertically_flip_on_load_global = 0;
1061
1062STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip)
1063{
1064 stbi__vertically_flip_on_load_global = flag_true_if_should_flip;
1065}
1066
1067#ifndef STBI_THREAD_LOCAL
1068#define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global
1069#else
1070static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set;
1071
1072STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip)
1073{
1074 stbi__vertically_flip_on_load_local = flag_true_if_should_flip;
1075 stbi__vertically_flip_on_load_set = 1;
1076}
1077
1078#define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \
1079 ? stbi__vertically_flip_on_load_local \
1080 : stbi__vertically_flip_on_load_global)
1081#endif // STBI_THREAD_LOCAL
1082
1083static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc)
1084{
1085 memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields
1086 ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed
1087 ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order
1088 ri->num_channels = 0;
1089
1090 #ifndef STBI_NO_JPEG
1091 if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri);
1092 #endif
1093 #ifndef STBI_NO_PNG
1094 if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri);
1095 #endif
1096 #ifndef STBI_NO_BMP
1097 if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri);
1098 #endif
1099 #ifndef STBI_NO_GIF
1100 if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri);
1101 #endif
1102 #ifndef STBI_NO_PSD
1103 if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc);
1104 #else
1105 STBI_NOTUSED(bpc);
1106 #endif
1107 #ifndef STBI_NO_PIC
1108 if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri);
1109 #endif
1110 #ifndef STBI_NO_PNM
1111 if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri);
1112 #endif
1113
1114 #ifndef STBI_NO_HDR
1115 if (stbi__hdr_test(s)) {
1116 float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri);
1117 return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp);
1118 }
1119 #endif
1120
1121 #ifndef STBI_NO_TGA
1122 // test tga last because it's a crappy test!
1123 if (stbi__tga_test(s))
1124 return stbi__tga_load(s,x,y,comp,req_comp, ri);
1125 #endif
1126
1127 return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt");
1128}
1129
1130static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels)
1131{
1132 int i;
1133 int img_len = w * h * channels;
1134 stbi_uc *reduced;
1135
1136 reduced = (stbi_uc *) stbi__malloc(img_len);
1137 if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory");
1138
1139 for (i = 0; i < img_len; ++i)
1140 reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling
1141
1142 STBI_FREE(orig);
1143 return reduced;
1144}
1145
1146static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels)
1147{
1148 int i;
1149 int img_len = w * h * channels;
1150 stbi__uint16 *enlarged;
1151
1152 enlarged = (stbi__uint16 *) stbi__malloc(img_len*2);
1153 if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory");
1154
1155 for (i = 0; i < img_len; ++i)
1156 enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff
1157
1158 STBI_FREE(orig);
1159 return enlarged;
1160}
1161
1162static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel)
1163{
1164 int row;
1165 size_t bytes_per_row = (size_t)w * bytes_per_pixel;
1166 stbi_uc temp[2048];
1167 stbi_uc *bytes = (stbi_uc *)image;
1168
1169 for (row = 0; row < (h>>1); row++) {
1170 stbi_uc *row0 = bytes + row*bytes_per_row;
1171 stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row;
1172 // swap row0 with row1
1173 size_t bytes_left = bytes_per_row;
1174 while (bytes_left) {
1175 size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp);
1176 memcpy(temp, row0, bytes_copy);
1177 memcpy(row0, row1, bytes_copy);
1178 memcpy(row1, temp, bytes_copy);
1179 row0 += bytes_copy;
1180 row1 += bytes_copy;
1181 bytes_left -= bytes_copy;
1182 }
1183 }
1184}
1185
1186#ifndef STBI_NO_GIF
1187static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel)
1188{
1189 int slice;
1190 int slice_size = w * h * bytes_per_pixel;
1191
1192 stbi_uc *bytes = (stbi_uc *)image;
1193 for (slice = 0; slice < z; ++slice) {
1194 stbi__vertical_flip(bytes, w, h, bytes_per_pixel);
1195 bytes += slice_size;
1196 }
1197}
1198#endif
1199
1200static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp)
1201{
1202 stbi__result_info ri;
1203 void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8);
1204
1205 if (result == NULL)
1206 return NULL;
1207
1208 // it is the responsibility of the loaders to make sure we get either 8 or 16 bit.
1209 STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16);
1210
1211 if (ri.bits_per_channel != 8) {
1212 result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp);
1213 ri.bits_per_channel = 8;
1214 }
1215
1216 // @TODO: move stbi__convert_format to here
1217
1218 if (stbi__vertically_flip_on_load) {
1219 int channels = req_comp ? req_comp : *comp;
1220 stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc));
1221 }
1222
1223 return (unsigned char *) result;
1224}
1225
1226static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp)
1227{
1228 stbi__result_info ri;
1229 void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16);
1230
1231 if (result == NULL)
1232 return NULL;
1233
1234 // it is the responsibility of the loaders to make sure we get either 8 or 16 bit.
1235 STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16);
1236
1237 if (ri.bits_per_channel != 16) {
1238 result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? *comp : req_comp);
1239 ri.bits_per_channel = 16;
1240 }
1241
1242 // @TODO: move stbi__convert_format16 to here
1243 // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision
1244
1245 if (stbi__vertically_flip_on_load) {
1246 int channels = req_comp ? req_comp : *comp;
1247 stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16));
1248 }
1249
1250 return (stbi__uint16 *) result;
1251}
1252
1253#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR)
1254static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp)
1255{
1256 if (stbi__vertically_flip_on_load && result != NULL) {
1257 int channels = req_comp ? req_comp : *comp;
1258 stbi__vertical_flip(result, *x, *y, channels * sizeof(float));
1259 }
1260}
1261#endif
1262
1263#ifndef STBI_NO_STDIO
1264
1265#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8)
1266STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide);
1267STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default);
1268#endif
1269
1270#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8)
1271STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input)
1272{
1273 return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL);
1274}
1275#endif
1276
1277static FILE *stbi__fopen(char const *filename, char const *mode)
1278{
1279 FILE *f;
1280#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8)
1281 wchar_t wMode[64];
1282 wchar_t wFilename[1024];
1283    if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) // buffer size is in wide chars, not bytes
1284 return 0;
1285
1286    if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode)))
1287 return 0;
1288
1289#if _MSC_VER >= 1400
1290 if (0 != _wfopen_s(&f, wFilename, wMode))
1291 f = 0;
1292#else
1293 f = _wfopen(wFilename, wMode);
1294#endif
1295
1296#elif defined(_MSC_VER) && _MSC_VER >= 1400
1297 if (0 != fopen_s(&f, filename, mode))
1298 f=0;
1299#else
1300 f = fopen(filename, mode);
1301#endif
1302 return f;
1303}
1304
1305
1306STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp)
1307{
1308 FILE *f = stbi__fopen(filename, "rb");
1309 unsigned char *result;
1310 if (!f) return stbi__errpuc("can't fopen", "Unable to open file");
1311 result = stbi_load_from_file(f,x,y,comp,req_comp);
1312 fclose(f);
1313 return result;
1314}
1315
1316STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
1317{
1318 unsigned char *result;
1319 stbi__context s;
1320 stbi__start_file(&s,f);
1321 result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
1322 if (result) {
1323 // need to 'unget' all the characters in the IO buffer
1324 fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR);
1325 }
1326 return result;
1327}
1328
1329STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp)
1330{
1331 stbi__uint16 *result;
1332 stbi__context s;
1333 stbi__start_file(&s,f);
1334 result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp);
1335 if (result) {
1336 // need to 'unget' all the characters in the IO buffer
1337 fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR);
1338 }
1339 return result;
1340}
1341
1342STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp)
1343{
1344 FILE *f = stbi__fopen(filename, "rb");
1345 stbi__uint16 *result;
1346 if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file");
1347 result = stbi_load_from_file_16(f,x,y,comp,req_comp);
1348 fclose(f);
1349 return result;
1350}
1351
1352
1353#endif //!STBI_NO_STDIO
1354
1355STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels)
1356{
1357 stbi__context s;
1358 stbi__start_mem(&s,buffer,len);
1359 return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels);
1360}
1361
1362STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels)
1363{
1364 stbi__context s;
1365 stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user);
1366 return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels);
1367}
1368
1369STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
1370{
1371 stbi__context s;
1372 stbi__start_mem(&s,buffer,len);
1373 return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
1374}
1375
1376STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
1377{
1378 stbi__context s;
1379 stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
1380 return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
1381}
1382
1383#ifndef STBI_NO_GIF
1384STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp)
1385{
1386 unsigned char *result;
1387 stbi__context s;
1388 stbi__start_mem(&s,buffer,len);
1389
1390 result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp);
1391 if (stbi__vertically_flip_on_load) {
1392 stbi__vertical_flip_slices( result, *x, *y, *z, *comp );
1393 }
1394
1395 return result;
1396}
1397#endif
1398
1399#ifndef STBI_NO_LINEAR
1400static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp)
1401{
1402 unsigned char *data;
1403 #ifndef STBI_NO_HDR
1404 if (stbi__hdr_test(s)) {
1405 stbi__result_info ri;
1406 float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri);
1407 if (hdr_data)
1408 stbi__float_postprocess(hdr_data,x,y,comp,req_comp);
1409 return hdr_data;
1410 }
1411 #endif
1412 data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp);
1413 if (data)
1414 return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp);
1415 return stbi__errpf("unknown image type", "Image not of any known type, or corrupt");
1416}
1417
1418STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
1419{
1420 stbi__context s;
1421 stbi__start_mem(&s,buffer,len);
1422 return stbi__loadf_main(&s,x,y,comp,req_comp);
1423}
1424
1425STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
1426{
1427 stbi__context s;
1428 stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
1429 return stbi__loadf_main(&s,x,y,comp,req_comp);
1430}
1431
1432#ifndef STBI_NO_STDIO
1433STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp)
1434{
1435 float *result;
1436 FILE *f = stbi__fopen(filename, "rb");
1437 if (!f) return stbi__errpf("can't fopen", "Unable to open file");
1438 result = stbi_loadf_from_file(f,x,y,comp,req_comp);
1439 fclose(f);
1440 return result;
1441}
1442
1443STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
1444{
1445 stbi__context s;
1446 stbi__start_file(&s,f);
1447 return stbi__loadf_main(&s,x,y,comp,req_comp);
1448}
1449#endif // !STBI_NO_STDIO
1450
1451#endif // !STBI_NO_LINEAR
1452
1453 // these is-hdr-or-not functions are defined independently of whether
1454 // STBI_NO_LINEAR is defined, for API simplicity; if STBI_NO_HDR is defined,
1455 // they always report false!
1456
1457STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len)
1458{
1459 #ifndef STBI_NO_HDR
1460 stbi__context s;
1461 stbi__start_mem(&s,buffer,len);
1462 return stbi__hdr_test(&s);
1463 #else
1464 STBI_NOTUSED(buffer);
1465 STBI_NOTUSED(len);
1466 return 0;
1467 #endif
1468}
1469
1470#ifndef STBI_NO_STDIO
1471STBIDEF int stbi_is_hdr (char const *filename)
1472{
1473 FILE *f = stbi__fopen(filename, "rb");
1474 int result=0;
1475 if (f) {
1476 result = stbi_is_hdr_from_file(f);
1477 fclose(f);
1478 }
1479 return result;
1480}
1481
1482STBIDEF int stbi_is_hdr_from_file(FILE *f)
1483{
1484 #ifndef STBI_NO_HDR
1485 long pos = ftell(f);
1486 int res;
1487 stbi__context s;
1488 stbi__start_file(&s,f);
1489 res = stbi__hdr_test(&s);
1490 fseek(f, pos, SEEK_SET);
1491 return res;
1492 #else
1493 STBI_NOTUSED(f);
1494 return 0;
1495 #endif
1496}
1497#endif // !STBI_NO_STDIO
1498
1499STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user)
1500{
1501 #ifndef STBI_NO_HDR
1502 stbi__context s;
1503 stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
1504 return stbi__hdr_test(&s);
1505 #else
1506 STBI_NOTUSED(clbk);
1507 STBI_NOTUSED(user);
1508 return 0;
1509 #endif
1510}
1511
1512#ifndef STBI_NO_LINEAR
1513static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f;
1514
1515STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; }
1516STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; }
1517#endif
1518
1519static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f;
1520
1521STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; }
1522STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; }
1523
1524
1525//////////////////////////////////////////////////////////////////////////////
1526//
1527// Common code used by all image loaders
1528//
1529
1530enum
1531{
1532 STBI__SCAN_load=0,
1533 STBI__SCAN_type,
1534 STBI__SCAN_header
1535};
1536
1537static void stbi__refill_buffer(stbi__context *s)
1538{
1539 int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen);
1540 s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original);
1541 if (n == 0) {
1542 // at end of file, treat same as if from memory, but need to handle case
1543 // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file
1544 s->read_from_callbacks = 0;
1545 s->img_buffer = s->buffer_start;
1546 s->img_buffer_end = s->buffer_start+1;
1547 *s->img_buffer = 0;
1548 } else {
1549 s->img_buffer = s->buffer_start;
1550 s->img_buffer_end = s->buffer_start + n;
1551 }
1552}
1553
1554stbi_inline static stbi_uc stbi__get8(stbi__context *s)
1555{
1556 if (s->img_buffer < s->img_buffer_end)
1557 return *s->img_buffer++;
1558 if (s->read_from_callbacks) {
1559 stbi__refill_buffer(s);
1560 return *s->img_buffer++;
1561 }
1562 return 0;
1563}
1564
1565#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM)
1566// nothing
1567#else
1568stbi_inline static int stbi__at_eof(stbi__context *s)
1569{
1570 if (s->io.read) {
1571 if (!(s->io.eof)(s->io_user_data)) return 0;
1572 // if feof() is true, check if buffer = end
1573 // special case: we've only got the special 0 character at the end
1574 if (s->read_from_callbacks == 0) return 1;
1575 }
1576
1577 return s->img_buffer >= s->img_buffer_end;
1578}
1579#endif
1580
1581#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC)
1582// nothing
1583#else
1584static void stbi__skip(stbi__context *s, int n)
1585{
1586 if (n == 0) return; // already there!
1587 if (n < 0) {
1588 s->img_buffer = s->img_buffer_end;
1589 return;
1590 }
1591 if (s->io.read) {
1592 int blen = (int) (s->img_buffer_end - s->img_buffer);
1593 if (blen < n) {
1594 s->img_buffer = s->img_buffer_end;
1595 (s->io.skip)(s->io_user_data, n - blen);
1596 return;
1597 }
1598 }
1599 s->img_buffer += n;
1600}
1601#endif
1602
1603#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM)
1604// nothing
1605#else
1606static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n)
1607{
1608 if (s->io.read) {
1609 int blen = (int) (s->img_buffer_end - s->img_buffer);
1610 if (blen < n) {
1611 int res, count;
1612
1613 memcpy(buffer, s->img_buffer, blen);
1614
1615 count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen);
1616 res = (count == (n-blen));
1617 s->img_buffer = s->img_buffer_end;
1618 return res;
1619 }
1620 }
1621
1622 if (s->img_buffer+n <= s->img_buffer_end) {
1623 memcpy(buffer, s->img_buffer, n);
1624 s->img_buffer += n;
1625 return 1;
1626 } else
1627 return 0;
1628}
1629#endif
1630
1631#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC)
1632// nothing
1633#else
1634static int stbi__get16be(stbi__context *s)
1635{
1636 int z = stbi__get8(s);
1637 return (z << 8) + stbi__get8(s);
1638}
1639#endif
1640
1641#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC)
1642// nothing
1643#else
1644static stbi__uint32 stbi__get32be(stbi__context *s)
1645{
1646 stbi__uint32 z = stbi__get16be(s);
1647 return (z << 16) + stbi__get16be(s);
1648}
1649#endif
1650
1651#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF)
1652// nothing
1653#else
1654static int stbi__get16le(stbi__context *s)
1655{
1656 int z = stbi__get8(s);
1657 return z + (stbi__get8(s) << 8);
1658}
1659#endif
1660
1661#ifndef STBI_NO_BMP
1662static stbi__uint32 stbi__get32le(stbi__context *s)
1663{
1664 stbi__uint32 z = stbi__get16le(s);
1665 return z + (stbi__get16le(s) << 16);
1666}
1667#endif
1668
1669#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings
1670
1671#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM)
1672// nothing
1673#else
1674//////////////////////////////////////////////////////////////////////////////
1675//
1676// generic converter from built-in img_n to req_comp
1677// individual types do this automatically as much as possible (e.g. jpeg
1678// does all cases internally since it needs to colorspace convert anyway,
1679// and it never has alpha, so very few cases ). png can automatically
1680// interleave an alpha=255 channel, but falls back to this for other cases
1681//
1682// assume data buffer is malloced, so malloc a new one and free that one
1683// only failure mode is malloc failing
1684
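// the weights below are the Rec. 601 luma coefficients (0.299, 0.587, 0.114)
// scaled by 256 (77 + 150 + 29 = 256), so the >> 8 renormalizes to 0..255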
1685static stbi_uc stbi__compute_y(int r, int g, int b)
1686{
1687 return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8);
1688}
1689#endif
1690
1691#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM)
1692// nothing
1693#else
1694static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y)
1695{
1696 int i,j;
1697 unsigned char *good;
1698
1699 if (req_comp == img_n) return data;
1700 STBI_ASSERT(req_comp >= 1 && req_comp <= 4);
1701
1702 good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0);
1703 if (good == NULL) {
1704 STBI_FREE(data);
1705 return stbi__errpuc("outofmem", "Out of memory");
1706 }
1707
1708 for (j=0; j < (int) y; ++j) {
1709 unsigned char *src = data + j * x * img_n ;
1710 unsigned char *dest = good + j * x * req_comp;
1711
1712 #define STBI__COMBO(a,b) ((a)*8+(b))
1713 #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b)
1714 // convert source image with img_n components to one with req_comp components;
1715 // avoid switch per pixel, so use switch per scanline and massive macros
1716 switch (STBI__COMBO(img_n, req_comp)) {
1717 STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break;
1718 STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break;
1719 STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break;
1720 STBI__CASE(2,1) { dest[0]=src[0]; } break;
1721 STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break;
1722 STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break;
1723 STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break;
1724 STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break;
1725 STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break;
1726 STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break;
1727 STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break;
1728 STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break;
1729 default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion");
1730 }
1731 #undef STBI__CASE
1732 }
1733
1734 STBI_FREE(data);
1735 return good;
1736}
1737#endif
1738
1739#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD)
1740// nothing
1741#else
1742static stbi__uint16 stbi__compute_y_16(int r, int g, int b)
1743{
1744 return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8);
1745}
1746#endif
1747
1748#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD)
1749// nothing
1750#else
1751static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y)
1752{
1753 int i,j;
1754 stbi__uint16 *good;
1755
1756 if (req_comp == img_n) return data;
1757 STBI_ASSERT(req_comp >= 1 && req_comp <= 4);
1758
1759 good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2);
1760 if (good == NULL) {
1761 STBI_FREE(data);
1762 return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory");
1763 }
1764
1765 for (j=0; j < (int) y; ++j) {
1766 stbi__uint16 *src = data + j * x * img_n ;
1767 stbi__uint16 *dest = good + j * x * req_comp;
1768
1769 #define STBI__COMBO(a,b) ((a)*8+(b))
1770 #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b)
1771 // convert source image with img_n components to one with req_comp components;
1772 // avoid switch per pixel, so use switch per scanline and massive macros
1773 switch (STBI__COMBO(img_n, req_comp)) {
1774 STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break;
1775 STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break;
1776 STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break;
1777 STBI__CASE(2,1) { dest[0]=src[0]; } break;
1778 STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break;
1779 STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break;
1780 STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break;
1781 STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break;
1782 STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break;
1783 STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break;
1784 STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break;
1785 STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break;
1786 default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion");
1787 }
1788 #undef STBI__CASE
1789 }
1790
1791 STBI_FREE(data);
1792 return good;
1793}
1794#endif
1795
1796#ifndef STBI_NO_LINEAR
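// promote 8-bit data to linear float: out = (in/255)^gamma * scale for the
// color channels, while an alpha channel (present when comp is even) stays linear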
1797static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp)
1798{
1799 int i,k,n;
1800 float *output;
1801 if (!data) return NULL;
1802 output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0);
1803 if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); }
1804 // compute number of non-alpha components
1805 if (comp & 1) n = comp; else n = comp-1;
1806 for (i=0; i < x*y; ++i) {
1807 for (k=0; k < n; ++k) {
1808 output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale);
1809 }
1810 }
1811 if (n < comp) {
1812 for (i=0; i < x*y; ++i) {
1813 output[i*comp + n] = data[i*comp + n]/255.0f;
1814 }
1815 }
1816 STBI_FREE(data);
1817 return output;
1818}
1819#endif
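// worked example of the LDR->HDR mapping above (illustrative): with the
// default stbi__l2h_gamma of 2.2 and stbi__l2h_scale of 1.0, an 8-bit value
// of 128 maps to pow(128/255.0, 2.2) * 1.0 ~= pow(0.502, 2.2) ~= 0.22,
// while the alpha channel (the component skipped when n < comp) stays
// linear: 128/255.0 ~= 0.502.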
1820
1821#ifndef STBI_NO_HDR
1822#define stbi__float2int(x) ((int) (x))
1823static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp)
1824{
1825 int i,k,n;
1826 stbi_uc *output;
1827 if (!data) return NULL;
1828 output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0);
1829 if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); }
1830 // compute number of non-alpha components
1831 if (comp & 1) n = comp; else n = comp-1;
1832 for (i=0; i < x*y; ++i) {
1833 for (k=0; k < n; ++k) {
1834 float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f;
1835 if (z < 0) z = 0;
1836 if (z > 255) z = 255;
1837 output[i*comp + k] = (stbi_uc) stbi__float2int(z);
1838 }
1839 if (k < comp) {
1840 float z = data[i*comp+k] * 255 + 0.5f;
1841 if (z < 0) z = 0;
1842 if (z > 255) z = 255;
1843 output[i*comp + k] = (stbi_uc) stbi__float2int(z);
1844 }
1845 }
1846 STBI_FREE(data);
1847 return output;
1848}
1849#endif
1850
1851//////////////////////////////////////////////////////////////////////////////
1852//
1853// "baseline" JPEG/JFIF decoder
1854//
1855// simple implementation
1856// - doesn't support delayed output of y-dimension
1857// - simple interface (only one output format: 8-bit interleaved RGB)
1858// - doesn't try to recover corrupt jpegs
1859 // - doesn't allow partial loading or loading multiple images at once
1860// - still fast on x86 (copying globals into locals doesn't help x86)
1861// - allocates lots of intermediate memory (full size of all components)
1862// - non-interleaved case requires this anyway
1863// - allows good upsampling (see next)
1864// high-quality
1865// - upsampled channels are bilinearly interpolated, even across blocks
1866// - quality integer IDCT derived from IJG's 'slow'
1867// performance
1868// - fast huffman; reasonable integer IDCT
1869// - some SIMD kernels for common paths on targets with SSE2/NEON
1870// - uses a lot of intermediate memory, could cache poorly
1871
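// a minimal usage sketch for reference (assumes a file "photo.jpg"; not part
// of the original source) -- the decoder below is reached through the usual
// public entry points:
//
//    int w, h, n;
//    unsigned char *rgb = stbi_load("photo.jpg", &w, &h, &n, 3); // force RGB
//    if (rgb) {
//       // w*h*3 bytes of interleaved 8-bit RGB, top-left pixel first
//       stbi_image_free(rgb);
//    }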
1872#ifndef STBI_NO_JPEG
1873
1874// huffman decoding acceleration
1875#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache
1876
1877typedef struct
1878{
1879 stbi_uc fast[1 << FAST_BITS];
1880 // weirdly, repacking this into AoS is a 10% speed loss, instead of a win
1881 stbi__uint16 code[256];
1882 stbi_uc values[256];
1883 stbi_uc size[257];
1884 unsigned int maxcode[18];
1885 int delta[17]; // old 'firstsymbol' - old 'firstcode'
1886} stbi__huffman;
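// illustration of the fast-table layout above (not from the original
// source): a code of length s <= FAST_BITS occupies 2^(FAST_BITS - s)
// consecutive slots, one per possible suffix. with FAST_BITS = 9, a 3-bit
// code 101 fills the 64 slots 101xxxxxx, so any 9-bit window of the
// bitstream beginning with 101 resolves its symbol in a single table load.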
1887
1888typedef struct
1889{
1890 stbi__context *s;
1891 stbi__huffman huff_dc[4];
1892 stbi__huffman huff_ac[4];
1893 stbi__uint16 dequant[4][64];
1894 stbi__int16 fast_ac[4][1 << FAST_BITS];
1895
1896// sizes for components, interleaved MCUs
1897 int img_h_max, img_v_max;
1898 int img_mcu_x, img_mcu_y;
1899 int img_mcu_w, img_mcu_h;
1900
1901// definition of jpeg image component
1902 struct
1903 {
1904 int id;
1905 int h,v;
1906 int tq;
1907 int hd,ha;
1908 int dc_pred;
1909
1910 int x,y,w2,h2;
1911 stbi_uc *data;
1912 void *raw_data, *raw_coeff;
1913 stbi_uc *linebuf;
1914 short *coeff; // progressive only
1915 int coeff_w, coeff_h; // number of 8x8 coefficient blocks
1916 } img_comp[4];
1917
1918 stbi__uint32 code_buffer; // jpeg entropy-coded buffer
1919 int code_bits; // number of valid bits
1920 unsigned char marker; // marker seen while filling entropy buffer
1921 int nomore; // flag if we saw a marker so must stop
1922
1923 int progressive;
1924 int spec_start;
1925 int spec_end;
1926 int succ_high;
1927 int succ_low;
1928 int eob_run;
1929 int jfif;
1930 int app14_color_transform; // Adobe APP14 tag
1931 int rgb;
1932
1933 int scan_n, order[4];
1934 int restart_interval, todo;
1935
1936// kernels
1937 void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]);
1938 void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step);
1939 stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs);
1940} stbi__jpeg;
1941
1942static int stbi__build_huffman(stbi__huffman *h, int *count)
1943{
1944 int i,j,k=0;
1945 unsigned int code;
1946 // build size list for each symbol (from JPEG spec)
1947 for (i=0; i < 16; ++i)
1948 for (j=0; j < count[i]; ++j)
1949 h->size[k++] = (stbi_uc) (i+1);
1950 h->size[k] = 0;
1951
1952 // compute actual symbols (from jpeg spec)
1953 code = 0;
1954 k = 0;
1955 for(j=1; j <= 16; ++j) {
1956 // compute delta to add to code to compute symbol id
1957 h->delta[j] = k - code;
1958 if (h->size[k] == j) {
1959 while (h->size[k] == j)
1960 h->code[k++] = (stbi__uint16) (code++);
1961 if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG");
1962 }
1963 // compute largest code + 1 for this size, preshifted as needed later
1964 h->maxcode[j] = code << (16-j);
1965 code <<= 1;
1966 }
1967 h->maxcode[j] = 0xffffffff;
1968
1969 // build non-spec acceleration table; 255 is flag for not-accelerated
1970 memset(h->fast, 255, 1 << FAST_BITS);
1971 for (i=0; i < k; ++i) {
1972 int s = h->size[i];
1973 if (s <= FAST_BITS) {
1974 int c = h->code[i] << (FAST_BITS-s);
1975 int m = 1 << (FAST_BITS-s);
1976 for (j=0; j < m; ++j) {
1977 h->fast[c+j] = (stbi_uc) i;
1978 }
1979 }
1980 }
1981 return 1;
1982}
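// worked example of the canonical code assignment above (illustrative):
// with count[] = { 0, 2, 1, 0, ... } -- no 1-bit codes, two 2-bit codes,
// one 3-bit code -- the loop assigns:
//    symbol 0: size 2, code 00
//    symbol 1: size 2, code 01
//    symbol 2: size 3, code 100   (code doubled from 2 to 4 as j went 2->3)
// and maxcode[2] = 2 << 14, maxcode[3] = 5 << 13, so the decoder can compare
// a 16-bit window against these preshifted limits without shifting per size.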
1983
1984// build a table that decodes both magnitude and value of small ACs in
1985// one go.
1986static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h)
1987{
1988 int i;
1989 for (i=0; i < (1 << FAST_BITS); ++i) {
1990 stbi_uc fast = h->fast[i];
1991 fast_ac[i] = 0;
1992 if (fast < 255) {
1993 int rs = h->values[fast];
1994 int run = (rs >> 4) & 15;
1995 int magbits = rs & 15;
1996 int len = h->size[fast];
1997
1998 if (magbits && len + magbits <= FAST_BITS) {
1999 // magnitude code followed by receive_extend code
2000 int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits);
2001 int m = 1 << (magbits - 1);
2002 if (k < m) k += (~0U << magbits) + 1;
2003 // if the result is small enough, we can fit it in fast_ac table
2004 if (k >= -128 && k <= 127)
2005 fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits));
2006 }
2007 }
2008 }
2009}
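// worked example of the packed fast_ac format above (illustrative): with
// k = -3, run = 1, len = 4 and magbits = 2, the entry is
// -3*256 + 1*16 + (4+2) = -746; the decoder later unpacks the coefficient
// value as (r >> 8) == -3, the zero run as ((r >> 4) & 15) == 1, and the
// total bits to consume as (r & 15) == 6.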
2010
2011static void stbi__grow_buffer_unsafe(stbi__jpeg *j)
2012{
2013 do {
2014 unsigned int b = j->nomore ? 0 : stbi__get8(j->s);
2015 if (b == 0xff) {
2016 int c = stbi__get8(j->s);
2017 while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes
2018 if (c != 0) {
2019 j->marker = (unsigned char) c;
2020 j->nomore = 1;
2021 return;
2022 }
2023 }
2024 j->code_buffer |= b << (24 - j->code_bits);
2025 j->code_bits += 8;
2026 } while (j->code_bits <= 24);
2027}
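// illustration of the byte-stuffing rule handled above (not from the
// original source): within entropy-coded data a literal 0xff byte is always
// written as the pair 0xff 0x00, so reading 0xff then 0x00 feeds 0xff into
// code_buffer; 0xff followed by anything else is a real marker, which stops
// the refill by recording the marker and setting j->nomore.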
2028
2029// (1 << n) - 1
2030static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535};
2031
2032// decode a jpeg huffman value from the bitstream
2033stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h)
2034{
2035 unsigned int temp;
2036 int c,k;
2037
2038 if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
2039
2040 // look at the top FAST_BITS and determine what symbol ID it is,
2041 // if the code is <= FAST_BITS
2042 c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
2043 k = h->fast[c];
2044 if (k < 255) {
2045 int s = h->size[k];
2046 if (s > j->code_bits)
2047 return -1;
2048 j->code_buffer <<= s;
2049 j->code_bits -= s;
2050 return h->values[k];
2051 }
2052
2053 // the naive test is to shift the code_buffer down so k bits are
2054 // valid, then test against maxcode. To speed this up, we've
2055 // preshifted maxcode left so that it has (16-k) 0s at the
2056 // end; in other words, regardless of the code length, the
2057 // comparison is against a value shifted to occupy 16 bits,
2058 // so we don't need to shift inside the loop.
2059 temp = j->code_buffer >> 16;
2060 for (k=FAST_BITS+1 ; ; ++k)
2061 if (temp < h->maxcode[k])
2062 break;
2063 if (k == 17) {
2064 // error! code not found
2065 j->code_bits -= 16;
2066 return -1;
2067 }
2068
2069 if (k > j->code_bits)
2070 return -1;
2071
2072 // convert the huffman code to the symbol id
2073 c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k];
2074 STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]);
2075
2076 // convert the id to a symbol
2077 j->code_bits -= k;
2078 j->code_buffer <<= k;
2079 return h->values[c];
2080}
2081
2082// bias[n] = (-1<<n) + 1
2083static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767};
2084
2085// combined JPEG 'receive' and JPEG 'extend', since baseline
2086// always extends everything it receives.
2087stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n)
2088{
2089 unsigned int k;
2090 int sgn;
2091 if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
2092
2093 sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB
2094 k = stbi_lrot(j->code_buffer, n);
2095 if (n < 0 || n >= (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask))) return 0;
2096 j->code_buffer = k & ~stbi__bmask[n];
2097 k &= stbi__bmask[n];
2098 j->code_bits -= n;
2099 return k + (stbi__jbias[n] & ~sgn);
2100}
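// worked example of receive+extend above (illustrative): for n = 3 the raw
// 3-bit values 0..7 decode to -7,-6,-5,-4,4,5,6,7. e.g. raw = 2 (binary 010)
// has its top bit clear, so sgn == 0 and the bias applies:
// 2 + stbi__jbias[3] = 2 + (-7) = -5; raw = 5 (binary 101) has its top bit
// set, so sgn == -1 masks the bias away and the value stays 5.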
2101
2102// get some unsigned bits
2103stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n)
2104{
2105 unsigned int k;
2106 if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
2107 k = stbi_lrot(j->code_buffer, n);
2108 j->code_buffer = k & ~stbi__bmask[n];
2109 k &= stbi__bmask[n];
2110 j->code_bits -= n;
2111 return k;
2112}
2113
2114stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j)
2115{
2116 unsigned int k;
2117 if (j->code_bits < 1) stbi__grow_buffer_unsafe(j);
2118 k = j->code_buffer;
2119 j->code_buffer <<= 1;
2120 --j->code_bits;
2121 return k & 0x80000000;
2122}
2123
2124// given a value that's at position X in the zigzag stream,
2125// where does it appear in the 8x8 matrix coded as row-major?
2126static const stbi_uc stbi__jpeg_dezigzag[64+15] =
2127{
2128 0, 1, 8, 16, 9, 2, 3, 10,
2129 17, 24, 32, 25, 18, 11, 4, 5,
2130 12, 19, 26, 33, 40, 48, 41, 34,
2131 27, 20, 13, 6, 7, 14, 21, 28,
2132 35, 42, 49, 56, 57, 50, 43, 36,
2133 29, 22, 15, 23, 30, 37, 44, 51,
2134 58, 59, 52, 45, 38, 31, 39, 46,
2135 53, 60, 61, 54, 47, 55, 62, 63,
2136 // extra entries let corrupt input sample past the end of the table
2137 63, 63, 63, 63, 63, 63, 63, 63,
2138 63, 63, 63, 63, 63, 63, 63
2139};
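// illustration (not from the original source): zigzag index 2 starts the
// second anti-diagonal, i.e. row 1, column 0, which is row-major offset 8 --
// matching the third entry of the table above.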
2140
2141// decode one 64-entry block--
2142static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant)
2143{
2144 int diff,dc,k;
2145 int t;
2146
2147 if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
2148 t = stbi__jpeg_huff_decode(j, hdc);
2149 if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG");
2150
2151 // 0 all the ac values now so we can do it 32-bits at a time
2152 memset(data,0,64*sizeof(data[0]));
2153
2154 diff = t ? stbi__extend_receive(j, t) : 0;
2155 dc = j->img_comp[b].dc_pred + diff;
2156 j->img_comp[b].dc_pred = dc;
2157 data[0] = (short) (dc * dequant[0]);
2158
2159 // decode AC components, see JPEG spec
2160 k = 1;
2161 do {
2162 unsigned int zig;
2163 int c,r,s;
2164 if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
2165 c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
2166 r = fac[c];
2167 if (r) { // fast-AC path
2168 k += (r >> 4) & 15; // run
2169 s = r & 15; // combined length
2170 j->code_buffer <<= s;
2171 j->code_bits -= s;
2172 // decode into unzigzag'd location
2173 zig = stbi__jpeg_dezigzag[k++];
2174 data[zig] = (short) ((r >> 8) * dequant[zig]);
2175 } else {
2176 int rs = stbi__jpeg_huff_decode(j, hac);
2177 if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
2178 s = rs & 15;
2179 r = rs >> 4;
2180 if (s == 0) {
2181 if (rs != 0xf0) break; // end block
2182 k += 16;
2183 } else {
2184 k += r;
2185 // decode into unzigzag'd location
2186 zig = stbi__jpeg_dezigzag[k++];
2187 data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]);
2188 }
2189 }
2190 } while (k < 64);
2191 return 1;
2192}
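// worked example of the AC loop above (illustrative): a huffman symbol
// rs = 0x53 means r = 5, s = 3 -- skip 5 zero coefficients, then read a
// 3-bit magnitude via stbi__extend_receive and store it at the next
// dezigzagged position; rs = 0x00 (EOB) ends the block early and rs = 0xf0
// (ZRL) skips a run of 16 zeros.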
2193
2194static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b)
2195{
2196 int diff,dc;
2197 int t;
2198 if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");
2199
2200 if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
2201
2202 if (j->succ_high == 0) {
2203 // first scan for DC coefficient, must be first
2204 memset(data,0,64*sizeof(data[0])); // 0 all the ac values now
2205 t = stbi__jpeg_huff_decode(j, hdc);
2206 if (t == -1) return stbi__err("can't merge dc and ac", "Corrupt JPEG");
2207 diff = t ? stbi__extend_receive(j, t) : 0;
2208
2209 dc = j->img_comp[b].dc_pred + diff;
2210 j->img_comp[b].dc_pred = dc;
2211 data[0] = (short) (dc << j->succ_low);
2212 } else {
2213 // refinement scan for DC coefficient
2214 if (stbi__jpeg_get_bit(j))
2215 data[0] += (short) (1 << j->succ_low);
2216 }
2217 return 1;
2218}
2219
2220// @OPTIMIZE: store non-zigzagged during the decode passes,
2221// and only de-zigzag when dequantizing
2222static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac)
2223{
2224 int k;
2225 if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");
2226
2227 if (j->succ_high == 0) {
2228 int shift = j->succ_low;
2229
2230 if (j->eob_run) {
2231 --j->eob_run;
2232 return 1;
2233 }
2234
2235 k = j->spec_start;
2236 do {
2237 unsigned int zig;
2238 int c,r,s;
2239 if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
2240 c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
2241 r = fac[c];
2242 if (r) { // fast-AC path
2243 k += (r >> 4) & 15; // run
2244 s = r & 15; // combined length
2245 j->code_buffer <<= s;
2246 j->code_bits -= s;
2247 zig = stbi__jpeg_dezigzag[k++];
2248 data[zig] = (short) ((r >> 8) << shift);
2249 } else {
2250 int rs = stbi__jpeg_huff_decode(j, hac);
2251 if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
2252 s = rs & 15;
2253 r = rs >> 4;
2254 if (s == 0) {
2255 if (r < 15) {
2256 j->eob_run = (1 << r);
2257 if (r)
2258 j->eob_run += stbi__jpeg_get_bits(j, r);
2259 --j->eob_run;
2260 break;
2261 }
2262 k += 16;
2263 } else {
2264 k += r;
2265 zig = stbi__jpeg_dezigzag[k++];
2266 data[zig] = (short) (stbi__extend_receive(j,s) << shift);
2267 }
2268 }
2269 } while (k <= j->spec_end);
2270 } else {
2271 // refinement scan for these AC coefficients
2272
2273 short bit = (short) (1 << j->succ_low);
2274
2275 if (j->eob_run) {
2276 --j->eob_run;
2277 for (k = j->spec_start; k <= j->spec_end; ++k) {
2278 short *p = &data[stbi__jpeg_dezigzag[k]];
2279 if (*p != 0)
2280 if (stbi__jpeg_get_bit(j))
2281 if ((*p & bit)==0) {
2282 if (*p > 0)
2283 *p += bit;
2284 else
2285 *p -= bit;
2286 }
2287 }
2288 } else {
2289 k = j->spec_start;
2290 do {
2291 int r,s;
2292 int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh
2293 if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
2294 s = rs & 15;
2295 r = rs >> 4;
2296 if (s == 0) {
2297 if (r < 15) {
2298 j->eob_run = (1 << r) - 1;
2299 if (r)
2300 j->eob_run += stbi__jpeg_get_bits(j, r);
2301 r = 64; // force end of block
2302 } else {
2303 // r=15 s=0 should write 16 0s, so we just do
2304 // a run of 15 0s and then write s (which is 0),
2305 // so we don't have to do anything special here
2306 }
2307 } else {
2308 if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG");
2309 // sign bit
2310 if (stbi__jpeg_get_bit(j))
2311 s = bit;
2312 else
2313 s = -bit;
2314 }
2315
2316 // advance by r
2317 while (k <= j->spec_end) {
2318 short *p = &data[stbi__jpeg_dezigzag[k++]];
2319 if (*p != 0) {
2320 if (stbi__jpeg_get_bit(j))
2321 if ((*p & bit)==0) {
2322 if (*p > 0)
2323 *p += bit;
2324 else
2325 *p -= bit;
2326 }
2327 } else {
2328 if (r == 0) {
2329 *p = (short) s;
2330 break;
2331 }
2332 --r;
2333 }
2334 }
2335 } while (k <= j->spec_end);
2336 }
2337 }
2338 return 1;
2339}
2340
2341// clamp an int to 0..255; the +128-biased IDCT output is nominally in range, but rounding can push it slightly outside
2342stbi_inline static stbi_uc stbi__clamp(int x)
2343{
2344 // trick to use a single test to catch both cases
2345 if ((unsigned int) x > 255) {
2346 if (x < 0) return 0;
2347 if (x > 255) return 255;
2348 }
2349 return (stbi_uc) x;
2350}
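// illustration of the single-test trick above: the unsigned cast maps
// negative inputs to huge values (x = -1 becomes 0xffffffff), so one
// comparison catches both underflow and overflow before taking the slower
// two-branch path.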
2351
2352#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5)))
2353#define stbi__fsh(x) ((x) * 4096)
2354
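// worked example of the 12-bit fixed-point encoding above (illustrative):
// stbi__f2f(0.5411961f) == (int)(0.5411961*4096 + 0.5) == 2217, so a product
// p2*stbi__f2f(c) yields p2*c scaled by 4096, and stbi__fsh applies the same
// 1<<12 scale to pass-through terms so every path of the butterfly carries
// the same fixed-point shift.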
2355// derived from jidctint -- DCT_ISLOW
2356#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \
2357 int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \
2358 p2 = s2; \
2359 p3 = s6; \
2360 p1 = (p2+p3) * stbi__f2f(0.5411961f); \
2361 t2 = p1 + p3*stbi__f2f(-1.847759065f); \
2362 t3 = p1 + p2*stbi__f2f( 0.765366865f); \
2363 p2 = s0; \
2364 p3 = s4; \
2365 t0 = stbi__fsh(p2+p3); \
2366 t1 = stbi__fsh(p2-p3); \
2367 x0 = t0+t3; \
2368 x3 = t0-t3; \
2369 x1 = t1+t2; \
2370 x2 = t1-t2; \
2371 t0 = s7; \
2372 t1 = s5; \
2373 t2 = s3; \
2374 t3 = s1; \
2375 p3 = t0+t2; \
2376 p4 = t1+t3; \
2377 p1 = t0+t3; \
2378 p2 = t1+t2; \
2379 p5 = (p3+p4)*stbi__f2f( 1.175875602f); \
2380 t0 = t0*stbi__f2f( 0.298631336f); \
2381 t1 = t1*stbi__f2f( 2.053119869f); \
2382 t2 = t2*stbi__f2f( 3.072711026f); \
2383 t3 = t3*stbi__f2f( 1.501321110f); \
2384 p1 = p5 + p1*stbi__f2f(-0.899976223f); \
2385 p2 = p5 + p2*stbi__f2f(-2.562915447f); \
2386 p3 = p3*stbi__f2f(-1.961570560f); \
2387 p4 = p4*stbi__f2f(-0.390180644f); \
2388 t3 += p1+p4; \
2389 t2 += p2+p3; \
2390 t1 += p2+p4; \
2391 t0 += p1+p3;
2392
2393static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64])
2394{
2395 int i,val[64],*v=val;
2396 stbi_uc *o;
2397 short *d = data;
2398
2399 // columns
2400 for (i=0; i < 8; ++i,++d, ++v) {
2401 // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing them
2402 if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0
2403 && d[40]==0 && d[48]==0 && d[56]==0) {
2404 // no shortcut:             0      seconds
2405 // (1|2|3|4|5|6|7)==0:      0      seconds
2406 // all separate:           -0.047  seconds
2407 // 1 && 2|3 && 4|5 && 6|7:  -0.047  seconds
2408 int dcterm = d[0]*4;
2409 v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm;
2410 } else {
2411 STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56])
2412 // constants scaled things up by 1<<12; let's bring them back
2413 // down, but keep 2 extra bits of precision
2414 x0 += 512; x1 += 512; x2 += 512; x3 += 512;
2415 v[ 0] = (x0+t3) >> 10;
2416 v[56] = (x0-t3) >> 10;
2417 v[ 8] = (x1+t2) >> 10;
2418 v[48] = (x1-t2) >> 10;
2419 v[16] = (x2+t1) >> 10;
2420 v[40] = (x2-t1) >> 10;
2421 v[24] = (x3+t0) >> 10;
2422 v[32] = (x3-t0) >> 10;
2423 }
2424 }
2425
2426 for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) {
2427 // no fast case since the first 1D IDCT spread components out
2428 STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7])
2429 // constants scaled things up by 1<<12, plus we had 1<<2 from first
2430 // loop, plus horizontal and vertical each scale by sqrt(8) so together
2431 // we've got an extra 1<<3, so 1<<17 total we need to remove.
2432 // so we want to round that, which means adding 0.5 * 1<<17,
2433 // aka 65536. Also, we'll end up with -128 to 127 that we want
2434 // to encode as 0..255 by adding 128, so we'll add that before the shift
2435 x0 += 65536 + (128<<17);
2436 x1 += 65536 + (128<<17);
2437 x2 += 65536 + (128<<17);
2438 x3 += 65536 + (128<<17);
2439 // tried computing the shifts into temps, or'ing the temps to see
2440 // if any were out of range, but that was slower
2441 o[0] = stbi__clamp((x0+t3) >> 17);
2442 o[7] = stbi__clamp((x0-t3) >> 17);
2443 o[1] = stbi__clamp((x1+t2) >> 17);
2444 o[6] = stbi__clamp((x1-t2) >> 17);
2445 o[2] = stbi__clamp((x2+t1) >> 17);
2446 o[5] = stbi__clamp((x2-t1) >> 17);
2447 o[3] = stbi__clamp((x3+t0) >> 17);
2448 o[4] = stbi__clamp((x3-t0) >> 17);
2449 }
2450}
2451
2452#ifdef STBI_SSE2
2453// sse2 integer IDCT. not the fastest possible implementation but it
2454// produces bit-identical results to the generic C version so it's
2455// fully "transparent".
2456static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64])
2457{
2458 // This is constructed to match our regular (generic) integer IDCT exactly.
2459 __m128i row0, row1, row2, row3, row4, row5, row6, row7;
2460 __m128i tmp;
2461
2462 // dot product constant: even elems=x, odd elems=y
2463 #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y))
2464
2465 // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit)
2466 // out(1) = c1[even]*x + c1[odd]*y
2467 #define dct_rot(out0,out1, x,y,c0,c1) \
2468 __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \
2469 __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \
2470 __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \
2471 __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \
2472 __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \
2473 __m128i out1##_h = _mm_madd_epi16(c0##hi, c1)
2474
2475 // out = in << 12 (in 16-bit, out 32-bit)
2476 #define dct_widen(out, in) \
2477 __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \
2478 __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4)
2479
2480 // wide add
2481 #define dct_wadd(out, a, b) \
2482 __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \
2483 __m128i out##_h = _mm_add_epi32(a##_h, b##_h)
2484
2485 // wide sub
2486 #define dct_wsub(out, a, b) \
2487 __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \
2488 __m128i out##_h = _mm_sub_epi32(a##_h, b##_h)
2489
2490 // butterfly a/b, add bias, then shift by "s" and pack
2491 #define dct_bfly32o(out0, out1, a,b,bias,s) \
2492 { \
2493 __m128i abiased_l = _mm_add_epi32(a##_l, bias); \
2494 __m128i abiased_h = _mm_add_epi32(a##_h, bias); \
2495 dct_wadd(sum, abiased, b); \
2496 dct_wsub(dif, abiased, b); \
2497 out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \
2498 out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \
2499 }
2500
2501 // 8-bit interleave step (for transposes)
2502 #define dct_interleave8(a, b) \
2503 tmp = a; \
2504 a = _mm_unpacklo_epi8(a, b); \
2505 b = _mm_unpackhi_epi8(tmp, b)
2506
2507 // 16-bit interleave step (for transposes)
2508 #define dct_interleave16(a, b) \
2509 tmp = a; \
2510 a = _mm_unpacklo_epi16(a, b); \
2511 b = _mm_unpackhi_epi16(tmp, b)
2512
2513 #define dct_pass(bias,shift) \
2514 { \
2515 /* even part */ \
2516 dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \
2517 __m128i sum04 = _mm_add_epi16(row0, row4); \
2518 __m128i dif04 = _mm_sub_epi16(row0, row4); \
2519 dct_widen(t0e, sum04); \
2520 dct_widen(t1e, dif04); \
2521 dct_wadd(x0, t0e, t3e); \
2522 dct_wsub(x3, t0e, t3e); \
2523 dct_wadd(x1, t1e, t2e); \
2524 dct_wsub(x2, t1e, t2e); \
2525 /* odd part */ \
2526 dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \
2527 dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \
2528 __m128i sum17 = _mm_add_epi16(row1, row7); \
2529 __m128i sum35 = _mm_add_epi16(row3, row5); \
2530 dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \
2531 dct_wadd(x4, y0o, y4o); \
2532 dct_wadd(x5, y1o, y5o); \
2533 dct_wadd(x6, y2o, y5o); \
2534 dct_wadd(x7, y3o, y4o); \
2535 dct_bfly32o(row0,row7, x0,x7,bias,shift); \
2536 dct_bfly32o(row1,row6, x1,x6,bias,shift); \
2537 dct_bfly32o(row2,row5, x2,x5,bias,shift); \
2538 dct_bfly32o(row3,row4, x3,x4,bias,shift); \
2539 }
2540
2541 __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f));
2542 __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f));
2543 __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f));
2544 __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f));
2545 __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f));
2546 __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f));
2547 __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f));
2548 __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f));
2549
2550 // rounding biases in column/row passes, see stbi__idct_block for explanation.
2551 __m128i bias_0 = _mm_set1_epi32(512);
2552 __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17));
2553
2554 // load
2555 row0 = _mm_load_si128((const __m128i *) (data + 0*8));
2556 row1 = _mm_load_si128((const __m128i *) (data + 1*8));
2557 row2 = _mm_load_si128((const __m128i *) (data + 2*8));
2558 row3 = _mm_load_si128((const __m128i *) (data + 3*8));
2559 row4 = _mm_load_si128((const __m128i *) (data + 4*8));
2560 row5 = _mm_load_si128((const __m128i *) (data + 5*8));
2561 row6 = _mm_load_si128((const __m128i *) (data + 6*8));
2562 row7 = _mm_load_si128((const __m128i *) (data + 7*8));
2563
2564 // column pass
2565 dct_pass(bias_0, 10);
2566
2567 {
2568 // 16bit 8x8 transpose pass 1
2569 dct_interleave16(row0, row4);
2570 dct_interleave16(row1, row5);
2571 dct_interleave16(row2, row6);
2572 dct_interleave16(row3, row7);
2573
2574 // transpose pass 2
2575 dct_interleave16(row0, row2);
2576 dct_interleave16(row1, row3);
2577 dct_interleave16(row4, row6);
2578 dct_interleave16(row5, row7);
2579
2580 // transpose pass 3
2581 dct_interleave16(row0, row1);
2582 dct_interleave16(row2, row3);
2583 dct_interleave16(row4, row5);
2584 dct_interleave16(row6, row7);
2585 }
2586
2587 // row pass
2588 dct_pass(bias_1, 17);
2589
2590 {
2591 // pack
2592 __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7
2593 __m128i p1 = _mm_packus_epi16(row2, row3);
2594 __m128i p2 = _mm_packus_epi16(row4, row5);
2595 __m128i p3 = _mm_packus_epi16(row6, row7);
2596
2597 // 8bit 8x8 transpose pass 1
2598 dct_interleave8(p0, p2); // a0e0a1e1...
2599 dct_interleave8(p1, p3); // c0g0c1g1...
2600
2601 // transpose pass 2
2602 dct_interleave8(p0, p1); // a0c0e0g0...
2603 dct_interleave8(p2, p3); // b0d0f0h0...
2604
2605 // transpose pass 3
2606 dct_interleave8(p0, p2); // a0b0c0d0...
2607 dct_interleave8(p1, p3); // a4b4c4d4...
2608
2609 // store
2610 _mm_storel_epi64((__m128i *) out, p0); out += out_stride;
2611 _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride;
2612 _mm_storel_epi64((__m128i *) out, p2); out += out_stride;
2613 _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride;
2614 _mm_storel_epi64((__m128i *) out, p1); out += out_stride;
2615 _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride;
2616 _mm_storel_epi64((__m128i *) out, p3); out += out_stride;
2617 _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e));
2618 }
2619
2620#undef dct_const
2621#undef dct_rot
2622#undef dct_widen
2623#undef dct_wadd
2624#undef dct_wsub
2625#undef dct_bfly32o
2626#undef dct_interleave8
2627#undef dct_interleave16
2628#undef dct_pass
2629}
2630
2631#endif // STBI_SSE2
2632
2633#ifdef STBI_NEON
2634
2635// NEON integer IDCT. should produce bit-identical
2636// results to the generic C version.
2637static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64])
2638{
2639 int16x8_t row0, row1, row2, row3, row4, row5, row6, row7;
2640
2641 int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f));
2642 int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f));
2643 int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f));
2644 int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f));
2645 int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f));
2646 int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f));
2647 int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f));
2648 int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f));
2649 int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f));
2650 int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f));
2651 int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f));
2652 int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f));
2653
2654#define dct_long_mul(out, inq, coeff) \
2655 int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \
2656 int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff)
2657
2658#define dct_long_mac(out, acc, inq, coeff) \
2659 int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \
2660 int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff)
2661
2662#define dct_widen(out, inq) \
2663 int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \
2664 int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12)
2665
2666// wide add
2667#define dct_wadd(out, a, b) \
2668 int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \
2669 int32x4_t out##_h = vaddq_s32(a##_h, b##_h)
2670
2671// wide sub
2672#define dct_wsub(out, a, b) \
2673 int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \
2674 int32x4_t out##_h = vsubq_s32(a##_h, b##_h)
2675
2676// butterfly a/b, then shift using "shiftop" by "s" and pack
2677#define dct_bfly32o(out0,out1, a,b,shiftop,s) \
2678 { \
2679 dct_wadd(sum, a, b); \
2680 dct_wsub(dif, a, b); \
2681 out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \
2682 out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \
2683 }
2684
2685#define dct_pass(shiftop, shift) \
2686 { \
2687 /* even part */ \
2688 int16x8_t sum26 = vaddq_s16(row2, row6); \
2689 dct_long_mul(p1e, sum26, rot0_0); \
2690 dct_long_mac(t2e, p1e, row6, rot0_1); \
2691 dct_long_mac(t3e, p1e, row2, rot0_2); \
2692 int16x8_t sum04 = vaddq_s16(row0, row4); \
2693 int16x8_t dif04 = vsubq_s16(row0, row4); \
2694 dct_widen(t0e, sum04); \
2695 dct_widen(t1e, dif04); \
2696 dct_wadd(x0, t0e, t3e); \
2697 dct_wsub(x3, t0e, t3e); \
2698 dct_wadd(x1, t1e, t2e); \
2699 dct_wsub(x2, t1e, t2e); \
2700 /* odd part */ \
2701 int16x8_t sum15 = vaddq_s16(row1, row5); \
2702 int16x8_t sum17 = vaddq_s16(row1, row7); \
2703 int16x8_t sum35 = vaddq_s16(row3, row5); \
2704 int16x8_t sum37 = vaddq_s16(row3, row7); \
2705 int16x8_t sumodd = vaddq_s16(sum17, sum35); \
2706 dct_long_mul(p5o, sumodd, rot1_0); \
2707 dct_long_mac(p1o, p5o, sum17, rot1_1); \
2708 dct_long_mac(p2o, p5o, sum35, rot1_2); \
2709 dct_long_mul(p3o, sum37, rot2_0); \
2710 dct_long_mul(p4o, sum15, rot2_1); \
2711 dct_wadd(sump13o, p1o, p3o); \
2712 dct_wadd(sump24o, p2o, p4o); \
2713 dct_wadd(sump23o, p2o, p3o); \
2714 dct_wadd(sump14o, p1o, p4o); \
2715 dct_long_mac(x4, sump13o, row7, rot3_0); \
2716 dct_long_mac(x5, sump24o, row5, rot3_1); \
2717 dct_long_mac(x6, sump23o, row3, rot3_2); \
2718 dct_long_mac(x7, sump14o, row1, rot3_3); \
2719 dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \
2720 dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \
2721 dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \
2722 dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \
2723 }
2724
2725 // load
2726 row0 = vld1q_s16(data + 0*8);
2727 row1 = vld1q_s16(data + 1*8);
2728 row2 = vld1q_s16(data + 2*8);
2729 row3 = vld1q_s16(data + 3*8);
2730 row4 = vld1q_s16(data + 4*8);
2731 row5 = vld1q_s16(data + 5*8);
2732 row6 = vld1q_s16(data + 6*8);
2733 row7 = vld1q_s16(data + 7*8);
2734
2735 // add DC bias
2736 row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0));
2737
2738 // column pass
2739 dct_pass(vrshrn_n_s32, 10);
2740
2741 // 16bit 8x8 transpose
2742 {
2743// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively.
2744// whether compilers actually get this is another story, sadly.
2745#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; }
2746#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); }
2747#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); }
2748
2749 // pass 1
2750 dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6
2751 dct_trn16(row2, row3);
2752 dct_trn16(row4, row5);
2753 dct_trn16(row6, row7);
2754
2755 // pass 2
2756 dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4
2757 dct_trn32(row1, row3);
2758 dct_trn32(row4, row6);
2759 dct_trn32(row5, row7);
2760
2761 // pass 3
2762 dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0
2763 dct_trn64(row1, row5);
2764 dct_trn64(row2, row6);
2765 dct_trn64(row3, row7);
2766
2767#undef dct_trn16
2768#undef dct_trn32
2769#undef dct_trn64
2770 }
2771
2772 // row pass
2773 // vrshrn_n_s32 only supports shifts up to 16, we need
2774 // 17. so do a non-rounding shift of 16 first then follow
2775 // up with a rounding shift by 1.
2776 dct_pass(vshrn_n_s32, 16);
2777
2778 {
2779 // pack and round
2780 uint8x8_t p0 = vqrshrun_n_s16(row0, 1);
2781 uint8x8_t p1 = vqrshrun_n_s16(row1, 1);
2782 uint8x8_t p2 = vqrshrun_n_s16(row2, 1);
2783 uint8x8_t p3 = vqrshrun_n_s16(row3, 1);
2784 uint8x8_t p4 = vqrshrun_n_s16(row4, 1);
2785 uint8x8_t p5 = vqrshrun_n_s16(row5, 1);
2786 uint8x8_t p6 = vqrshrun_n_s16(row6, 1);
2787 uint8x8_t p7 = vqrshrun_n_s16(row7, 1);
2788
2789 // again, these can translate into one instruction, but often don't.
2790#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; }
2791#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); }
2792#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); }
2793
2794 // sadly can't use interleaved stores here since we only write
2795 // 8 bytes to each scan line!
2796
2797 // 8x8 8-bit transpose pass 1
2798 dct_trn8_8(p0, p1);
2799 dct_trn8_8(p2, p3);
2800 dct_trn8_8(p4, p5);
2801 dct_trn8_8(p6, p7);
2802
2803 // pass 2
2804 dct_trn8_16(p0, p2);
2805 dct_trn8_16(p1, p3);
2806 dct_trn8_16(p4, p6);
2807 dct_trn8_16(p5, p7);
2808
2809 // pass 3
2810 dct_trn8_32(p0, p4);
2811 dct_trn8_32(p1, p5);
2812 dct_trn8_32(p2, p6);
2813 dct_trn8_32(p3, p7);
2814
2815 // store
2816 vst1_u8(out, p0); out += out_stride;
2817 vst1_u8(out, p1); out += out_stride;
2818 vst1_u8(out, p2); out += out_stride;
2819 vst1_u8(out, p3); out += out_stride;
2820 vst1_u8(out, p4); out += out_stride;
2821 vst1_u8(out, p5); out += out_stride;
2822 vst1_u8(out, p6); out += out_stride;
2823 vst1_u8(out, p7);
2824
2825#undef dct_trn8_8
2826#undef dct_trn8_16
2827#undef dct_trn8_32
2828 }
2829
2830#undef dct_long_mul
2831#undef dct_long_mac
2832#undef dct_widen
2833#undef dct_wadd
2834#undef dct_wsub
2835#undef dct_bfly32o
2836#undef dct_pass
2837}
2838
2839#endif // STBI_NEON
2840
2841#define STBI__MARKER_none 0xff
2842// if there's a pending marker from the entropy stream, return that;
2843// otherwise, fetch a marker from the stream. if there's no marker,
2844// return 0xff, which is never a valid marker value
2845static stbi_uc stbi__get_marker(stbi__jpeg *j)
2846{
2847 stbi_uc x;
2848 if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; }
2849 x = stbi__get8(j->s);
2850 if (x != 0xff) return STBI__MARKER_none;
2851 while (x == 0xff)
2852 x = stbi__get8(j->s); // consume repeated 0xff fill bytes
2853 return x;
2854}
2855
2856// in each scan, we'll have scan_n components, and the order
2857// of the components is specified by order[]
2858#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7)
2859
2860// after a restart interval, stbi__jpeg_reset the entropy decoder and
2861// the dc prediction
2862static void stbi__jpeg_reset(stbi__jpeg *j)
2863{
2864 j->code_bits = 0;
2865 j->code_buffer = 0;
2866 j->nomore = 0;
2867 j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0;
2868 j->marker = STBI__MARKER_none;
2869 j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff;
2870 j->eob_run = 0;
2871 // no more than 1<<31 MCUs if no restart_interval? that's plenty safe,
2872 // since we don't even allow 1<<30 pixels
2873}
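// illustration of the restart machinery (not from the original source): a
// DRI marker with interval 4 makes todo count down from 4, so after every
// 4 MCUs the scan loops below expect one of the RST0..RST7 markers
// (0xd0..0xd7), call stbi__jpeg_reset, and resume with dc_pred cleared and
// the bit buffer empty.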
2874
2875static int stbi__parse_entropy_coded_data(stbi__jpeg *z)
2876{
2877 stbi__jpeg_reset(z);
2878 if (!z->progressive) {
2879 if (z->scan_n == 1) {
2880 int i,j;
2881 STBI_SIMD_ALIGN(short, data[64]);
2882 int n = z->order[0];
2883 // non-interleaved data, we just need to process one block at a time,
2884 // in trivial scanline order
2885 // number of blocks to do just depends on how many actual "pixels" this
2886 // component has, independent of interleaved MCU blocking and such
2887 int w = (z->img_comp[n].x+7) >> 3;
2888 int h = (z->img_comp[n].y+7) >> 3;
2889 for (j=0; j < h; ++j) {
2890 for (i=0; i < w; ++i) {
2891 int ha = z->img_comp[n].ha;
2892 if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
2893 z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data);
2894 // every data block is an MCU, so countdown the restart interval
2895 if (--z->todo <= 0) {
2896 if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
2897 // if it's NOT a restart, then just bail, so we get corrupt data
2898 // rather than no data
2899 if (!STBI__RESTART(z->marker)) return 1;
2900 stbi__jpeg_reset(z);
2901 }
2902 }
2903 }
2904 return 1;
2905 } else { // interleaved
2906 int i,j,k,x,y;
2907 STBI_SIMD_ALIGN(short, data[64]);
2908 for (j=0; j < z->img_mcu_y; ++j) {
2909 for (i=0; i < z->img_mcu_x; ++i) {
2910 // scan an interleaved mcu... process scan_n components in order
2911 for (k=0; k < z->scan_n; ++k) {
2912 int n = z->order[k];
2913 // scan out an mcu's worth of this component; that's just determined
2914 // by the basic H and V specified for the component
2915 for (y=0; y < z->img_comp[n].v; ++y) {
2916 for (x=0; x < z->img_comp[n].h; ++x) {
2917 int x2 = (i*z->img_comp[n].h + x)*8;
2918 int y2 = (j*z->img_comp[n].v + y)*8;
2919 int ha = z->img_comp[n].ha;
2920 if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
2921 z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data);
2922 }
2923 }
2924 }
2925 // after all interleaved components, that's an interleaved MCU,
2926 // so now count down the restart interval
2927 if (--z->todo <= 0) {
2928 if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
2929 if (!STBI__RESTART(z->marker)) return 1;
2930 stbi__jpeg_reset(z);
2931 }
2932 }
2933 }
2934 return 1;
2935 }
2936 } else {
2937 if (z->scan_n == 1) {
2938 int i,j;
2939 int n = z->order[0];
2940 // non-interleaved data, we just need to process one block at a time,
2941 // in trivial scanline order
2942 // number of blocks to do just depends on how many actual "pixels" this
2943 // component has, independent of interleaved MCU blocking and such
2944 int w = (z->img_comp[n].x+7) >> 3;
2945 int h = (z->img_comp[n].y+7) >> 3;
2946 for (j=0; j < h; ++j) {
2947 for (i=0; i < w; ++i) {
2948 short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w);
2949 if (z->spec_start == 0) {
2950 if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n))
2951 return 0;
2952 } else {
2953 int ha = z->img_comp[n].ha;
2954 if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha]))
2955 return 0;
2956 }
2957 // every data block is an MCU, so countdown the restart interval
2958 if (--z->todo <= 0) {
2959 if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
2960 if (!STBI__RESTART(z->marker)) return 1;
2961 stbi__jpeg_reset(z);
2962 }
2963 }
2964 }
2965 return 1;
2966 } else { // interleaved
2967 int i,j,k,x,y;
2968 for (j=0; j < z->img_mcu_y; ++j) {
2969 for (i=0; i < z->img_mcu_x; ++i) {
2970 // scan an interleaved mcu... process scan_n components in order
2971 for (k=0; k < z->scan_n; ++k) {
2972 int n = z->order[k];
2973 // scan out an mcu's worth of this component; that's just determined
2974 // by the basic H and V specified for the component
2975 for (y=0; y < z->img_comp[n].v; ++y) {
2976 for (x=0; x < z->img_comp[n].h; ++x) {
2977 int x2 = (i*z->img_comp[n].h + x);
2978 int y2 = (j*z->img_comp[n].v + y);
2979 short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w);
2980 if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n))
2981 return 0;
2982 }
2983 }
2984 }
2985 // after all interleaved components, that's an interleaved MCU,
2986 // so now count down the restart interval
2987 if (--z->todo <= 0) {
2988 if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
2989 if (!STBI__RESTART(z->marker)) return 1;
2990 stbi__jpeg_reset(z);
2991 }
2992 }
2993 }
2994 return 1;
2995 }
2996 }
2997}
2998
2999static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant)
3000{
3001 int i;
3002 for (i=0; i < 64; ++i)
3003 data[i] *= dequant[i];
3004}
3005
3006static void stbi__jpeg_finish(stbi__jpeg *z)
3007{
3008 if (z->progressive) {
3009 // dequantize and idct the data
3010 int i,j,n;
3011 for (n=0; n < z->s->img_n; ++n) {
3012 int w = (z->img_comp[n].x+7) >> 3;
3013 int h = (z->img_comp[n].y+7) >> 3;
3014 for (j=0; j < h; ++j) {
3015 for (i=0; i < w; ++i) {
3016 short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w);
3017 stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]);
3018 z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data);
3019 }
3020 }
3021 }
3022 }
3023}
3024
3025static int stbi__process_marker(stbi__jpeg *z, int m)
3026{
3027 int L;
3028 switch (m) {
3029 case STBI__MARKER_none: // no marker found
3030 return stbi__err("expected marker","Corrupt JPEG");
3031
3032 case 0xDD: // DRI - specify restart interval
3033 if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG");
3034 z->restart_interval = stbi__get16be(z->s);
3035 return 1;
3036
3037 case 0xDB: // DQT - define quantization table
3038 L = stbi__get16be(z->s)-2;
3039 while (L > 0) {
3040 int q = stbi__get8(z->s);
3041 int p = q >> 4, sixteen = (p != 0);
3042 int t = q & 15,i;
3043 if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG");
3044 if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG");
3045
3046 for (i=0; i < 64; ++i)
3047 z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s));
3048 L -= (sixteen ? 129 : 65);
3049 }
3050 return L==0;
3051
3052 case 0xC4: // DHT - define huffman table
3053 L = stbi__get16be(z->s)-2;
3054 while (L > 0) {
3055 stbi_uc *v;
3056 int sizes[16],i,n=0;
3057 int q = stbi__get8(z->s);
3058 int tc = q >> 4;
3059 int th = q & 15;
3060 if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG");
3061 for (i=0; i < 16; ++i) {
3062 sizes[i] = stbi__get8(z->s);
3063 n += sizes[i];
3064 }
3065 L -= 17;
3066 if (tc == 0) {
3067 if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0;
3068 v = z->huff_dc[th].values;
3069 } else {
3070 if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0;
3071 v = z->huff_ac[th].values;
3072 }
3073 for (i=0; i < n; ++i)
3074 v[i] = stbi__get8(z->s);
3075 if (tc != 0)
3076 stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th);
3077 L -= n;
3078 }
3079 return L==0;
3080 }
3081
3082 // check for comment block or APP blocks
3083 if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) {
3084 L = stbi__get16be(z->s);
3085 if (L < 2) {
3086 if (m == 0xFE)
3087 return stbi__err("bad COM len","Corrupt JPEG");
3088 else
3089 return stbi__err("bad APP len","Corrupt JPEG");
3090 }
3091 L -= 2;
3092
3093 if (m == 0xE0 && L >= 5) { // JFIF APP0 segment
3094 static const unsigned char tag[5] = {'J','F','I','F','\0'};
3095 int ok = 1;
3096 int i;
3097 for (i=0; i < 5; ++i)
3098 if (stbi__get8(z->s) != tag[i])
3099 ok = 0;
3100 L -= 5;
3101 if (ok)
3102 z->jfif = 1;
3103 } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment
3104 static const unsigned char tag[6] = {'A','d','o','b','e','\0'};
3105 int ok = 1;
3106 int i;
3107 for (i=0; i < 6; ++i)
3108 if (stbi__get8(z->s) != tag[i])
3109 ok = 0;
3110 L -= 6;
3111 if (ok) {
3112 stbi__get8(z->s); // version
3113 stbi__get16be(z->s); // flags0
3114 stbi__get16be(z->s); // flags1
3115 z->app14_color_transform = stbi__get8(z->s); // color transform
3116 L -= 6;
3117 }
3118 }
3119
3120 stbi__skip(z->s, L);
3121 return 1;
3122 }
3123
3124 return stbi__err("unknown marker","Corrupt JPEG");
3125}
3126
3127// after we see SOS
3128static int stbi__process_scan_header(stbi__jpeg *z)
3129{
3130 int i;
3131 int Ls = stbi__get16be(z->s);
3132 z->scan_n = stbi__get8(z->s);
3133 if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG");
3134 if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG");
3135 for (i=0; i < z->scan_n; ++i) {
3136 int id = stbi__get8(z->s), which;
3137 int q = stbi__get8(z->s);
3138 for (which = 0; which < z->s->img_n; ++which)
3139 if (z->img_comp[which].id == id)
3140 break;
3141 if (which == z->s->img_n) return 0; // no match
3142 z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG");
3143 z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG");
3144 z->order[i] = which;
3145 }
3146
3147 {
3148 int aa;
3149 z->spec_start = stbi__get8(z->s);
3150 z->spec_end = stbi__get8(z->s); // should be 63, but might be 0
3151 aa = stbi__get8(z->s);
3152 z->succ_high = (aa >> 4);
3153 z->succ_low = (aa & 15);
3154 if (z->progressive) {
3155 if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13)
3156 return stbi__err("bad SOS", "Corrupt JPEG");
3157 } else {
3158 if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG");
3159 if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG");
3160 z->spec_end = 63;
3161 }
3162 }
3163
3164 return 1;
3165}
3166
3167static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why)
3168{
3169 int i;
3170 for (i=0; i < ncomp; ++i) {
3171 if (z->img_comp[i].raw_data) {
3172 STBI_FREE(z->img_comp[i].raw_data);
3173 z->img_comp[i].raw_data = NULL;
3174 z->img_comp[i].data = NULL;
3175 }
3176 if (z->img_comp[i].raw_coeff) {
3177 STBI_FREE(z->img_comp[i].raw_coeff);
3178 z->img_comp[i].raw_coeff = 0;
3179 z->img_comp[i].coeff = 0;
3180 }
3181 if (z->img_comp[i].linebuf) {
3182 STBI_FREE(z->img_comp[i].linebuf);
3183 z->img_comp[i].linebuf = NULL;
3184 }
3185 }
3186 return why;
3187}
3188
3189static int stbi__process_frame_header(stbi__jpeg *z, int scan)
3190{
3191 stbi__context *s = z->s;
3192 int Lf,p,i,q, h_max=1,v_max=1,c;
3193 Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG
3194 p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline
3195 s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG
3196 s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires
3197 if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
3198 if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
3199 c = stbi__get8(s);
3200 if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG");
3201 s->img_n = c;
3202 for (i=0; i < c; ++i) {
3203 z->img_comp[i].data = NULL;
3204 z->img_comp[i].linebuf = NULL;
3205 }
3206
3207 if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG");
3208
3209 z->rgb = 0;
3210 for (i=0; i < s->img_n; ++i) {
3211 static const unsigned char rgb[3] = { 'R', 'G', 'B' };
3212 z->img_comp[i].id = stbi__get8(s);
3213 if (s->img_n == 3 && z->img_comp[i].id == rgb[i])
3214 ++z->rgb;
3215 q = stbi__get8(s);
3216 z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG");
3217 z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG");
3218 z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG");
3219 }
3220
3221 if (scan != STBI__SCAN_load) return 1;
3222
3223 if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode");
3224
3225 for (i=0; i < s->img_n; ++i) {
3226 if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h;
3227 if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v;
3228 }
3229
3230 // compute interleaved mcu info
3231 z->img_h_max = h_max;
3232 z->img_v_max = v_max;
3233 z->img_mcu_w = h_max * 8;
3234 z->img_mcu_h = v_max * 8;
3235 // these sizes can't be more than 17 bits
3236 z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w;
3237 z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h;
3238
3239 for (i=0; i < s->img_n; ++i) {
3240 // number of effective pixels (e.g. for non-interleaved MCU)
3241 z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max;
3242 z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max;
3243 // to simplify generation, we'll allocate enough memory to decode
3244 // the bogus oversized data from using interleaved MCUs and their
3245 // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't
3246 // discard the extra data until colorspace conversion
3247 //
3248 // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier)
3249 // so these muls can't overflow with 32-bit ints (which we require)
3250 z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8;
3251 z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8;
3252 z->img_comp[i].coeff = 0;
3253 z->img_comp[i].raw_coeff = 0;
3254 z->img_comp[i].linebuf = NULL;
3255 z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15);
3256 if (z->img_comp[i].raw_data == NULL)
3257 return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory"));
3258 // align blocks for idct using mmx/sse
3259 z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15);
3260 if (z->progressive) {
3261 // w2, h2 are multiples of 8 (see above)
3262 z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8;
3263 z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8;
3264 z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15);
3265 if (z->img_comp[i].raw_coeff == NULL)
3266 return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory"));
3267 z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15);
3268 }
3269 }
3270
3271 return 1;
3272}
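// worked example of the sizing above (illustrative): a 33x17 4:2:0 image has
// Y with h=2,v=2 and Cb,Cr with h=1,v=1, so h_max = v_max = 2 and the
// interleaved MCU is 16x16: img_mcu_x = (33+15)/16 = 3 and
// img_mcu_y = (17+15)/16 = 2. Y is then allocated w2 = 3*2*8 = 48 by
// h2 = 2*2*8 = 32, padding the image up to whole MCUs, while each chroma
// plane gets w2 = 24 by h2 = 16.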
3273
3274// use comparisons since in some cases we handle more than one case (e.g. SOF)
3275#define stbi__DNL(x) ((x) == 0xdc)
3276#define stbi__SOI(x) ((x) == 0xd8)
3277#define stbi__EOI(x) ((x) == 0xd9)
3278#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2)
3279#define stbi__SOS(x) ((x) == 0xda)
3280
3281#define stbi__SOF_progressive(x) ((x) == 0xc2)
3282
3283static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan)
3284{
3285 int m;
3286 z->jfif = 0;
3287 z->app14_color_transform = -1; // valid values are 0,1,2
3288 z->marker = STBI__MARKER_none; // initialize cached marker to empty
3289 m = stbi__get_marker(z);
3290 if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG");
3291 if (scan == STBI__SCAN_type) return 1;
3292 m = stbi__get_marker(z);
3293 while (!stbi__SOF(m)) {
3294 if (!stbi__process_marker(z,m)) return 0;
3295 m = stbi__get_marker(z);
3296 while (m == STBI__MARKER_none) {
3297 // some files have extra padding after their blocks, so ok, we'll scan
3298 if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG");
3299 m = stbi__get_marker(z);
3300 }
3301 }
3302 z->progressive = stbi__SOF_progressive(m);
3303 if (!stbi__process_frame_header(z, scan)) return 0;
3304 return 1;
3305}
3306
3307// decode image to YCbCr format
3308static int stbi__decode_jpeg_image(stbi__jpeg *j)
3309{
3310 int m;
3311 for (m = 0; m < 4; m++) {
3312 j->img_comp[m].raw_data = NULL;
3313 j->img_comp[m].raw_coeff = NULL;
3314 }
3315 j->restart_interval = 0;
3316 if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0;
3317 m = stbi__get_marker(j);
3318 while (!stbi__EOI(m)) {
3319 if (stbi__SOS(m)) {
3320 if (!stbi__process_scan_header(j)) return 0;
3321 if (!stbi__parse_entropy_coded_data(j)) return 0;
3322 if (j->marker == STBI__MARKER_none ) {
3323 // handle 0s at the end of image data from IP Kamera 9060
3324 while (!stbi__at_eof(j->s)) {
3325 int x = stbi__get8(j->s);
3326 if (x == 255) {
3327 j->marker = stbi__get8(j->s);
3328 break;
3329 }
3330 }
3331 // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0
3332 }
3333 } else if (stbi__DNL(m)) {
3334 int Ld = stbi__get16be(j->s);
3335 stbi__uint32 NL = stbi__get16be(j->s);
3336 if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG");
3337 if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG");
3338 } else {
3339 if (!stbi__process_marker(j, m)) return 0;
3340 }
3341 m = stbi__get_marker(j);
3342 }
3343 if (j->progressive)
3344 stbi__jpeg_finish(j);
3345 return 1;
3346}
3347
3348// static jfif-centered resampling (across block boundaries)
3349
3350typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1,
3351 int w, int hs);
3352
3353#define stbi__div4(x) ((stbi_uc) ((x) >> 2))
3354
3355static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
3356{
3357 STBI_NOTUSED(out);
3358 STBI_NOTUSED(in_far);
3359 STBI_NOTUSED(w);
3360 STBI_NOTUSED(hs);
3361 return in_near;
3362}
3363
3364static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
3365{
3366 // need to generate two samples vertically for every one in input
3367 int i;
3368 STBI_NOTUSED(hs);
3369 for (i=0; i < w; ++i)
3370 out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2);
3371 return out;
3372}
3373
3374static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
3375{
3376 // need to generate two samples horizontally for every one in input
3377 int i;
3378 stbi_uc *input = in_near;
3379
3380 if (w == 1) {
3381 // if only one sample, can't do any interpolation
3382 out[0] = out[1] = input[0];
3383 return out;
3384 }
3385
3386 out[0] = input[0];
3387 out[1] = stbi__div4(input[0]*3 + input[1] + 2);
3388 for (i=1; i < w-1; ++i) {
3389 int n = 3*input[i]+2;
3390 out[i*2+0] = stbi__div4(n+input[i-1]);
3391 out[i*2+1] = stbi__div4(n+input[i+1]);
3392 }
3393 out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2);
3394 out[i*2+1] = input[w-1];
3395
3396 STBI_NOTUSED(in_far);
3397 STBI_NOTUSED(hs);
3398
3399 return out;
3400}
3401
3402#define stbi__div16(x) ((stbi_uc) ((x) >> 4))
3403
3404static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
3405{
3406 // need to generate 2x2 samples for every one in input
3407 int i,t0,t1;
3408 if (w == 1) {
3409 out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2);
3410 return out;
3411 }
3412
3413 t1 = 3*in_near[0] + in_far[0];
3414 out[0] = stbi__div4(t1+2);
3415 for (i=1; i < w; ++i) {
3416 t0 = t1;
3417 t1 = 3*in_near[i]+in_far[i];
3418 out[i*2-1] = stbi__div16(3*t0 + t1 + 8);
3419 out[i*2 ] = stbi__div16(3*t1 + t0 + 8);
3420 }
3421 out[w*2-1] = stbi__div4(t1+2);
3422
3423 STBI_NOTUSED(hs);
3424
3425 return out;
3426}
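// worked check of the 2x2 upsample weights above (illustrative): each t is
// already 3*near + far, so (3*t0 + t1 + 8) >> 4 expands to
// (9*near0 + 3*far0 + 3*near1 + far1 + 8) / 16 -- the usual 9/16, 3/16,
// 3/16, 1/16 bilinear kernel centered between the four source samples, with
// the +8 providing rounding.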
3427
3428#if defined(STBI_SSE2) || defined(STBI_NEON)
3429static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
3430{
3431 // need to generate 2x2 samples for every one in input
3432 int i=0,t0,t1;
3433
3434 if (w == 1) {
3435 out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2);
3436 return out;
3437 }
3438
3439 t1 = 3*in_near[0] + in_far[0];
3440 // process groups of 8 pixels for as long as we can.
3441 // note we can't handle the last pixel in a row in this loop
3442 // because we need to handle the filter boundary conditions.
3443 for (; i < ((w-1) & ~7); i += 8) {
3444#if defined(STBI_SSE2)
3445 // load and perform the vertical filtering pass
3446 // this uses 3*x + y = 4*x + (y - x)
3447 __m128i zero = _mm_setzero_si128();
3448 __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i));
3449 __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i));
3450 __m128i farw = _mm_unpacklo_epi8(farb, zero);
3451 __m128i nearw = _mm_unpacklo_epi8(nearb, zero);
3452 __m128i diff = _mm_sub_epi16(farw, nearw);
3453 __m128i nears = _mm_slli_epi16(nearw, 2);
3454 __m128i curr = _mm_add_epi16(nears, diff); // current row
3455
3456      // horizontal filter works the same based on shifted versions of the current
3457 // row. "prev" is current row shifted right by 1 pixel; we need to
3458 // insert the previous pixel value (from t1).
3459 // "next" is current row shifted left by 1 pixel, with first pixel
3460 // of next block of 8 pixels added in.
3461 __m128i prv0 = _mm_slli_si128(curr, 2);
3462 __m128i nxt0 = _mm_srli_si128(curr, 2);
3463 __m128i prev = _mm_insert_epi16(prv0, t1, 0);
3464 __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7);
3465
3466 // horizontal filter, polyphase implementation since it's convenient:
3467 // even pixels = 3*cur + prev = cur*4 + (prev - cur)
3468 // odd pixels = 3*cur + next = cur*4 + (next - cur)
3469 // note the shared term.
3470 __m128i bias = _mm_set1_epi16(8);
3471 __m128i curs = _mm_slli_epi16(curr, 2);
3472 __m128i prvd = _mm_sub_epi16(prev, curr);
3473 __m128i nxtd = _mm_sub_epi16(next, curr);
3474 __m128i curb = _mm_add_epi16(curs, bias);
3475 __m128i even = _mm_add_epi16(prvd, curb);
3476 __m128i odd = _mm_add_epi16(nxtd, curb);
3477
3478 // interleave even and odd pixels, then undo scaling.
3479 __m128i int0 = _mm_unpacklo_epi16(even, odd);
3480 __m128i int1 = _mm_unpackhi_epi16(even, odd);
3481 __m128i de0 = _mm_srli_epi16(int0, 4);
3482 __m128i de1 = _mm_srli_epi16(int1, 4);
3483
3484 // pack and write output
3485 __m128i outv = _mm_packus_epi16(de0, de1);
3486 _mm_storeu_si128((__m128i *) (out + i*2), outv);
3487#elif defined(STBI_NEON)
3488 // load and perform the vertical filtering pass
3489 // this uses 3*x + y = 4*x + (y - x)
3490 uint8x8_t farb = vld1_u8(in_far + i);
3491 uint8x8_t nearb = vld1_u8(in_near + i);
3492 int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb));
3493 int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2));
3494 int16x8_t curr = vaddq_s16(nears, diff); // current row
3495
3496      // horizontal filter works the same based on shifted versions of the current
3497 // row. "prev" is current row shifted right by 1 pixel; we need to
3498 // insert the previous pixel value (from t1).
3499 // "next" is current row shifted left by 1 pixel, with first pixel
3500 // of next block of 8 pixels added in.
3501 int16x8_t prv0 = vextq_s16(curr, curr, 7);
3502 int16x8_t nxt0 = vextq_s16(curr, curr, 1);
3503 int16x8_t prev = vsetq_lane_s16(t1, prv0, 0);
3504 int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7);
3505
3506 // horizontal filter, polyphase implementation since it's convenient:
3507 // even pixels = 3*cur + prev = cur*4 + (prev - cur)
3508 // odd pixels = 3*cur + next = cur*4 + (next - cur)
3509 // note the shared term.
3510 int16x8_t curs = vshlq_n_s16(curr, 2);
3511 int16x8_t prvd = vsubq_s16(prev, curr);
3512 int16x8_t nxtd = vsubq_s16(next, curr);
3513 int16x8_t even = vaddq_s16(curs, prvd);
3514 int16x8_t odd = vaddq_s16(curs, nxtd);
3515
3516 // undo scaling and round, then store with even/odd phases interleaved
3517 uint8x8x2_t o;
3518 o.val[0] = vqrshrun_n_s16(even, 4);
3519 o.val[1] = vqrshrun_n_s16(odd, 4);
3520 vst2_u8(out + i*2, o);
3521#endif
3522
3523 // "previous" value for next iter
3524 t1 = 3*in_near[i+7] + in_far[i+7];
3525 }
3526
3527 t0 = t1;
3528 t1 = 3*in_near[i] + in_far[i];
3529 out[i*2] = stbi__div16(3*t1 + t0 + 8);
3530
3531 for (++i; i < w; ++i) {
3532 t0 = t1;
3533 t1 = 3*in_near[i]+in_far[i];
3534 out[i*2-1] = stbi__div16(3*t0 + t1 + 8);
3535 out[i*2 ] = stbi__div16(3*t1 + t0 + 8);
3536 }
3537 out[w*2-1] = stbi__div4(t1+2);
3538
3539 STBI_NOTUSED(hs);
3540
3541 return out;
3542}
3543#endif
3544
3545static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
3546{
3547 // resample with nearest-neighbor
3548 int i,j;
3549 STBI_NOTUSED(in_far);
3550 for (i=0; i < w; ++i)
3551 for (j=0; j < hs; ++j)
3552 out[i*hs+j] = in_near[i];
3553 return out;
3554}
3555
3556// this is a reduced-precision calculation of YCbCr-to-RGB introduced
3557// to make sure the code produces the same results in both SIMD and scalar
3558#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8)
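// How the fixed point works: stbi__float2fixed scales a constant by 4096
// (the same 12-bit constants the SIMD paths use) and then by another 2^8,
// so products like cr*stbi__float2fixed(1.40200f) and the luma term
// (y[i] << 20) are all in 20-bit fixed point; (1<<19) rounds before the
// final >> 20. The '& 0xffff0000' on the cb term discards the low product
// bits to match the SIMD code, which only computes the high half of that
// multiply (_mm_mulhi_epi16 / vqdmulhq_s16).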
3559static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step)
3560{
3561 int i;
3562 for (i=0; i < count; ++i) {
3563 int y_fixed = (y[i] << 20) + (1<<19); // rounding
3564 int r,g,b;
3565 int cr = pcr[i] - 128;
3566 int cb = pcb[i] - 128;
3567 r = y_fixed + cr* stbi__float2fixed(1.40200f);
3568 g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000);
3569 b = y_fixed + cb* stbi__float2fixed(1.77200f);
3570 r >>= 20;
3571 g >>= 20;
3572 b >>= 20;
3573 if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
3574 if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
3575 if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; }
3576 out[0] = (stbi_uc)r;
3577 out[1] = (stbi_uc)g;
3578 out[2] = (stbi_uc)b;
3579 out[3] = 255;
3580 out += step;
3581 }
3582}
3583
3584#if defined(STBI_SSE2) || defined(STBI_NEON)
3585static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step)
3586{
3587 int i = 0;
3588
3589#ifdef STBI_SSE2
3590 // step == 3 is pretty ugly on the final interleave, and i'm not convinced
3591 // it's useful in practice (you wouldn't use it for textures, for example).
3592 // so just accelerate step == 4 case.
3593 if (step == 4) {
3594 // this is a fairly straightforward implementation and not super-optimized.
3595 __m128i signflip = _mm_set1_epi8(-0x80);
3596 __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f));
3597 __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f));
3598 __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f));
3599 __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f));
3600 __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128);
3601 __m128i xw = _mm_set1_epi16(255); // alpha channel
3602
3603 for (; i+7 < count; i += 8) {
3604 // load
3605 __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i));
3606 __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i));
3607 __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i));
3608 __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128
3609 __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128
3610
3611 // unpack to short (and left-shift cr, cb by 8)
3612 __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes);
3613 __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased);
3614 __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased);
3615
3616 // color transform
3617 __m128i yws = _mm_srli_epi16(yw, 4);
3618 __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw);
3619 __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw);
3620 __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1);
3621 __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1);
3622 __m128i rws = _mm_add_epi16(cr0, yws);
3623 __m128i gwt = _mm_add_epi16(cb0, yws);
3624 __m128i bws = _mm_add_epi16(yws, cb1);
3625 __m128i gws = _mm_add_epi16(gwt, cr1);
3626
3627 // descale
3628 __m128i rw = _mm_srai_epi16(rws, 4);
3629 __m128i bw = _mm_srai_epi16(bws, 4);
3630 __m128i gw = _mm_srai_epi16(gws, 4);
3631
3632 // back to byte, set up for transpose
3633 __m128i brb = _mm_packus_epi16(rw, bw);
3634 __m128i gxb = _mm_packus_epi16(gw, xw);
3635
3636 // transpose to interleave channels
3637 __m128i t0 = _mm_unpacklo_epi8(brb, gxb);
3638 __m128i t1 = _mm_unpackhi_epi8(brb, gxb);
3639 __m128i o0 = _mm_unpacklo_epi16(t0, t1);
3640 __m128i o1 = _mm_unpackhi_epi16(t0, t1);
3641
3642 // store
3643 _mm_storeu_si128((__m128i *) (out + 0), o0);
3644 _mm_storeu_si128((__m128i *) (out + 16), o1);
3645 out += 32;
3646 }
3647 }
3648#endif
3649
3650#ifdef STBI_NEON
3651 // in this version, step=3 support would be easy to add. but is there demand?
3652 if (step == 4) {
3653 // this is a fairly straightforward implementation and not super-optimized.
3654 uint8x8_t signflip = vdup_n_u8(0x80);
3655 int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f));
3656 int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f));
3657 int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f));
3658 int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f));
3659
3660 for (; i+7 < count; i += 8) {
3661 // load
3662 uint8x8_t y_bytes = vld1_u8(y + i);
3663 uint8x8_t cr_bytes = vld1_u8(pcr + i);
3664 uint8x8_t cb_bytes = vld1_u8(pcb + i);
3665 int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip));
3666 int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip));
3667
3668 // expand to s16
3669 int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4));
3670 int16x8_t crw = vshll_n_s8(cr_biased, 7);
3671 int16x8_t cbw = vshll_n_s8(cb_biased, 7);
3672
3673 // color transform
3674 int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0);
3675 int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0);
3676 int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1);
3677 int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1);
3678 int16x8_t rws = vaddq_s16(yws, cr0);
3679 int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1);
3680 int16x8_t bws = vaddq_s16(yws, cb1);
3681
3682 // undo scaling, round, convert to byte
3683 uint8x8x4_t o;
3684 o.val[0] = vqrshrun_n_s16(rws, 4);
3685 o.val[1] = vqrshrun_n_s16(gws, 4);
3686 o.val[2] = vqrshrun_n_s16(bws, 4);
3687 o.val[3] = vdup_n_u8(255);
3688
3689 // store, interleaving r/g/b/a
3690 vst4_u8(out, o);
3691 out += 8*4;
3692 }
3693 }
3694#endif
3695
3696 for (; i < count; ++i) {
3697 int y_fixed = (y[i] << 20) + (1<<19); // rounding
3698 int r,g,b;
3699 int cr = pcr[i] - 128;
3700 int cb = pcb[i] - 128;
3701 r = y_fixed + cr* stbi__float2fixed(1.40200f);
3702 g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000);
3703 b = y_fixed + cb* stbi__float2fixed(1.77200f);
3704 r >>= 20;
3705 g >>= 20;
3706 b >>= 20;
3707 if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
3708 if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
3709 if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; }
3710 out[0] = (stbi_uc)r;
3711 out[1] = (stbi_uc)g;
3712 out[2] = (stbi_uc)b;
3713 out[3] = 255;
3714 out += step;
3715 }
3716}
3717#endif
3718
3719// set up the kernels
3720static void stbi__setup_jpeg(stbi__jpeg *j)
3721{
3722 j->idct_block_kernel = stbi__idct_block;
3723 j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row;
3724 j->resample_row_hv_2_kernel = stbi__resample_row_hv_2;
3725
3726#ifdef STBI_SSE2
3727 if (stbi__sse2_available()) {
3728 j->idct_block_kernel = stbi__idct_simd;
3729 j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
3730 j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
3731 }
3732#endif
3733
3734#ifdef STBI_NEON
3735 j->idct_block_kernel = stbi__idct_simd;
3736 j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
3737 j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
3738#endif
3739}
3740
3741// clean up the temporary component buffers
3742static void stbi__cleanup_jpeg(stbi__jpeg *j)
3743{
3744 stbi__free_jpeg_components(j, j->s->img_n, 0);
3745}
3746
3747typedef struct
3748{
3749 resample_row_func resample;
3750 stbi_uc *line0,*line1;
3751 int hs,vs; // expansion factor in each axis
3752 int w_lores; // horizontal pixels pre-expansion
3753 int ystep; // how far through vertical expansion we are
3754 int ypos; // which pre-expansion row we're on
3755} stbi__resample;
3756
3757// fast 0..255 * 0..255 => 0..255 rounded multiplication
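// (t + (t >> 8)) >> 8 with the +128 bias computes round(x*y / 255.0)
// exactly for all 8-bit inputs without a divide; the trick is commonly
// attributed to Jim Blinn.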
3758static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y)
3759{
3760 unsigned int t = x*y + 128;
3761 return (stbi_uc) ((t + (t >>8)) >> 8);
3762}
3763
3764static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp)
3765{
3766 int n, decode_n, is_rgb;
3767 z->s->img_n = 0; // make stbi__cleanup_jpeg safe
3768
3769 // validate req_comp
3770 if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error");
3771
3772 // load a jpeg image from whichever source, but leave in YCbCr format
3773 if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; }
3774
3775 // determine actual number of components to generate
3776 n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1;
3777
3778 is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif));
3779
3780 if (z->s->img_n == 3 && n < 3 && !is_rgb)
3781 decode_n = 1;
3782 else
3783 decode_n = z->s->img_n;
3784
3785 // resample and color-convert
3786 {
3787 int k;
3788 unsigned int i,j;
3789 stbi_uc *output;
3790 stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL };
3791
3792 stbi__resample res_comp[4];
3793
3794 for (k=0; k < decode_n; ++k) {
3795 stbi__resample *r = &res_comp[k];
3796
3797 // allocate line buffer big enough for upsampling off the edges
3798 // with upsample factor of 4
3799 z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3);
3800 if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); }
3801
3802 r->hs = z->img_h_max / z->img_comp[k].h;
3803 r->vs = z->img_v_max / z->img_comp[k].v;
3804 r->ystep = r->vs >> 1;
3805 r->w_lores = (z->s->img_x + r->hs-1) / r->hs;
3806 r->ypos = 0;
3807 r->line0 = r->line1 = z->img_comp[k].data;
3808
3809 if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1;
3810 else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2;
3811 else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2;
3812 else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel;
3813 else r->resample = stbi__resample_row_generic;
3814 }
3815
3816      // can't error after this, so this is safe
3817 output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1);
3818 if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); }
3819
3820 // now go ahead and resample
3821 for (j=0; j < z->s->img_y; ++j) {
3822 stbi_uc *out = output + n * z->s->img_x * j;
3823 for (k=0; k < decode_n; ++k) {
3824 stbi__resample *r = &res_comp[k];
3825 int y_bot = r->ystep >= (r->vs >> 1);
3826 coutput[k] = r->resample(z->img_comp[k].linebuf,
3827 y_bot ? r->line1 : r->line0,
3828 y_bot ? r->line0 : r->line1,
3829 r->w_lores, r->hs);
3830 if (++r->ystep >= r->vs) {
3831 r->ystep = 0;
3832 r->line0 = r->line1;
3833 if (++r->ypos < z->img_comp[k].y)
3834 r->line1 += z->img_comp[k].w2;
3835 }
3836 }
3837 if (n >= 3) {
3838 stbi_uc *y = coutput[0];
3839 if (z->s->img_n == 3) {
3840 if (is_rgb) {
3841 for (i=0; i < z->s->img_x; ++i) {
3842 out[0] = y[i];
3843 out[1] = coutput[1][i];
3844 out[2] = coutput[2][i];
3845 out[3] = 255;
3846 out += n;
3847 }
3848 } else {
3849 z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
3850 }
3851 } else if (z->s->img_n == 4) {
3852 if (z->app14_color_transform == 0) { // CMYK
3853 for (i=0; i < z->s->img_x; ++i) {
3854 stbi_uc m = coutput[3][i];
3855 out[0] = stbi__blinn_8x8(coutput[0][i], m);
3856 out[1] = stbi__blinn_8x8(coutput[1][i], m);
3857 out[2] = stbi__blinn_8x8(coutput[2][i], m);
3858 out[3] = 255;
3859 out += n;
3860 }
3861 } else if (z->app14_color_transform == 2) { // YCCK
3862 z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
3863 for (i=0; i < z->s->img_x; ++i) {
3864 stbi_uc m = coutput[3][i];
3865 out[0] = stbi__blinn_8x8(255 - out[0], m);
3866 out[1] = stbi__blinn_8x8(255 - out[1], m);
3867 out[2] = stbi__blinn_8x8(255 - out[2], m);
3868 out += n;
3869 }
3870 } else { // YCbCr + alpha? Ignore the fourth channel for now
3871 z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
3872 }
3873 } else
3874 for (i=0; i < z->s->img_x; ++i) {
3875 out[0] = out[1] = out[2] = y[i];
3876 out[3] = 255; // not used if n==3
3877 out += n;
3878 }
3879 } else {
3880 if (is_rgb) {
3881 if (n == 1)
3882 for (i=0; i < z->s->img_x; ++i)
3883 *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]);
3884 else {
3885 for (i=0; i < z->s->img_x; ++i, out += 2) {
3886 out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]);
3887 out[1] = 255;
3888 }
3889 }
3890 } else if (z->s->img_n == 4 && z->app14_color_transform == 0) {
3891 for (i=0; i < z->s->img_x; ++i) {
3892 stbi_uc m = coutput[3][i];
3893 stbi_uc r = stbi__blinn_8x8(coutput[0][i], m);
3894 stbi_uc g = stbi__blinn_8x8(coutput[1][i], m);
3895 stbi_uc b = stbi__blinn_8x8(coutput[2][i], m);
3896 out[0] = stbi__compute_y(r, g, b);
3897 out[1] = 255;
3898 out += n;
3899 }
3900 } else if (z->s->img_n == 4 && z->app14_color_transform == 2) {
3901 for (i=0; i < z->s->img_x; ++i) {
3902 out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]);
3903 out[1] = 255;
3904 out += n;
3905 }
3906 } else {
3907 stbi_uc *y = coutput[0];
3908 if (n == 1)
3909 for (i=0; i < z->s->img_x; ++i) out[i] = y[i];
3910 else
3911 for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; }
3912 }
3913 }
3914 }
3915 stbi__cleanup_jpeg(z);
3916 *out_x = z->s->img_x;
3917 *out_y = z->s->img_y;
3918 if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output
3919 return output;
3920 }
3921}
3922
3923static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
3924{
3925 unsigned char* result;
3926 stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg));
3927 STBI_NOTUSED(ri);
3928 j->s = s;
3929 stbi__setup_jpeg(j);
3930 result = load_jpeg_image(j, x,y,comp,req_comp);
3931 STBI_FREE(j);
3932 return result;
3933}
3934
3935static int stbi__jpeg_test(stbi__context *s)
3936{
3937 int r;
3938 stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg));
3939 j->s = s;
3940 stbi__setup_jpeg(j);
3941 r = stbi__decode_jpeg_header(j, STBI__SCAN_type);
3942 stbi__rewind(s);
3943 STBI_FREE(j);
3944 return r;
3945}
3946
3947static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp)
3948{
3949 if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) {
3950 stbi__rewind( j->s );
3951 return 0;
3952 }
3953 if (x) *x = j->s->img_x;
3954 if (y) *y = j->s->img_y;
3955 if (comp) *comp = j->s->img_n >= 3 ? 3 : 1;
3956 return 1;
3957}
3958
3959static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp)
3960{
3961 int result;
3962 stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg)));
3963 j->s = s;
3964 result = stbi__jpeg_info_raw(j, x, y, comp);
3965 STBI_FREE(j);
3966 return result;
3967}
3968#endif
3969
3970// public domain zlib decode v0.2 Sean Barrett 2006-11-18
3971// simple implementation
3972// - all input must be provided in an upfront buffer
3973// - all output is written to a single output buffer (can malloc/realloc)
3974// performance
3975// - fast huffman
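//
// usage sketch (informal; 'zbuf'/'zbuf_len' stand for your compressed input):
//
//    int outlen;
//    char *data = stbi_zlib_decode_malloc(zbuf, zbuf_len, &outlen);
//    if (data) {
//       // ... use outlen bytes at data ...
//       free(data);   // allocated with STBI_MALLOC (malloc by default)
//    }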
3976
3977#ifndef STBI_NO_ZLIB
3978
3979// the fast path is faster to check than jpeg huffman, but the slow path is slower
3980#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables
3981#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1)
3982
3983// zlib-style huffman encoding
3984// (jpeg packs from the left, zlib from the right, so the code can't be shared)
3985typedef struct
3986{
3987 stbi__uint16 fast[1 << STBI__ZFAST_BITS];
3988 stbi__uint16 firstcode[16];
3989 int maxcode[17];
3990 stbi__uint16 firstsymbol[16];
3991 stbi_uc size[288];
3992 stbi__uint16 value[288];
3993} stbi__zhuffman;
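// fast[] is a direct lookup on the low STBI__ZFAST_BITS bits of the bit
// buffer: a nonzero entry packs (code length << 9) | symbol, so short codes
// decode with a single table read; a zero entry means the code is longer
// than STBI__ZFAST_BITS and stbi__zhuffman_decode falls back to the slow path.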
3994
3995stbi_inline static int stbi__bitreverse16(int n)
3996{
3997 n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1);
3998 n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2);
3999 n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4);
4000 n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8);
4001 return n;
4002}
4003
4004stbi_inline static int stbi__bit_reverse(int v, int bits)
4005{
4006 STBI_ASSERT(bits <= 16);
4007 // to bit reverse n bits, reverse 16 and shift
4008 // e.g. 11 bits, bit reverse and shift away 5
4009 return stbi__bitreverse16(v) >> (16-bits);
4010}
4011
4012static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num)
4013{
4014 int i,k=0;
4015 int code, next_code[16], sizes[17];
4016
4017 // DEFLATE spec for generating codes
4018 memset(sizes, 0, sizeof(sizes));
4019 memset(z->fast, 0, sizeof(z->fast));
4020 for (i=0; i < num; ++i)
4021 ++sizes[sizelist[i]];
4022 sizes[0] = 0;
4023 for (i=1; i < 16; ++i)
4024 if (sizes[i] > (1 << i))
4025 return stbi__err("bad sizes", "Corrupt PNG");
4026 code = 0;
4027 for (i=1; i < 16; ++i) {
4028 next_code[i] = code;
4029 z->firstcode[i] = (stbi__uint16) code;
4030 z->firstsymbol[i] = (stbi__uint16) k;
4031 code = (code + sizes[i]);
4032 if (sizes[i])
4033 if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG");
4034 z->maxcode[i] = code << (16-i); // preshift for inner loop
4035 code <<= 1;
4036 k += sizes[i];
4037 }
4038 z->maxcode[16] = 0x10000; // sentinel
4039 for (i=0; i < num; ++i) {
4040 int s = sizelist[i];
4041 if (s) {
4042 int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s];
4043 stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i);
4044 z->size [c] = (stbi_uc ) s;
4045 z->value[c] = (stbi__uint16) i;
4046 if (s <= STBI__ZFAST_BITS) {
4047 int j = stbi__bit_reverse(next_code[s],s);
4048 while (j < (1 << STBI__ZFAST_BITS)) {
4049 z->fast[j] = fastv;
4050 j += (1 << s);
4051 }
4052 }
4053 ++next_code[s];
4054 }
4055 }
4056 return 1;
4057}
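// Worked example of the construction above (RFC 1951, section 3.2.2): for
// symbols A,B,C,D with code lengths 2,1,3,3, the loops assign B=0, A=10,
// C=110, D=111; codes of each length are consecutive, continuing from where
// the previous length's codes ended, shifted left by one bit.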
4058
4059// zlib-from-memory implementation for PNG reading
4060// because PNG allows splitting the zlib stream arbitrarily,
4061// and it's annoying structurally to have PNG call ZLIB call PNG,
4062// we require PNG read all the IDATs and combine them into a single
4063// memory buffer
4064
4065typedef struct
4066{
4067 stbi_uc *zbuffer, *zbuffer_end;
4068 int num_bits;
4069 stbi__uint32 code_buffer;
4070
4071 char *zout;
4072 char *zout_start;
4073 char *zout_end;
4074 int z_expandable;
4075
4076 stbi__zhuffman z_length, z_distance;
4077} stbi__zbuf;
4078
4079stbi_inline static int stbi__zeof(stbi__zbuf *z)
4080{
4081 return (z->zbuffer >= z->zbuffer_end);
4082}
4083
4084stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z)
4085{
4086 return stbi__zeof(z) ? 0 : *z->zbuffer++;
4087}
4088
4089static void stbi__fill_bits(stbi__zbuf *z)
4090{
4091 do {
4092 if (z->code_buffer >= (1U << z->num_bits)) {
4093 z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */
4094 return;
4095 }
4096 z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits;
4097 z->num_bits += 8;
4098 } while (z->num_bits <= 24);
4099}
4100
4101stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n)
4102{
4103 unsigned int k;
4104 if (z->num_bits < n) stbi__fill_bits(z);
4105 k = z->code_buffer & ((1 << n) - 1);
4106 z->code_buffer >>= n;
4107 z->num_bits -= n;
4108 return k;
4109}
4110
4111static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z)
4112{
4113 int b,s,k;
4114 // not resolved by fast table, so compute it the slow way
4115 // use jpeg approach, which requires MSbits at top
4116 k = stbi__bit_reverse(a->code_buffer, 16);
4117 for (s=STBI__ZFAST_BITS+1; ; ++s)
4118 if (k < z->maxcode[s])
4119 break;
4120 if (s >= 16) return -1; // invalid code!
4121 // code size is s, so:
4122 b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s];
4123 if (b >= (int) sizeof (z->size)) return -1; // some data was corrupt somewhere!
4124 if (z->size[b] != s) return -1; // was originally an assert, but report failure instead.
4125 a->code_buffer >>= s;
4126 a->num_bits -= s;
4127 return z->value[b];
4128}
4129
4130stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z)
4131{
4132 int b,s;
4133 if (a->num_bits < 16) {
4134 if (stbi__zeof(a)) {
4135 return -1; /* report error for unexpected end of data. */
4136 }
4137 stbi__fill_bits(a);
4138 }
4139 b = z->fast[a->code_buffer & STBI__ZFAST_MASK];
4140 if (b) {
4141 s = b >> 9;
4142 a->code_buffer >>= s;
4143 a->num_bits -= s;
4144 return b & 511;
4145 }
4146 return stbi__zhuffman_decode_slowpath(a, z);
4147}
4148
4149static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes
4150{
4151 char *q;
4152 unsigned int cur, limit, old_limit;
4153 z->zout = zout;
4154 if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG");
4155 cur = (unsigned int) (z->zout - z->zout_start);
4156 limit = old_limit = (unsigned) (z->zout_end - z->zout_start);
4157 if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory");
4158 while (cur + n > limit) {
4159 if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory");
4160 limit *= 2;
4161 }
4162 q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit);
4163 STBI_NOTUSED(old_limit);
4164 if (q == NULL) return stbi__err("outofmem", "Out of memory");
4165 z->zout_start = q;
4166 z->zout = q + cur;
4167 z->zout_end = q + limit;
4168 return 1;
4169}
4170
4171static const int stbi__zlength_base[31] = {
4172 3,4,5,6,7,8,9,10,11,13,
4173 15,17,19,23,27,31,35,43,51,59,
4174 67,83,99,115,131,163,195,227,258,0,0 };
4175
4176static const int stbi__zlength_extra[31]=
4177{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 };
4178
4179static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
4180257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0};
4181
4182static const int stbi__zdist_extra[32] =
4183{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
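// per RFC 1951, section 3.2.5: a literal/length symbol >= 257 selects a base
// match length from stbi__zlength_base plus stbi__zlength_extra[z] further
// bits from the stream; a following symbol from the distance tree selects a
// base distance plus extra bits the same way. stbi__parse_huffman_block
// below implements this LZ77 copy loop.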
4184
4185static int stbi__parse_huffman_block(stbi__zbuf *a)
4186{
4187 char *zout = a->zout;
4188 for(;;) {
4189 int z = stbi__zhuffman_decode(a, &a->z_length);
4190 if (z < 256) {
4191 if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes
4192 if (zout >= a->zout_end) {
4193 if (!stbi__zexpand(a, zout, 1)) return 0;
4194 zout = a->zout;
4195 }
4196 *zout++ = (char) z;
4197 } else {
4198 stbi_uc *p;
4199 int len,dist;
4200 if (z == 256) {
4201 a->zout = zout;
4202 return 1;
4203 }
4204 z -= 257;
4205 len = stbi__zlength_base[z];
4206 if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]);
4207 z = stbi__zhuffman_decode(a, &a->z_distance);
4208 if (z < 0) return stbi__err("bad huffman code","Corrupt PNG");
4209 dist = stbi__zdist_base[z];
4210 if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]);
4211 if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG");
4212 if (zout + len > a->zout_end) {
4213 if (!stbi__zexpand(a, zout, len)) return 0;
4214 zout = a->zout;
4215 }
4216 p = (stbi_uc *) (zout - dist);
4217 if (dist == 1) { // run of one byte; common in images.
4218 stbi_uc v = *p;
4219 if (len) { do *zout++ = v; while (--len); }
4220 } else {
4221 if (len) { do *zout++ = *p++; while (--len); }
4222 }
4223 }
4224 }
4225}
4226
4227static int stbi__compute_huffman_codes(stbi__zbuf *a)
4228{
4229 static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 };
4230 stbi__zhuffman z_codelength;
4231   stbi_uc lencodes[286+32+137]; // padding for maximum single op
4232 stbi_uc codelength_sizes[19];
4233 int i,n;
4234
4235 int hlit = stbi__zreceive(a,5) + 257;
4236 int hdist = stbi__zreceive(a,5) + 1;
4237 int hclen = stbi__zreceive(a,4) + 4;
4238 int ntot = hlit + hdist;
4239
4240 memset(codelength_sizes, 0, sizeof(codelength_sizes));
4241 for (i=0; i < hclen; ++i) {
4242 int s = stbi__zreceive(a,3);
4243 codelength_sizes[length_dezigzag[i]] = (stbi_uc) s;
4244 }
4245 if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0;
4246
4247 n = 0;
4248 while (n < ntot) {
4249 int c = stbi__zhuffman_decode(a, &z_codelength);
4250 if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG");
4251 if (c < 16)
4252 lencodes[n++] = (stbi_uc) c;
4253 else {
4254 stbi_uc fill = 0;
4255 if (c == 16) {
4256 c = stbi__zreceive(a,2)+3;
4257 if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG");
4258 fill = lencodes[n-1];
4259 } else if (c == 17) {
4260 c = stbi__zreceive(a,3)+3;
4261 } else if (c == 18) {
4262 c = stbi__zreceive(a,7)+11;
4263 } else {
4264 return stbi__err("bad codelengths", "Corrupt PNG");
4265 }
4266 if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG");
4267 memset(lencodes+n, fill, c);
4268 n += c;
4269 }
4270 }
4271 if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG");
4272 if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0;
4273 if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0;
4274 return 1;
4275}
4276
4277static int stbi__parse_uncompressed_block(stbi__zbuf *a)
4278{
4279 stbi_uc header[4];
4280 int len,nlen,k;
4281 if (a->num_bits & 7)
4282 stbi__zreceive(a, a->num_bits & 7); // discard
4283 // drain the bit-packed data into header
4284 k = 0;
4285 while (a->num_bits > 0) {
4286 header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check
4287 a->code_buffer >>= 8;
4288 a->num_bits -= 8;
4289 }
4290 if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG");
4291 // now fill header the normal way
4292 while (k < 4)
4293 header[k++] = stbi__zget8(a);
4294 len = header[1] * 256 + header[0];
4295 nlen = header[3] * 256 + header[2];
4296 if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG");
4297 if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG");
4298 if (a->zout + len > a->zout_end)
4299 if (!stbi__zexpand(a, a->zout, len)) return 0;
4300 memcpy(a->zout, a->zbuffer, len);
4301 a->zbuffer += len;
4302 a->zout += len;
4303 return 1;
4304}
4305
4306static int stbi__parse_zlib_header(stbi__zbuf *a)
4307{
4308 int cmf = stbi__zget8(a);
4309 int cm = cmf & 15;
4310 /* int cinfo = cmf >> 4; */
4311 int flg = stbi__zget8(a);
4312 if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec
4313 if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec
4314 if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png
4315 if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png
4316 // window = 1 << (8 + cinfo)... but who cares, we fully buffer output
4317 return 1;
4318}
4319
4320static const stbi_uc stbi__zdefault_length[288] =
4321{
4322 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
4323 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
4324 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
4325 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
4326 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
4327 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
4328 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
4329 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
4330 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8
4331};
4332static const stbi_uc stbi__zdefault_distance[32] =
4333{
4334 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5
4335};
4336/*
4337Init algorithm:
4338{
4339 int i; // use <= to match clearly with spec
4340 for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8;
4341 for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9;
4342 for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7;
4343 for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8;
4344
4345 for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5;
4346}
4347*/
4348
4349static int stbi__parse_zlib(stbi__zbuf *a, int parse_header)
4350{
4351 int final, type;
4352 if (parse_header)
4353 if (!stbi__parse_zlib_header(a)) return 0;
4354 a->num_bits = 0;
4355 a->code_buffer = 0;
4356 do {
4357 final = stbi__zreceive(a,1);
4358 type = stbi__zreceive(a,2);
4359 if (type == 0) {
4360 if (!stbi__parse_uncompressed_block(a)) return 0;
4361 } else if (type == 3) {
4362 return 0;
4363 } else {
4364 if (type == 1) {
4365 // use fixed code lengths
4366 if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0;
4367 if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0;
4368 } else {
4369 if (!stbi__compute_huffman_codes(a)) return 0;
4370 }
4371 if (!stbi__parse_huffman_block(a)) return 0;
4372 }
4373 } while (!final);
4374 return 1;
4375}
4376
4377static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header)
4378{
4379 a->zout_start = obuf;
4380 a->zout = obuf;
4381 a->zout_end = obuf + olen;
4382 a->z_expandable = exp;
4383
4384 return stbi__parse_zlib(a, parse_header);
4385}
4386
4387STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen)
4388{
4389 stbi__zbuf a;
4390 char *p = (char *) stbi__malloc(initial_size);
4391 if (p == NULL) return NULL;
4392 a.zbuffer = (stbi_uc *) buffer;
4393 a.zbuffer_end = (stbi_uc *) buffer + len;
4394 if (stbi__do_zlib(&a, p, initial_size, 1, 1)) {
4395 if (outlen) *outlen = (int) (a.zout - a.zout_start);
4396 return a.zout_start;
4397 } else {
4398 STBI_FREE(a.zout_start);
4399 return NULL;
4400 }
4401}
4402
4403STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen)
4404{
4405 return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen);
4406}
4407
4408STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header)
4409{
4410 stbi__zbuf a;
4411 char *p = (char *) stbi__malloc(initial_size);
4412 if (p == NULL) return NULL;
4413 a.zbuffer = (stbi_uc *) buffer;
4414 a.zbuffer_end = (stbi_uc *) buffer + len;
4415 if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) {
4416 if (outlen) *outlen = (int) (a.zout - a.zout_start);
4417 return a.zout_start;
4418 } else {
4419 STBI_FREE(a.zout_start);
4420 return NULL;
4421 }
4422}
4423
4424STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen)
4425{
4426 stbi__zbuf a;
4427 a.zbuffer = (stbi_uc *) ibuffer;
4428 a.zbuffer_end = (stbi_uc *) ibuffer + ilen;
4429 if (stbi__do_zlib(&a, obuffer, olen, 0, 1))
4430 return (int) (a.zout - a.zout_start);
4431 else
4432 return -1;
4433}
4434
4435STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen)
4436{
4437 stbi__zbuf a;
4438 char *p = (char *) stbi__malloc(16384);
4439 if (p == NULL) return NULL;
4440 a.zbuffer = (stbi_uc *) buffer;
4441 a.zbuffer_end = (stbi_uc *) buffer+len;
4442 if (stbi__do_zlib(&a, p, 16384, 1, 0)) {
4443 if (outlen) *outlen = (int) (a.zout - a.zout_start);
4444 return a.zout_start;
4445 } else {
4446 STBI_FREE(a.zout_start);
4447 return NULL;
4448 }
4449}
4450
4451STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen)
4452{
4453 stbi__zbuf a;
4454 a.zbuffer = (stbi_uc *) ibuffer;
4455 a.zbuffer_end = (stbi_uc *) ibuffer + ilen;
4456 if (stbi__do_zlib(&a, obuffer, olen, 0, 0))
4457 return (int) (a.zout - a.zout_start);
4458 else
4459 return -1;
4460}
4461#endif
4462
4463// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18
4464// simple implementation
4465// - only 8-bit samples
4466// - no CRC checking
4467// - allocates lots of intermediate memory
4468// - avoids problem of streaming data between subsystems
4469// - avoids explicit window management
4470// performance
4471// - uses stb_zlib, a PD zlib implementation with fast huffman decoding
4472
4473#ifndef STBI_NO_PNG
4474typedef struct
4475{
4476 stbi__uint32 length;
4477 stbi__uint32 type;
4478} stbi__pngchunk;
4479
4480static stbi__pngchunk stbi__get_chunk_header(stbi__context *s)
4481{
4482 stbi__pngchunk c;
4483 c.length = stbi__get32be(s);
4484 c.type = stbi__get32be(s);
4485 return c;
4486}
4487
4488static int stbi__check_png_header(stbi__context *s)
4489{
4490 static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 };
4491 int i;
4492 for (i=0; i < 8; ++i)
4493 if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG");
4494 return 1;
4495}
4496
4497typedef struct
4498{
4499 stbi__context *s;
4500 stbi_uc *idata, *expanded, *out;
4501 int depth;
4502} stbi__png;
4503
4504
4505enum {
4506 STBI__F_none=0,
4507 STBI__F_sub=1,
4508 STBI__F_up=2,
4509 STBI__F_avg=3,
4510 STBI__F_paeth=4,
4511 // synthetic filters used for first scanline to avoid needing a dummy row of 0s
4512 STBI__F_avg_first,
4513 STBI__F_paeth_first
4514};
4515
4516static stbi_uc first_row_filter[5] =
4517{
4518 STBI__F_none,
4519 STBI__F_sub,
4520 STBI__F_none,
4521 STBI__F_avg_first,
4522 STBI__F_paeth_first
4523};
4524
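// Paeth predictor from the PNG spec: estimate p = a + b - c (left + above -
// upper-left) and return whichever of a, b, c is closest to p, breaking
// ties in favor of a, then b.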
4525static int stbi__paeth(int a, int b, int c)
4526{
4527 int p = a + b - c;
4528 int pa = abs(p-a);
4529 int pb = abs(p-b);
4530 int pc = abs(p-c);
4531 if (pa <= pb && pa <= pc) return a;
4532 if (pb <= pc) return b;
4533 return c;
4534}
4535
4536static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 };
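// multipliers that expand the maximum sample value at each depth to 255:
// 1-bit: 1*0xff, 2-bit: 3*0x55, 4-bit: 15*0x11, 8-bit: 255*0x01
// (unused depths are 0).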
4537
4538// create the png data from post-deflated data
4539static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color)
4540{
4541 int bytes = (depth == 16? 2 : 1);
4542 stbi__context *s = a->s;
4543 stbi__uint32 i,j,stride = x*out_n*bytes;
4544 stbi__uint32 img_len, img_width_bytes;
4545 int k;
4546 int img_n = s->img_n; // copy it into a local for later
4547
4548 int output_bytes = out_n*bytes;
4549 int filter_bytes = img_n*bytes;
4550 int width = x;
4551
4552 STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1);
4553 a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into
4554 if (!a->out) return stbi__err("outofmem", "Out of memory");
4555
4556 if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG");
4557 img_width_bytes = (((img_n * x * depth) + 7) >> 3);
4558 img_len = (img_width_bytes + 1) * y;
4559
4560 // we used to check for exact match between raw_len and img_len on non-interlaced PNGs,
4561 // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros),
4562 // so just check for raw_len < img_len always.
4563 if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG");
4564
4565 for (j=0; j < y; ++j) {
4566 stbi_uc *cur = a->out + stride*j;
4567 stbi_uc *prior;
4568 int filter = *raw++;
4569
4570 if (filter > 4)
4571 return stbi__err("invalid filter","Corrupt PNG");
4572
4573 if (depth < 8) {
4574 if (img_width_bytes > x) return stbi__err("invalid width","Corrupt PNG");
4575         cur += x*out_n - img_width_bytes; // store output to the rightmost img_width_bytes of the row, so we can decode in place
4576 filter_bytes = 1;
4577 width = img_width_bytes;
4578 }
4579 prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above
4580
4581 // if first row, use special filter that doesn't sample previous row
4582 if (j == 0) filter = first_row_filter[filter];
4583
4584 // handle first byte explicitly
4585 for (k=0; k < filter_bytes; ++k) {
4586 switch (filter) {
4587 case STBI__F_none : cur[k] = raw[k]; break;
4588 case STBI__F_sub : cur[k] = raw[k]; break;
4589 case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break;
4590 case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break;
4591 case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break;
4592 case STBI__F_avg_first : cur[k] = raw[k]; break;
4593 case STBI__F_paeth_first: cur[k] = raw[k]; break;
4594 }
4595 }
4596
4597 if (depth == 8) {
4598 if (img_n != out_n)
4599 cur[img_n] = 255; // first pixel
4600 raw += img_n;
4601 cur += out_n;
4602 prior += out_n;
4603 } else if (depth == 16) {
4604 if (img_n != out_n) {
4605 cur[filter_bytes] = 255; // first pixel top byte
4606 cur[filter_bytes+1] = 255; // first pixel bottom byte
4607 }
4608 raw += filter_bytes;
4609 cur += output_bytes;
4610 prior += output_bytes;
4611 } else {
4612 raw += 1;
4613 cur += 1;
4614 prior += 1;
4615 }
4616
4617 // this is a little gross, so that we don't switch per-pixel or per-component
4618 if (depth < 8 || img_n == out_n) {
4619 int nk = (width - 1)*filter_bytes;
4620 #define STBI__CASE(f) \
4621 case f: \
4622 for (k=0; k < nk; ++k)
4623 switch (filter) {
4624 // "none" filter turns into a memcpy here; make that explicit.
4625 case STBI__F_none: memcpy(cur, raw, nk); break;
4626 STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break;
4627 STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
4628 STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break;
4629 STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break;
4630 STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break;
4631 STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break;
4632 }
4633 #undef STBI__CASE
4634 raw += nk;
4635 } else {
4636 STBI_ASSERT(img_n+1 == out_n);
4637 #define STBI__CASE(f) \
4638 case f: \
4639 for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \
4640 for (k=0; k < filter_bytes; ++k)
4641 switch (filter) {
4642 STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break;
4643 STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break;
4644 STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
4645 STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break;
4646 STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break;
4647 STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break;
4648 STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break;
4649 }
4650 #undef STBI__CASE
4651
4652 // the loop above sets the high byte of the pixels' alpha, but for
4653 // 16 bit png files we also need the low byte set. we'll do that here.
4654 if (depth == 16) {
4655 cur = a->out + stride*j; // start at the beginning of the row again
4656 for (i=0; i < x; ++i,cur+=output_bytes) {
4657 cur[filter_bytes+1] = 255;
4658 }
4659 }
4660 }
4661 }
4662
4663 // we make a separate pass to expand bits to pixels; for performance,
4664 // this could run two scanlines behind the above code, so it won't
4665// interfere with filtering but will still be in the cache.
4666 if (depth < 8) {
4667 for (j=0; j < y; ++j) {
4668 stbi_uc *cur = a->out + stride*j;
4669 stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes;
4670         // unpack 1/2/4-bit into an 8-bit buffer. this allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit
4671         // png guarantees byte alignment; if width is not a multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop
4672 stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range
4673
4674 // note that the final byte might overshoot and write more data than desired.
4675 // we can allocate enough data that this never writes out of memory, but it
4676 // could also overwrite the next scanline. can it overwrite non-empty data
4677 // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel.
4678 // so we need to explicitly clamp the final ones
4679
4680 if (depth == 4) {
4681 for (k=x*img_n; k >= 2; k-=2, ++in) {
4682 *cur++ = scale * ((*in >> 4) );
4683 *cur++ = scale * ((*in ) & 0x0f);
4684 }
4685 if (k > 0) *cur++ = scale * ((*in >> 4) );
4686 } else if (depth == 2) {
4687 for (k=x*img_n; k >= 4; k-=4, ++in) {
4688 *cur++ = scale * ((*in >> 6) );
4689 *cur++ = scale * ((*in >> 4) & 0x03);
4690 *cur++ = scale * ((*in >> 2) & 0x03);
4691 *cur++ = scale * ((*in ) & 0x03);
4692 }
4693 if (k > 0) *cur++ = scale * ((*in >> 6) );
4694 if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03);
4695 if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03);
4696 } else if (depth == 1) {
4697 for (k=x*img_n; k >= 8; k-=8, ++in) {
4698 *cur++ = scale * ((*in >> 7) );
4699 *cur++ = scale * ((*in >> 6) & 0x01);
4700 *cur++ = scale * ((*in >> 5) & 0x01);
4701 *cur++ = scale * ((*in >> 4) & 0x01);
4702 *cur++ = scale * ((*in >> 3) & 0x01);
4703 *cur++ = scale * ((*in >> 2) & 0x01);
4704 *cur++ = scale * ((*in >> 1) & 0x01);
4705 *cur++ = scale * ((*in ) & 0x01);
4706 }
4707 if (k > 0) *cur++ = scale * ((*in >> 7) );
4708 if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01);
4709 if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01);
4710 if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01);
4711 if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01);
4712 if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01);
4713 if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01);
4714 }
4715 if (img_n != out_n) {
4716 int q;
4717 // insert alpha = 255
4718 cur = a->out + stride*j;
4719 if (img_n == 1) {
4720 for (q=x-1; q >= 0; --q) {
4721 cur[q*2+1] = 255;
4722 cur[q*2+0] = cur[q];
4723 }
4724 } else {
4725 STBI_ASSERT(img_n == 3);
4726 for (q=x-1; q >= 0; --q) {
4727 cur[q*4+3] = 255;
4728 cur[q*4+2] = cur[q*3+2];
4729 cur[q*4+1] = cur[q*3+1];
4730 cur[q*4+0] = cur[q*3+0];
4731 }
4732 }
4733 }
4734 }
4735 } else if (depth == 16) {
4736 // force the image data from big-endian to platform-native.
4737 // this is done in a separate pass due to the decoding relying
4738 // on the data being untouched, but could probably be done
4739 // per-line during decode if care is taken.
4740 stbi_uc *cur = a->out;
4741 stbi__uint16 *cur16 = (stbi__uint16*)cur;
4742
4743 for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) {
4744 *cur16 = (cur[0] << 8) | cur[1];
4745 }
4746 }
4747
4748 return 1;
4749}
4750
4751static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced)
4752{
4753 int bytes = (depth == 16 ? 2 : 1);
4754 int out_bytes = out_n * bytes;
4755 stbi_uc *final;
4756 int p;
4757 if (!interlaced)
4758 return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color);
4759
4760 // de-interlacing
4761 final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0);
4762 for (p=0; p < 7; ++p) {
4763 int xorig[] = { 0,4,0,2,0,1,0 };
4764 int yorig[] = { 0,0,4,0,2,0,1 };
4765 int xspc[] = { 8,8,4,4,2,2,1 };
4766 int yspc[] = { 8,8,8,4,4,2,2 };
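      // the four tables above define the seven Adam7 interlace passes:
      // pass p covers the pixels at (xorig[p] + i*xspc[p], yorig[p] + j*yspc[p]),
      // so the passes together tile the full image exactly once.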
4767 int i,j,x,y;
4768 // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1
4769 x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p];
4770 y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p];
4771 if (x && y) {
4772 stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y;
4773 if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) {
4774 STBI_FREE(final);
4775 return 0;
4776 }
4777 for (j=0; j < y; ++j) {
4778 for (i=0; i < x; ++i) {
4779 int out_y = j*yspc[p]+yorig[p];
4780 int out_x = i*xspc[p]+xorig[p];
4781 memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes,
4782 a->out + (j*x+i)*out_bytes, out_bytes);
4783 }
4784 }
4785 STBI_FREE(a->out);
4786 image_data += img_len;
4787 image_data_len -= img_len;
4788 }
4789 }
4790 a->out = final;
4791
4792 return 1;
4793}
4794
4795static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n)
4796{
4797 stbi__context *s = z->s;
4798 stbi__uint32 i, pixel_count = s->img_x * s->img_y;
4799 stbi_uc *p = z->out;
4800
4801 // compute color-based transparency, assuming we've
4802 // already got 255 as the alpha value in the output
4803 STBI_ASSERT(out_n == 2 || out_n == 4);
4804
4805 if (out_n == 2) {
4806 for (i=0; i < pixel_count; ++i) {
4807 p[1] = (p[0] == tc[0] ? 0 : 255);
4808 p += 2;
4809 }
4810 } else {
4811 for (i=0; i < pixel_count; ++i) {
4812 if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
4813 p[3] = 0;
4814 p += 4;
4815 }
4816 }
4817 return 1;
4818}
4819
4820static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n)
4821{
4822 stbi__context *s = z->s;
4823 stbi__uint32 i, pixel_count = s->img_x * s->img_y;
4824 stbi__uint16 *p = (stbi__uint16*) z->out;
4825
4826 // compute color-based transparency, assuming we've
4827 // already got 65535 as the alpha value in the output
4828 STBI_ASSERT(out_n == 2 || out_n == 4);
4829
4830 if (out_n == 2) {
4831 for (i = 0; i < pixel_count; ++i) {
4832 p[1] = (p[0] == tc[0] ? 0 : 65535);
4833 p += 2;
4834 }
4835 } else {
4836 for (i = 0; i < pixel_count; ++i) {
4837 if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
4838 p[3] = 0;
4839 p += 4;
4840 }
4841 }
4842 return 1;
4843}
4844
4845static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n)
4846{
4847 stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y;
4848 stbi_uc *p, *temp_out, *orig = a->out;
4849
4850 p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0);
4851 if (p == NULL) return stbi__err("outofmem", "Out of memory");
4852
4853   // between here and free(out) below, exiting would leak
4854 temp_out = p;
4855
4856 if (pal_img_n == 3) {
4857 for (i=0; i < pixel_count; ++i) {
4858 int n = orig[i]*4;
4859 p[0] = palette[n ];
4860 p[1] = palette[n+1];
4861 p[2] = palette[n+2];
4862 p += 3;
4863 }
4864 } else {
4865 for (i=0; i < pixel_count; ++i) {
4866 int n = orig[i]*4;
4867 p[0] = palette[n ];
4868 p[1] = palette[n+1];
4869 p[2] = palette[n+2];
4870 p[3] = palette[n+3];
4871 p += 4;
4872 }
4873 }
4874 STBI_FREE(a->out);
4875 a->out = temp_out;
4876
4877 STBI_NOTUSED(len);
4878
4879 return 1;
4880}
4881
4882static int stbi__unpremultiply_on_load = 0;
4883static int stbi__de_iphone_flag = 0;
4884
4885STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply)
4886{
4887 stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply;
4888}
4889
4890STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert)
4891{
4892 stbi__de_iphone_flag = flag_true_if_should_convert;
4893}
4894
4895static void stbi__de_iphone(stbi__png *z)
4896{
4897 stbi__context *s = z->s;
4898 stbi__uint32 i, pixel_count = s->img_x * s->img_y;
4899 stbi_uc *p = z->out;
4900
4901 if (s->img_out_n == 3) { // convert bgr to rgb
4902 for (i=0; i < pixel_count; ++i) {
4903 stbi_uc t = p[0];
4904 p[0] = p[2];
4905 p[2] = t;
4906 p += 3;
4907 }
4908 } else {
4909 STBI_ASSERT(s->img_out_n == 4);
4910 if (stbi__unpremultiply_on_load) {
4911 // convert bgr to rgb and unpremultiply
4912 for (i=0; i < pixel_count; ++i) {
4913 stbi_uc a = p[3];
4914 stbi_uc t = p[0];
4915 if (a) {
4916 stbi_uc half = a / 2;
4917 p[0] = (p[2] * 255 + half) / a;
4918 p[1] = (p[1] * 255 + half) / a;
4919 p[2] = ( t * 255 + half) / a;
4920 } else {
4921 p[0] = p[2];
4922 p[2] = t;
4923 }
4924 p += 4;
4925 }
4926 } else {
4927 // convert bgr to rgb
4928 for (i=0; i < pixel_count; ++i) {
4929 stbi_uc t = p[0];
4930 p[0] = p[2];
4931 p[2] = t;
4932 p += 4;
4933 }
4934 }
4935 }
4936}
4937
4938#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d))
4939
4940static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp)
4941{
4942 stbi_uc palette[1024], pal_img_n=0;
4943 stbi_uc has_trans=0, tc[3]={0};
4944 stbi__uint16 tc16[3];
4945 stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0;
4946 int first=1,k,interlace=0, color=0, is_iphone=0;
4947 stbi__context *s = z->s;
4948
4949 z->expanded = NULL;
4950 z->idata = NULL;
4951 z->out = NULL;
4952
4953 if (!stbi__check_png_header(s)) return 0;
4954
4955 if (scan == STBI__SCAN_type) return 1;
4956
4957 for (;;) {
4958 stbi__pngchunk c = stbi__get_chunk_header(s);
4959 switch (c.type) {
4960 case STBI__PNG_TYPE('C','g','B','I'):
4961 is_iphone = 1;
4962 stbi__skip(s, c.length);
4963 break;
4964 case STBI__PNG_TYPE('I','H','D','R'): {
4965 int comp,filter;
4966 if (!first) return stbi__err("multiple IHDR","Corrupt PNG");
4967 first = 0;
4968 if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG");
4969 s->img_x = stbi__get32be(s);
4970 s->img_y = stbi__get32be(s);
4971 if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
4972 if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
4973 z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only");
4974 color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG");
4975 if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG");
4976 if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG");
4977 comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG");
4978 filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG");
4979 interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG");
4980 if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG");
4981 if (!pal_img_n) {
4982 s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0);
4983 if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode");
4984 if (scan == STBI__SCAN_header) return 1;
4985 } else {
4986 // if paletted, then pal_n is our final components, and
4987 // img_n is # components to decompress/filter.
4988 s->img_n = 1;
4989 if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG");
4990 // if SCAN_header, have to scan to see if we have a tRNS
4991 }
4992 break;
4993 }
4994
4995 case STBI__PNG_TYPE('P','L','T','E'): {
4996 if (first) return stbi__err("first not IHDR", "Corrupt PNG");
4997 if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG");
4998 pal_len = c.length / 3;
4999 if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG");
5000 for (i=0; i < pal_len; ++i) {
5001 palette[i*4+0] = stbi__get8(s);
5002 palette[i*4+1] = stbi__get8(s);
5003 palette[i*4+2] = stbi__get8(s);
5004 palette[i*4+3] = 255;
5005 }
5006 break;
5007 }
5008
5009 case STBI__PNG_TYPE('t','R','N','S'): {
5010 if (first) return stbi__err("first not IHDR", "Corrupt PNG");
5011 if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG");
5012 if (pal_img_n) {
5013 if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; }
5014 if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG");
5015 if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG");
5016 pal_img_n = 4;
5017 for (i=0; i < c.length; ++i)
5018 palette[i*4+3] = stbi__get8(s);
5019 } else {
5020 if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG");
5021 if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG");
5022 has_trans = 1;
5023 if (z->depth == 16) {
5024 for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is
5025 } else {
5026 for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger
5027 }
5028 }
5029 break;
5030 }
5031
5032 case STBI__PNG_TYPE('I','D','A','T'): {
5033 if (first) return stbi__err("first not IHDR", "Corrupt PNG");
5034 if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG");
5035 if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; }
5036 if ((int)(ioff + c.length) < (int)ioff) return 0;
5037 if (ioff + c.length > idata_limit) {
5038 stbi__uint32 idata_limit_old = idata_limit;
5039 stbi_uc *p;
5040 if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096;
5041 while (ioff + c.length > idata_limit)
5042 idata_limit *= 2;
5043 STBI_NOTUSED(idata_limit_old);
5044 p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory");
5045 z->idata = p;
5046 }
5047 if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG");
5048 ioff += c.length;
5049 break;
5050 }
5051
5052 case STBI__PNG_TYPE('I','E','N','D'): {
5053 stbi__uint32 raw_len, bpl;
5054 if (first) return stbi__err("first not IHDR", "Corrupt PNG");
5055 if (scan != STBI__SCAN_load) return 1;
5056 if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG");
5057 // initial guess for decoded data size to avoid unnecessary reallocs
5058 bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component
5059 raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */;
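      // (editorial worked example: a 100-pixel-wide, 16-bit, 3-channel image gives
      // bpl = (100*16+7)/8 = 200 bytes per channel row, so raw_len =
      // 200*img_y*3 pixel bytes plus img_y filter-type bytes)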
5060 z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone);
5061 if (z->expanded == NULL) return 0; // zlib should set error
5062 STBI_FREE(z->idata); z->idata = NULL;
5063 if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans)
5064 s->img_out_n = s->img_n+1;
5065 else
5066 s->img_out_n = s->img_n;
5067 if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0;
5068 if (has_trans) {
5069 if (z->depth == 16) {
5070 if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0;
5071 } else {
5072 if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0;
5073 }
5074 }
5075 if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2)
5076 stbi__de_iphone(z);
5077 if (pal_img_n) {
5078 // pal_img_n == 3 or 4
5079 s->img_n = pal_img_n; // record the actual colors we had
5080 s->img_out_n = pal_img_n;
5081 if (req_comp >= 3) s->img_out_n = req_comp;
5082 if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n))
5083 return 0;
5084 } else if (has_trans) {
5085 // non-paletted image with tRNS -> source image has (constant) alpha
5086 ++s->img_n;
5087 }
5088 STBI_FREE(z->expanded); z->expanded = NULL;
5089 // end of PNG chunk, read and skip CRC
5090 stbi__get32be(s);
5091 return 1;
5092 }
5093
5094 default:
5095 // if critical, fail
5096 if (first) return stbi__err("first not IHDR", "Corrupt PNG");
5097 if ((c.type & (1 << 29)) == 0) {
5098 #ifndef STBI_NO_FAILURE_STRINGS
5099 // not threadsafe
5100 static char invalid_chunk[] = "XXXX PNG chunk not known";
5101 invalid_chunk[0] = STBI__BYTECAST(c.type >> 24);
5102 invalid_chunk[1] = STBI__BYTECAST(c.type >> 16);
5103 invalid_chunk[2] = STBI__BYTECAST(c.type >> 8);
5104 invalid_chunk[3] = STBI__BYTECAST(c.type >> 0);
5105 #endif
5106 return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type");
5107 }
5108 stbi__skip(s, c.length);
5109 break;
5110 }
5111 // end of PNG chunk, read and skip CRC
5112 stbi__get32be(s);
5113 }
5114}
5115
5116static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri)
5117{
5118 void *result=NULL;
5119 if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error");
5120 if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) {
5121 if (p->depth <= 8)
5122 ri->bits_per_channel = 8;
5123 else if (p->depth == 16)
5124 ri->bits_per_channel = 16;
5125 else
5126 return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth");
5127 result = p->out;
5128 p->out = NULL;
5129 if (req_comp && req_comp != p->s->img_out_n) {
5130 if (ri->bits_per_channel == 8)
5131 result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
5132 else
5133 result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
5134 p->s->img_out_n = req_comp;
5135 if (result == NULL) return result;
5136 }
5137 *x = p->s->img_x;
5138 *y = p->s->img_y;
5139 if (n) *n = p->s->img_n;
5140 }
5141 STBI_FREE(p->out); p->out = NULL;
5142 STBI_FREE(p->expanded); p->expanded = NULL;
5143 STBI_FREE(p->idata); p->idata = NULL;
5144
5145 return result;
5146}
5147
5148static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
5149{
5150 stbi__png p;
5151 p.s = s;
5152 return stbi__do_png(&p, x,y,comp,req_comp, ri);
5153}
5154
5155static int stbi__png_test(stbi__context *s)
5156{
5157 int r;
5158 r = stbi__check_png_header(s);
5159 stbi__rewind(s);
5160 return r;
5161}
5162
5163static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp)
5164{
5165 if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) {
5166 stbi__rewind( p->s );
5167 return 0;
5168 }
5169 if (x) *x = p->s->img_x;
5170 if (y) *y = p->s->img_y;
5171 if (comp) *comp = p->s->img_n;
5172 return 1;
5173}
5174
5175static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp)
5176{
5177 stbi__png p;
5178 p.s = s;
5179 return stbi__png_info_raw(&p, x, y, comp);
5180}
5181
5182static int stbi__png_is16(stbi__context *s)
5183{
5184 stbi__png p;
5185 p.s = s;
5186 if (!stbi__png_info_raw(&p, NULL, NULL, NULL))
5187 return 0;
5188 if (p.depth != 16) {
5189 stbi__rewind(p.s);
5190 return 0;
5191 }
5192 return 1;
5193}
5194#endif
5195
5196// Microsoft/Windows BMP image
5197
5198#ifndef STBI_NO_BMP
5199static int stbi__bmp_test_raw(stbi__context *s)
5200{
5201 int r;
5202 int sz;
5203 if (stbi__get8(s) != 'B') return 0;
5204 if (stbi__get8(s) != 'M') return 0;
5205 stbi__get32le(s); // discard filesize
5206 stbi__get16le(s); // discard reserved
5207 stbi__get16le(s); // discard reserved
5208 stbi__get32le(s); // discard data offset
5209 sz = stbi__get32le(s);
5210 r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124);
5211 return r;
5212}
5213
5214static int stbi__bmp_test(stbi__context *s)
5215{
5216 int r = stbi__bmp_test_raw(s);
5217 stbi__rewind(s);
5218 return r;
5219}
5220
5221
5222 // returns 0..31 for the highest set bit; -1 if no bits are set
5223static int stbi__high_bit(unsigned int z)
5224{
5225 int n=0;
5226 if (z == 0) return -1;
5227 if (z >= 0x10000) { n += 16; z >>= 16; }
5228 if (z >= 0x00100) { n += 8; z >>= 8; }
5229 if (z >= 0x00010) { n += 4; z >>= 4; }
5230 if (z >= 0x00004) { n += 2; z >>= 2; }
5231 if (z >= 0x00002) { n += 1;/* >>= 1;*/ }
5232 return n;
5233}
5234
5235static int stbi__bitcount(unsigned int a)
5236{
5237 a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2
5238 a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4
5239 a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits
5240 a = (a + (a >> 8)); // max 16 per 8 bits
5241 a = (a + (a >> 16)); // max 32 per 8 bits
5242 return a & 0xff;
5243}
5244
5245// extract an arbitrarily-aligned N-bit value (N=bits)
5246// from v, and then make it 8-bits long and fractionally
5247 // extend it to the full range.
5248static int stbi__shiftsigned(unsigned int v, int shift, int bits)
5249{
5250 static unsigned int mul_table[9] = {
5251 0,
5252 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/,
5253 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/,
5254 };
5255 static unsigned int shift_table[9] = {
5256 0, 0,0,1,0,2,4,6,0,
5257 };
5258 if (shift < 0)
5259 v <<= -shift;
5260 else
5261 v >>= shift;
5262 STBI_ASSERT(v < 256);
5263 v >>= (8-bits);
5264 STBI_ASSERT(bits >= 0 && bits <= 8);
5265 return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits];
5266}
5267
5268typedef struct
5269{
5270 int bpp, offset, hsz;
5271 unsigned int mr,mg,mb,ma, all_a;
5272 int extra_read;
5273} stbi__bmp_data;
5274
5275static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info)
5276{
5277 int hsz;
5278 if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP");
5279 stbi__get32le(s); // discard filesize
5280 stbi__get16le(s); // discard reserved
5281 stbi__get16le(s); // discard reserved
5282 info->offset = stbi__get32le(s);
5283 info->hsz = hsz = stbi__get32le(s);
5284 info->mr = info->mg = info->mb = info->ma = 0;
5285 info->extra_read = 14;
5286
5287 if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP");
5288
5289 if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown");
5290 if (hsz == 12) {
5291 s->img_x = stbi__get16le(s);
5292 s->img_y = stbi__get16le(s);
5293 } else {
5294 s->img_x = stbi__get32le(s);
5295 s->img_y = stbi__get32le(s);
5296 }
5297 if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP");
5298 info->bpp = stbi__get16le(s);
5299 if (hsz != 12) {
5300 int compress = stbi__get32le(s);
5301 if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE");
5302 stbi__get32le(s); // discard sizeof
5303 stbi__get32le(s); // discard hres
5304 stbi__get32le(s); // discard vres
5305 stbi__get32le(s); // discard colorsused
5306 stbi__get32le(s); // discard max important
5307 if (hsz == 40 || hsz == 56) {
5308 if (hsz == 56) {
5309 stbi__get32le(s);
5310 stbi__get32le(s);
5311 stbi__get32le(s);
5312 stbi__get32le(s);
5313 }
5314 if (info->bpp == 16 || info->bpp == 32) {
5315 if (compress == 0) {
5316 if (info->bpp == 32) {
5317 info->mr = 0xffu << 16;
5318 info->mg = 0xffu << 8;
5319 info->mb = 0xffu << 0;
5320 info->ma = 0xffu << 24;
5321 info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0
5322 } else {
5323 info->mr = 31u << 10;
5324 info->mg = 31u << 5;
5325 info->mb = 31u << 0;
5326 }
5327 } else if (compress == 3) {
5328 info->mr = stbi__get32le(s);
5329 info->mg = stbi__get32le(s);
5330 info->mb = stbi__get32le(s);
5331 info->extra_read += 12;
5332 // not documented, but generated by photoshop and handled by mspaint
5333 if (info->mr == info->mg && info->mg == info->mb) {
5334 // ?!?!?
5335 return stbi__errpuc("bad BMP", "bad BMP");
5336 }
5337 } else
5338 return stbi__errpuc("bad BMP", "bad BMP");
5339 }
5340 } else {
5341 int i;
5342 if (hsz != 108 && hsz != 124)
5343 return stbi__errpuc("bad BMP", "bad BMP");
5344 info->mr = stbi__get32le(s);
5345 info->mg = stbi__get32le(s);
5346 info->mb = stbi__get32le(s);
5347 info->ma = stbi__get32le(s);
5348 stbi__get32le(s); // discard color space
5349 for (i=0; i < 12; ++i)
5350 stbi__get32le(s); // discard color space parameters
5351 if (hsz == 124) {
5352 stbi__get32le(s); // discard rendering intent
5353 stbi__get32le(s); // discard offset of profile data
5354 stbi__get32le(s); // discard size of profile data
5355 stbi__get32le(s); // discard reserved
5356 }
5357 }
5358 }
5359 return (void *) 1;
5360}
5361
5362
5363static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
5364{
5365 stbi_uc *out;
5366 unsigned int mr=0,mg=0,mb=0,ma=0, all_a;
5367 stbi_uc pal[256][4];
5368 int psize=0,i,j,width;
5369 int flip_vertically, pad, target;
5370 stbi__bmp_data info;
5371 STBI_NOTUSED(ri);
5372
5373 info.all_a = 255;
5374 if (stbi__bmp_parse_header(s, &info) == NULL)
5375 return NULL; // error code already set
5376
5377 flip_vertically = ((int) s->img_y) > 0;
5378 s->img_y = abs((int) s->img_y);
5379
5380 if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
5381 if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
5382
5383 mr = info.mr;
5384 mg = info.mg;
5385 mb = info.mb;
5386 ma = info.ma;
5387 all_a = info.all_a;
5388
5389 if (info.hsz == 12) {
5390 if (info.bpp < 24)
5391 psize = (info.offset - info.extra_read - 24) / 3;
5392 } else {
5393 if (info.bpp < 16)
5394 psize = (info.offset - info.extra_read - info.hsz) >> 2;
5395 }
5396 if (psize == 0) {
5397 STBI_ASSERT(info.offset == s->callback_already_read + (int) (s->img_buffer - s->img_buffer_original));
5398 if (info.offset != s->callback_already_read + (s->img_buffer - s->buffer_start)) {
5399 return stbi__errpuc("bad offset", "Corrupt BMP");
5400 }
5401 }
5402
5403 if (info.bpp == 24 && ma == 0xff000000)
5404 s->img_n = 3;
5405 else
5406 s->img_n = ma ? 4 : 3;
5407 if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
5408 target = req_comp;
5409 else
5410 target = s->img_n; // if they want monochrome, we'll post-convert
5411
5412 // sanity-check size
5413 if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0))
5414 return stbi__errpuc("too large", "Corrupt BMP");
5415
5416 out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0);
5417 if (!out) return stbi__errpuc("outofmem", "Out of memory");
5418 if (info.bpp < 16) {
5419 int z=0;
5420 if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); }
5421 for (i=0; i < psize; ++i) {
5422 pal[i][2] = stbi__get8(s);
5423 pal[i][1] = stbi__get8(s);
5424 pal[i][0] = stbi__get8(s);
5425 if (info.hsz != 12) stbi__get8(s);
5426 pal[i][3] = 255;
5427 }
5428 stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 3 : 4));
5429 if (info.bpp == 1) width = (s->img_x + 7) >> 3;
5430 else if (info.bpp == 4) width = (s->img_x + 1) >> 1;
5431 else if (info.bpp == 8) width = s->img_x;
5432 else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); }
5433 pad = (-width)&3;
5434 if (info.bpp == 1) {
5435 for (j=0; j < (int) s->img_y; ++j) {
5436 int bit_offset = 7, v = stbi__get8(s);
5437 for (i=0; i < (int) s->img_x; ++i) {
5438 int color = (v>>bit_offset)&0x1;
5439 out[z++] = pal[color][0];
5440 out[z++] = pal[color][1];
5441 out[z++] = pal[color][2];
5442 if (target == 4) out[z++] = 255;
5443 if (i+1 == (int) s->img_x) break;
5444 if((--bit_offset) < 0) {
5445 bit_offset = 7;
5446 v = stbi__get8(s);
5447 }
5448 }
5449 stbi__skip(s, pad);
5450 }
5451 } else {
5452 for (j=0; j < (int) s->img_y; ++j) {
5453 for (i=0; i < (int) s->img_x; i += 2) {
5454 int v=stbi__get8(s),v2=0;
5455 if (info.bpp == 4) {
5456 v2 = v & 15;
5457 v >>= 4;
5458 }
5459 out[z++] = pal[v][0];
5460 out[z++] = pal[v][1];
5461 out[z++] = pal[v][2];
5462 if (target == 4) out[z++] = 255;
5463 if (i+1 == (int) s->img_x) break;
5464 v = (info.bpp == 8) ? stbi__get8(s) : v2;
5465 out[z++] = pal[v][0];
5466 out[z++] = pal[v][1];
5467 out[z++] = pal[v][2];
5468 if (target == 4) out[z++] = 255;
5469 }
5470 stbi__skip(s, pad);
5471 }
5472 }
5473 } else {
5474 int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0;
5475 int z = 0;
5476 int easy=0;
5477 stbi__skip(s, info.offset - info.extra_read - info.hsz);
5478 if (info.bpp == 24) width = 3 * s->img_x;
5479 else if (info.bpp == 16) width = 2*s->img_x;
5480 else /* bpp = 32 and pad = 0 */ width=0;
5481 pad = (-width) & 3;
5482 if (info.bpp == 24) {
5483 easy = 1;
5484 } else if (info.bpp == 32) {
5485 if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
5486 easy = 2;
5487 }
5488 if (!easy) {
5489 if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); }
5490 // right shift amt to put high bit in position #7
5491 rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr);
5492 gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg);
5493 bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb);
5494 ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma);
5495 if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); }
5496 }
5497 for (j=0; j < (int) s->img_y; ++j) {
5498 if (easy) {
5499 for (i=0; i < (int) s->img_x; ++i) {
5500 unsigned char a;
5501 out[z+2] = stbi__get8(s);
5502 out[z+1] = stbi__get8(s);
5503 out[z+0] = stbi__get8(s);
5504 z += 3;
5505 a = (easy == 2 ? stbi__get8(s) : 255);
5506 all_a |= a;
5507 if (target == 4) out[z++] = a;
5508 }
5509 } else {
5510 int bpp = info.bpp;
5511 for (i=0; i < (int) s->img_x; ++i) {
5512 stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s));
5513 unsigned int a;
5514 out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount));
5515 out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount));
5516 out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount));
5517 a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255);
5518 all_a |= a;
5519 if (target == 4) out[z++] = STBI__BYTECAST(a);
5520 }
5521 }
5522 stbi__skip(s, pad);
5523 }
5524 }
5525
5526 // if alpha channel is all 0s, replace with all 255s
5527 if (target == 4 && all_a == 0)
5528 for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4)
5529 out[i] = 255;
5530
5531 if (flip_vertically) {
5532 stbi_uc t;
5533 for (j=0; j < (int) s->img_y>>1; ++j) {
5534 stbi_uc *p1 = out + j *s->img_x*target;
5535 stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target;
5536 for (i=0; i < (int) s->img_x*target; ++i) {
5537 t = p1[i]; p1[i] = p2[i]; p2[i] = t;
5538 }
5539 }
5540 }
5541
5542 if (req_comp && req_comp != target) {
5543 out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y);
5544 if (out == NULL) return out; // stbi__convert_format frees input on failure
5545 }
5546
5547 *x = s->img_x;
5548 *y = s->img_y;
5549 if (comp) *comp = s->img_n;
5550 return out;
5551}
5552#endif
5553
5554// Targa Truevision - TGA
5555// by Jonathan Dummer
5556#ifndef STBI_NO_TGA
5557 // returns the component count (STBI_grey, STBI_grey_alpha, STBI_rgb or STBI_rgb_alpha), 0 on error
5558static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16)
5559{
5560 // only RGB or RGBA (incl. 16bit) or grey allowed
5561 if (is_rgb16) *is_rgb16 = 0;
5562 switch(bits_per_pixel) {
5563 case 8: return STBI_grey;
5564 case 16: if(is_grey) return STBI_grey_alpha;
5565 // fallthrough
5566 case 15: if(is_rgb16) *is_rgb16 = 1;
5567 return STBI_rgb;
5568 case 24: // fallthrough
5569 case 32: return bits_per_pixel/8;
5570 default: return 0;
5571 }
5572}
5573
5574static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp)
5575{
5576 int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp;
5577 int sz, tga_colormap_type;
5578 stbi__get8(s); // discard Offset
5579 tga_colormap_type = stbi__get8(s); // colormap type
5580 if( tga_colormap_type > 1 ) {
5581 stbi__rewind(s);
5582 return 0; // only RGB or indexed allowed
5583 }
5584 tga_image_type = stbi__get8(s); // image type
5585 if ( tga_colormap_type == 1 ) { // colormapped (paletted) image
5586 if (tga_image_type != 1 && tga_image_type != 9) {
5587 stbi__rewind(s);
5588 return 0;
5589 }
5590 stbi__skip(s,4); // skip index of first colormap entry and number of entries
5591 sz = stbi__get8(s); // check bits per palette color entry
5592 if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) {
5593 stbi__rewind(s);
5594 return 0;
5595 }
5596 stbi__skip(s,4); // skip image x and y origin
5597 tga_colormap_bpp = sz;
5598 } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE
5599 if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) {
5600 stbi__rewind(s);
5601 return 0; // only RGB or grey allowed, +/- RLE
5602 }
5603 stbi__skip(s,9); // skip colormap specification and image x/y origin
5604 tga_colormap_bpp = 0;
5605 }
5606 tga_w = stbi__get16le(s);
5607 if( tga_w < 1 ) {
5608 stbi__rewind(s);
5609 return 0; // test width
5610 }
5611 tga_h = stbi__get16le(s);
5612 if( tga_h < 1 ) {
5613 stbi__rewind(s);
5614 return 0; // test height
5615 }
5616 tga_bits_per_pixel = stbi__get8(s); // bits per pixel
5617 stbi__get8(s); // ignore alpha bits
5618 if (tga_colormap_bpp != 0) {
5619 if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) {
5620 // when using a colormap, tga_bits_per_pixel is the size of the indexes
5621 // I don't think anything but 8 or 16bit indexes makes sense
5622 stbi__rewind(s);
5623 return 0;
5624 }
5625 tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL);
5626 } else {
5627 tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL);
5628 }
5629 if(!tga_comp) {
5630 stbi__rewind(s);
5631 return 0;
5632 }
5633 if (x) *x = tga_w;
5634 if (y) *y = tga_h;
5635 if (comp) *comp = tga_comp;
5636 return 1; // seems to have passed everything
5637}
5638
5639static int stbi__tga_test(stbi__context *s)
5640{
5641 int res = 0;
5642 int sz, tga_color_type;
5643 stbi__get8(s); // discard Offset
5644 tga_color_type = stbi__get8(s); // color type
5645 if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed
5646 sz = stbi__get8(s); // image type
5647 if ( tga_color_type == 1 ) { // colormapped (paletted) image
5648 if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9
5649 stbi__skip(s,4); // skip index of first colormap entry and number of entries
5650 sz = stbi__get8(s); // check bits per palette color entry
5651 if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd;
5652 stbi__skip(s,4); // skip image x and y origin
5653 } else { // "normal" image w/o colormap
5654 if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE
5655 stbi__skip(s,9); // skip colormap specification and image x/y origin
5656 }
5657 if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width
5658 if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height
5659 sz = stbi__get8(s); // bits per pixel
5660 if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index
5661 if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd;
5662
5663 res = 1; // if we got this far, everything's good and we can return 1 instead of 0
5664
5665errorEnd:
5666 stbi__rewind(s);
5667 return res;
5668}
5669
5670// read 16bit value and convert to 24bit RGB
5671static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out)
5672{
5673 stbi__uint16 px = (stbi__uint16)stbi__get16le(s);
5674 stbi__uint16 fiveBitMask = 31;
5675 // we have 3 channels with 5bits each
5676 int r = (px >> 10) & fiveBitMask;
5677 int g = (px >> 5) & fiveBitMask;
5678 int b = px & fiveBitMask;
5679 // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later
5680 out[0] = (stbi_uc)((r * 255)/31);
5681 out[1] = (stbi_uc)((g * 255)/31);
5682 out[2] = (stbi_uc)((b * 255)/31);
5683
5684 // some people claim that the most significant bit might be used for alpha
5685 // (possibly if an alpha-bit is set in the "image descriptor byte")
5686 // but that only made 16bit test images completely translucent..
5687 // so let's treat all 15 and 16bit TGAs as RGB with no alpha.
5688}
5689
5690static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
5691{
5692 // read in the TGA header stuff
5693 int tga_offset = stbi__get8(s);
5694 int tga_indexed = stbi__get8(s);
5695 int tga_image_type = stbi__get8(s);
5696 int tga_is_RLE = 0;
5697 int tga_palette_start = stbi__get16le(s);
5698 int tga_palette_len = stbi__get16le(s);
5699 int tga_palette_bits = stbi__get8(s);
5700 int tga_x_origin = stbi__get16le(s);
5701 int tga_y_origin = stbi__get16le(s);
5702 int tga_width = stbi__get16le(s);
5703 int tga_height = stbi__get16le(s);
5704 int tga_bits_per_pixel = stbi__get8(s);
5705 int tga_comp, tga_rgb16=0;
5706 int tga_inverted = stbi__get8(s);
5707 // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?)
5708 // image data
5709 unsigned char *tga_data;
5710 unsigned char *tga_palette = NULL;
5711 int i, j;
5712 unsigned char raw_data[4] = {0};
5713 int RLE_count = 0;
5714 int RLE_repeating = 0;
5715 int read_next_pixel = 1;
5716 STBI_NOTUSED(ri);
5717 STBI_NOTUSED(tga_x_origin); // @TODO
5718 STBI_NOTUSED(tga_y_origin); // @TODO
5719
5720 if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
5721 if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
5722
5723   // do a tiny bit of preprocessing
5724 if ( tga_image_type >= 8 )
5725 {
5726 tga_image_type -= 8;
5727 tga_is_RLE = 1;
5728 }
5729 tga_inverted = 1 - ((tga_inverted >> 5) & 1);
5730
5731 // If I'm paletted, then I'll use the number of bits from the palette
5732 if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16);
5733 else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16);
5734
5735 if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency
5736 return stbi__errpuc("bad format", "Can't find out TGA pixelformat");
5737
5738 // tga info
5739 *x = tga_width;
5740 *y = tga_height;
5741 if (comp) *comp = tga_comp;
5742
5743 if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0))
5744 return stbi__errpuc("too large", "Corrupt TGA");
5745
5746 tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0);
5747 if (!tga_data) return stbi__errpuc("outofmem", "Out of memory");
5748
5749 // skip to the data's starting position (offset usually = 0)
5750 stbi__skip(s, tga_offset );
5751
5752 if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) {
5753 for (i=0; i < tga_height; ++i) {
5754 int row = tga_inverted ? tga_height -i - 1 : i;
5755 stbi_uc *tga_row = tga_data + row*tga_width*tga_comp;
5756 stbi__getn(s, tga_row, tga_width * tga_comp);
5757 }
5758 } else {
5759 // do I need to load a palette?
5760 if ( tga_indexed)
5761 {
5762 if (tga_palette_len == 0) { /* you have to have at least one entry! */
5763 STBI_FREE(tga_data);
5764 return stbi__errpuc("bad palette", "Corrupt TGA");
5765 }
5766
5767 // any data to skip? (offset usually = 0)
5768 stbi__skip(s, tga_palette_start );
5769 // load the palette
5770 tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0);
5771 if (!tga_palette) {
5772 STBI_FREE(tga_data);
5773 return stbi__errpuc("outofmem", "Out of memory");
5774 }
5775 if (tga_rgb16) {
5776 stbi_uc *pal_entry = tga_palette;
5777 STBI_ASSERT(tga_comp == STBI_rgb);
5778 for (i=0; i < tga_palette_len; ++i) {
5779 stbi__tga_read_rgb16(s, pal_entry);
5780 pal_entry += tga_comp;
5781 }
5782 } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) {
5783 STBI_FREE(tga_data);
5784 STBI_FREE(tga_palette);
5785 return stbi__errpuc("bad palette", "Corrupt TGA");
5786 }
5787 }
5788 // load the data
5789 for (i=0; i < tga_width * tga_height; ++i)
5790 {
5791      // if I'm in RLE mode, do I need to read a new RLE packet?
5792 if ( tga_is_RLE )
5793 {
5794 if ( RLE_count == 0 )
5795 {
5796 // yep, get the next byte as a RLE command
5797 int RLE_cmd = stbi__get8(s);
5798 RLE_count = 1 + (RLE_cmd & 127);
5799 RLE_repeating = RLE_cmd >> 7;
5800 read_next_pixel = 1;
5801 } else if ( !RLE_repeating )
5802 {
5803 read_next_pixel = 1;
5804 }
5805 } else
5806 {
5807 read_next_pixel = 1;
5808 }
5809 // OK, if I need to read a pixel, do it now
5810 if ( read_next_pixel )
5811 {
5812 // load however much data we did have
5813 if ( tga_indexed )
5814 {
5815 // read in index, then perform the lookup
5816 int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s);
5817 if ( pal_idx >= tga_palette_len ) {
5818 // invalid index
5819 pal_idx = 0;
5820 }
5821 pal_idx *= tga_comp;
5822 for (j = 0; j < tga_comp; ++j) {
5823 raw_data[j] = tga_palette[pal_idx+j];
5824 }
5825 } else if(tga_rgb16) {
5826 STBI_ASSERT(tga_comp == STBI_rgb);
5827 stbi__tga_read_rgb16(s, raw_data);
5828 } else {
5829 // read in the data raw
5830 for (j = 0; j < tga_comp; ++j) {
5831 raw_data[j] = stbi__get8(s);
5832 }
5833 }
5834 // clear the reading flag for the next pixel
5835 read_next_pixel = 0;
5836 } // end of reading a pixel
5837
5838 // copy data
5839 for (j = 0; j < tga_comp; ++j)
5840 tga_data[i*tga_comp+j] = raw_data[j];
5841
5842 // in case we're in RLE mode, keep counting down
5843 --RLE_count;
5844 }
5845 // do I need to invert the image?
5846 if ( tga_inverted )
5847 {
5848 for (j = 0; j*2 < tga_height; ++j)
5849 {
5850 int index1 = j * tga_width * tga_comp;
5851 int index2 = (tga_height - 1 - j) * tga_width * tga_comp;
5852 for (i = tga_width * tga_comp; i > 0; --i)
5853 {
5854 unsigned char temp = tga_data[index1];
5855 tga_data[index1] = tga_data[index2];
5856 tga_data[index2] = temp;
5857 ++index1;
5858 ++index2;
5859 }
5860 }
5861 }
5862 // clear my palette, if I had one
5863 if ( tga_palette != NULL )
5864 {
5865 STBI_FREE( tga_palette );
5866 }
5867 }
5868
5869 // swap RGB - if the source data was RGB16, it already is in the right order
5870 if (tga_comp >= 3 && !tga_rgb16)
5871 {
5872 unsigned char* tga_pixel = tga_data;
5873 for (i=0; i < tga_width * tga_height; ++i)
5874 {
5875 unsigned char temp = tga_pixel[0];
5876 tga_pixel[0] = tga_pixel[2];
5877 tga_pixel[2] = temp;
5878 tga_pixel += tga_comp;
5879 }
5880 }
5881
5882 // convert to target component count
5883 if (req_comp && req_comp != tga_comp)
5884 tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height);
5885
5886 // the things I do to get rid of an error message, and yet keep
5887 // Microsoft's C compilers happy... [8^(
5888 tga_palette_start = tga_palette_len = tga_palette_bits =
5889 tga_x_origin = tga_y_origin = 0;
5890 STBI_NOTUSED(tga_palette_start);
5891 // OK, done
5892 return tga_data;
5893}
5894#endif
5895
5896// *************************************************************************************************
5897// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB
5898
5899#ifndef STBI_NO_PSD
5900static int stbi__psd_test(stbi__context *s)
5901{
5902 int r = (stbi__get32be(s) == 0x38425053);
5903 stbi__rewind(s);
5904 return r;
5905}
5906
5907static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount)
5908{
5909 int count, nleft, len;
5910
5911 count = 0;
5912 while ((nleft = pixelCount - count) > 0) {
5913 len = stbi__get8(s);
5914 if (len == 128) {
5915 // No-op.
5916 } else if (len < 128) {
5917 // Copy next len+1 bytes literally.
5918 len++;
5919 if (len > nleft) return 0; // corrupt data
5920 count += len;
5921 while (len) {
5922 *p = stbi__get8(s);
5923 p += 4;
5924 len--;
5925 }
5926 } else if (len > 128) {
5927 stbi_uc val;
5928 // Next -len+1 bytes in the dest are replicated from next source byte.
5929 // (Interpret len as a negative 8-bit int.)
5930 len = 257 - len;
5931 if (len > nleft) return 0; // corrupt data
5932 val = stbi__get8(s);
5933 count += len;
5934 while (len) {
5935 *p = val;
5936 p += 4;
5937 len--;
5938 }
5939 }
5940 }
5941
5942 return 1;
5943}
5944
5945static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc)
5946{
5947 int pixelCount;
5948 int channelCount, compression;
5949 int channel, i;
5950 int bitdepth;
5951 int w,h;
5952 stbi_uc *out;
5953 STBI_NOTUSED(ri);
5954
5955 // Check identifier
5956 if (stbi__get32be(s) != 0x38425053) // "8BPS"
5957 return stbi__errpuc("not PSD", "Corrupt PSD image");
5958
5959 // Check file type version.
5960 if (stbi__get16be(s) != 1)
5961 return stbi__errpuc("wrong version", "Unsupported version of PSD image");
5962
5963 // Skip 6 reserved bytes.
5964 stbi__skip(s, 6 );
5965
5966 // Read the number of channels (R, G, B, A, etc).
5967 channelCount = stbi__get16be(s);
5968 if (channelCount < 0 || channelCount > 16)
5969 return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image");
5970
5971 // Read the rows and columns of the image.
5972 h = stbi__get32be(s);
5973 w = stbi__get32be(s);
5974
5975 if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
5976 if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
5977
5978   // Make sure the depth is 8 or 16 bits.
5979 bitdepth = stbi__get16be(s);
5980 if (bitdepth != 8 && bitdepth != 16)
5981 return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit");
5982
5983 // Make sure the color mode is RGB.
5984 // Valid options are:
5985 // 0: Bitmap
5986 // 1: Grayscale
5987 // 2: Indexed color
5988 // 3: RGB color
5989 // 4: CMYK color
5990 // 7: Multichannel
5991 // 8: Duotone
5992 // 9: Lab color
5993 if (stbi__get16be(s) != 3)
5994 return stbi__errpuc("wrong color format", "PSD is not in RGB color format");
5995
5996 // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.)
5997 stbi__skip(s,stbi__get32be(s) );
5998
5999 // Skip the image resources. (resolution, pen tool paths, etc)
6000 stbi__skip(s, stbi__get32be(s) );
6001
6002 // Skip the reserved data.
6003 stbi__skip(s, stbi__get32be(s) );
6004
6005 // Find out if the data is compressed.
6006 // Known values:
6007 // 0: no compression
6008 // 1: RLE compressed
6009 compression = stbi__get16be(s);
6010 if (compression > 1)
6011 return stbi__errpuc("bad compression", "PSD has an unknown compression format");
6012
6013 // Check size
6014 if (!stbi__mad3sizes_valid(4, w, h, 0))
6015 return stbi__errpuc("too large", "Corrupt PSD");
6016
6017 // Create the destination image.
6018
6019 if (!compression && bitdepth == 16 && bpc == 16) {
6020 out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0);
6021 ri->bits_per_channel = 16;
6022 } else
6023 out = (stbi_uc *) stbi__malloc(4 * w*h);
6024
6025 if (!out) return stbi__errpuc("outofmem", "Out of memory");
6026 pixelCount = w*h;
6027
6028 // Initialize the data to zero.
6029 //memset( out, 0, pixelCount * 4 );
6030
6031 // Finally, the image data.
6032 if (compression) {
6033 // RLE as used by .PSD and .TIFF
6034 // Loop until you get the number of unpacked bytes you are expecting:
6035 // Read the next source byte into n.
6036 // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally.
6037 // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times.
6038 // Else if n is 128, noop.
6039 // Endloop
6040
6041 // The RLE-compressed data is preceded by a 2-byte data count for each row in the data,
6042 // which we're going to just skip.
6043 stbi__skip(s, h * channelCount * 2 );
6044
6045 // Read the RLE data by channel.
6046 for (channel = 0; channel < 4; channel++) {
6047 stbi_uc *p;
6048
6049 p = out+channel;
6050 if (channel >= channelCount) {
6051 // Fill this channel with default data.
6052 for (i = 0; i < pixelCount; i++, p += 4)
6053 *p = (channel == 3 ? 255 : 0);
6054 } else {
6055 // Read the RLE data.
6056 if (!stbi__psd_decode_rle(s, p, pixelCount)) {
6057 STBI_FREE(out);
6058 return stbi__errpuc("corrupt", "bad RLE data");
6059 }
6060 }
6061 }
6062
6063 } else {
6064 // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...)
6065 // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image.
6066
6067 // Read the data by channel.
6068 for (channel = 0; channel < 4; channel++) {
6069 if (channel >= channelCount) {
6070 // Fill this channel with default data.
6071 if (bitdepth == 16 && bpc == 16) {
6072 stbi__uint16 *q = ((stbi__uint16 *) out) + channel;
6073 stbi__uint16 val = channel == 3 ? 65535 : 0;
6074 for (i = 0; i < pixelCount; i++, q += 4)
6075 *q = val;
6076 } else {
6077 stbi_uc *p = out+channel;
6078 stbi_uc val = channel == 3 ? 255 : 0;
6079 for (i = 0; i < pixelCount; i++, p += 4)
6080 *p = val;
6081 }
6082 } else {
6083 if (ri->bits_per_channel == 16) { // output bpc
6084 stbi__uint16 *q = ((stbi__uint16 *) out) + channel;
6085 for (i = 0; i < pixelCount; i++, q += 4)
6086 *q = (stbi__uint16) stbi__get16be(s);
6087 } else {
6088 stbi_uc *p = out+channel;
6089 if (bitdepth == 16) { // input bpc
6090 for (i = 0; i < pixelCount; i++, p += 4)
6091 *p = (stbi_uc) (stbi__get16be(s) >> 8);
6092 } else {
6093 for (i = 0; i < pixelCount; i++, p += 4)
6094 *p = stbi__get8(s);
6095 }
6096 }
6097 }
6098 }
6099 }
6100
6101 // remove weird white matte from PSD
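   // (editorial: the arithmetic below un-composites against a white matte:
   // if stored = true*a + max*(1-a), then true = stored/a + max*(1 - 1/a),
   // which is exactly pixel*ra + inv_a with ra = 1/a)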
6102 if (channelCount >= 4) {
6103 if (ri->bits_per_channel == 16) {
6104 for (i=0; i < w*h; ++i) {
6105 stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i;
6106 if (pixel[3] != 0 && pixel[3] != 65535) {
6107 float a = pixel[3] / 65535.0f;
6108 float ra = 1.0f / a;
6109 float inv_a = 65535.0f * (1 - ra);
6110 pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a);
6111 pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a);
6112 pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a);
6113 }
6114 }
6115 } else {
6116 for (i=0; i < w*h; ++i) {
6117 unsigned char *pixel = out + 4*i;
6118 if (pixel[3] != 0 && pixel[3] != 255) {
6119 float a = pixel[3] / 255.0f;
6120 float ra = 1.0f / a;
6121 float inv_a = 255.0f * (1 - ra);
6122 pixel[0] = (unsigned char) (pixel[0]*ra + inv_a);
6123 pixel[1] = (unsigned char) (pixel[1]*ra + inv_a);
6124 pixel[2] = (unsigned char) (pixel[2]*ra + inv_a);
6125 }
6126 }
6127 }
6128 }
6129
6130 // convert to desired output format
6131 if (req_comp && req_comp != 4) {
6132 if (ri->bits_per_channel == 16)
6133 out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h);
6134 else
6135 out = stbi__convert_format(out, 4, req_comp, w, h);
6136 if (out == NULL) return out; // stbi__convert_format frees input on failure
6137 }
6138
6139 if (comp) *comp = 4;
6140 *y = h;
6141 *x = w;
6142
6143 return out;
6144}
6145#endif
6146
6147// *************************************************************************************************
6148// Softimage PIC loader
6149// by Tom Seddon
6150//
6151// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format
6152// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/
6153
6154#ifndef STBI_NO_PIC
6155static int stbi__pic_is4(stbi__context *s,const char *str)
6156{
6157 int i;
6158 for (i=0; i<4; ++i)
6159 if (stbi__get8(s) != (stbi_uc)str[i])
6160 return 0;
6161
6162 return 1;
6163}
6164
6165static int stbi__pic_test_core(stbi__context *s)
6166{
6167 int i;
6168
6169 if (!stbi__pic_is4(s,"\x53\x80\xF6\x34"))
6170 return 0;
6171
6172 for(i=0;i<84;++i)
6173 stbi__get8(s);
6174
6175 if (!stbi__pic_is4(s,"PICT"))
6176 return 0;
6177
6178 return 1;
6179}
6180
6181typedef struct
6182{
6183 stbi_uc size,type,channel;
6184} stbi__pic_packet;
6185
6186static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest)
6187{
6188 int mask=0x80, i;
6189
6190 for (i=0; i<4; ++i, mask>>=1) {
6191 if (channel & mask) {
6192 if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short");
6193 dest[i]=stbi__get8(s);
6194 }
6195 }
6196
6197 return dest;
6198}
6199
6200static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src)
6201{
6202 int mask=0x80,i;
6203
6204 for (i=0;i<4; ++i, mask>>=1)
6205 if (channel&mask)
6206 dest[i]=src[i];
6207}
6208
6209static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result)
6210{
6211 int act_comp=0,num_packets=0,y,chained;
6212 stbi__pic_packet packets[10];
6213
6214 // this will (should...) cater for even some bizarre stuff like having data
6215 // for the same channel in multiple packets.
6216 do {
6217 stbi__pic_packet *packet;
6218
6219 if (num_packets==sizeof(packets)/sizeof(packets[0]))
6220 return stbi__errpuc("bad format","too many packets");
6221
6222 packet = &packets[num_packets++];
6223
6224 chained = stbi__get8(s);
6225 packet->size = stbi__get8(s);
6226 packet->type = stbi__get8(s);
6227 packet->channel = stbi__get8(s);
6228
6229 act_comp |= packet->channel;
6230
6231 if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)");
6232 if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp");
6233 } while (chained);
6234
6235 *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel?
6236
6237 for(y=0; y<height; ++y) {
6238 int packet_idx;
6239
6240 for(packet_idx=0; packet_idx < num_packets; ++packet_idx) {
6241 stbi__pic_packet *packet = &packets[packet_idx];
6242 stbi_uc *dest = result+y*width*4;
6243
6244 switch (packet->type) {
6245 default:
6246 return stbi__errpuc("bad format","packet has bad compression type");
6247
6248 case 0: {//uncompressed
6249 int x;
6250
6251 for(x=0;x<width;++x, dest+=4)
6252 if (!stbi__readval(s,packet->channel,dest))
6253 return 0;
6254 break;
6255 }
6256
6257 case 1://Pure RLE
6258 {
6259 int left=width, i;
6260
6261 while (left>0) {
6262 stbi_uc count,value[4];
6263
6264 count=stbi__get8(s);
6265 if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)");
6266
6267 if (count > left)
6268 count = (stbi_uc) left;
6269
6270 if (!stbi__readval(s,packet->channel,value)) return 0;
6271
6272 for(i=0; i<count; ++i,dest+=4)
6273 stbi__copyval(packet->channel,dest,value);
6274 left -= count;
6275 }
6276 }
6277 break;
6278
6279 case 2: {//Mixed RLE
6280 int left=width;
6281 while (left>0) {
6282 int count = stbi__get8(s), i;
6283 if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)");
6284
6285 if (count >= 128) { // Repeated
6286 stbi_uc value[4];
6287
6288 if (count==128)
6289 count = stbi__get16be(s);
6290 else
6291 count -= 127;
6292 if (count > left)
6293 return stbi__errpuc("bad file","scanline overrun");
6294
6295 if (!stbi__readval(s,packet->channel,value))
6296 return 0;
6297
6298 for(i=0;i<count;++i, dest += 4)
6299 stbi__copyval(packet->channel,dest,value);
6300 } else { // Raw
6301 ++count;
6302 if (count>left) return stbi__errpuc("bad file","scanline overrun");
6303
6304 for(i=0;i<count;++i, dest+=4)
6305 if (!stbi__readval(s,packet->channel,dest))
6306 return 0;
6307 }
6308 left-=count;
6309 }
6310 break;
6311 }
6312 }
6313 }
6314 }
6315
6316 return result;
6317}
6318
6319static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri)
6320{
6321 stbi_uc *result;
6322 int i, x,y, internal_comp;
6323 STBI_NOTUSED(ri);
6324
6325 if (!comp) comp = &internal_comp;
6326
6327 for (i=0; i<92; ++i)
6328 stbi__get8(s);
6329
6330 x = stbi__get16be(s);
6331 y = stbi__get16be(s);
6332
6333 if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
6334 if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
6335
6336 if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)");
6337 if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode");
6338
6339 stbi__get32be(s); //skip `ratio'
6340 stbi__get16be(s); //skip `fields'
6341 stbi__get16be(s); //skip `pad'
6342
6343 // intermediate buffer is RGBA
6344   result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0);
6345   if (!result) return stbi__errpuc("outofmem", "Out of memory");  memset(result, 0xff, x*y*4);
6346
6347 if (!stbi__pic_load_core(s,x,y,comp, result)) {
6348 STBI_FREE(result);
6349 result=0;
6350 }
6351 *px = x;
6352 *py = y;
6353 if (req_comp == 0) req_comp = *comp;
6354 result=stbi__convert_format(result,4,req_comp,x,y);
6355
6356 return result;
6357}
6358
6359static int stbi__pic_test(stbi__context *s)
6360{
6361 int r = stbi__pic_test_core(s);
6362 stbi__rewind(s);
6363 return r;
6364}
6365#endif
6366
6367// *************************************************************************************************
6368// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb
6369
6370#ifndef STBI_NO_GIF
6371typedef struct
6372{
6373 stbi__int16 prefix;
6374 stbi_uc first;
6375 stbi_uc suffix;
6376} stbi__gif_lzw;
6377
6378typedef struct
6379{
6380 int w,h;
6381 stbi_uc *out; // output buffer (always 4 components)
6382 stbi_uc *background; // The current "background" as far as a gif is concerned
6383 stbi_uc *history;
6384 int flags, bgindex, ratio, transparent, eflags;
6385 stbi_uc pal[256][4];
6386 stbi_uc lpal[256][4];
6387 stbi__gif_lzw codes[8192];
6388 stbi_uc *color_table;
6389 int parse, step;
6390 int lflags;
6391 int start_x, start_y;
6392 int max_x, max_y;
6393 int cur_x, cur_y;
6394 int line_size;
6395 int delay;
6396} stbi__gif;
6397
6398static int stbi__gif_test_raw(stbi__context *s)
6399{
6400 int sz;
6401 if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0;
6402 sz = stbi__get8(s);
6403 if (sz != '9' && sz != '7') return 0;
6404 if (stbi__get8(s) != 'a') return 0;
6405 return 1;
6406}
6407
6408static int stbi__gif_test(stbi__context *s)
6409{
6410 int r = stbi__gif_test_raw(s);
6411 stbi__rewind(s);
6412 return r;
6413}
6414
6415static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp)
6416{
6417 int i;
6418 for (i=0; i < num_entries; ++i) {
6419 pal[i][2] = stbi__get8(s);
6420 pal[i][1] = stbi__get8(s);
6421 pal[i][0] = stbi__get8(s);
6422 pal[i][3] = transp == i ? 0 : 255;
6423 }
6424}
6425
6426static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info)
6427{
6428 stbi_uc version;
6429 if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8')
6430 return stbi__err("not GIF", "Corrupt GIF");
6431
6432 version = stbi__get8(s);
6433 if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF");
6434 if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF");
6435
6436 stbi__g_failure_reason = "";
6437 g->w = stbi__get16le(s);
6438 g->h = stbi__get16le(s);
6439 g->flags = stbi__get8(s);
6440 g->bgindex = stbi__get8(s);
6441 g->ratio = stbi__get8(s);
6442 g->transparent = -1;
6443
6444 if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
6445 if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
6446
6447 if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments
6448
6449 if (is_info) return 1;
6450
6451 if (g->flags & 0x80)
6452 stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1);
6453
6454 return 1;
6455}
6456
6457static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp)
6458{
6459 stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif));
6460 if (!stbi__gif_header(s, g, comp, 1)) {
6461 STBI_FREE(g);
6462 stbi__rewind( s );
6463 return 0;
6464 }
6465 if (x) *x = g->w;
6466 if (y) *y = g->h;
6467 STBI_FREE(g);
6468 return 1;
6469}
6470
6471static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code)
6472{
6473 stbi_uc *p, *c;
6474 int idx;
6475
6476 // recurse to decode the prefixes, since the linked-list is backwards,
6477 // and working backwards through an interleaved image would be nasty
6478 if (g->codes[code].prefix >= 0)
6479 stbi__out_gif_code(g, g->codes[code].prefix);
6480
6481 if (g->cur_y >= g->max_y) return;
6482
6483 idx = g->cur_x + g->cur_y;
6484 p = &g->out[idx];
6485 g->history[idx / 4] = 1;
6486
6487 c = &g->color_table[g->codes[code].suffix * 4];
6488 if (c[3] > 128) { // don't render transparent pixels;
6489 p[0] = c[2];
6490 p[1] = c[1];
6491 p[2] = c[0];
6492 p[3] = c[3];
6493 }
6494 g->cur_x += 4;
6495
6496 if (g->cur_x >= g->max_x) {
6497 g->cur_x = g->start_x;
6498 g->cur_y += g->step;
6499
6500 while (g->cur_y >= g->max_y && g->parse > 0) {
6501 g->step = (1 << g->parse) * g->line_size;
6502 g->cur_y = g->start_y + (g->step >> 1);
6503 --g->parse;
6504 }
6505 }
6506}
6507
6508static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g)
6509{
6510 stbi_uc lzw_cs;
6511 stbi__int32 len, init_code;
6512 stbi__uint32 first;
6513 stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear;
6514 stbi__gif_lzw *p;
6515
6516 lzw_cs = stbi__get8(s);
6517 if (lzw_cs > 12) return NULL;
6518 clear = 1 << lzw_cs;
6519 first = 1;
6520 codesize = lzw_cs + 1;
6521 codemask = (1 << codesize) - 1;
6522 bits = 0;
6523 valid_bits = 0;
6524 for (init_code = 0; init_code < clear; init_code++) {
6525 g->codes[init_code].prefix = -1;
6526 g->codes[init_code].first = (stbi_uc) init_code;
6527 g->codes[init_code].suffix = (stbi_uc) init_code;
6528 }
6529
6530 // support no starting clear code
6531 avail = clear+2;
6532 oldcode = -1;
6533
6534 len = 0;
6535 for(;;) {
6536 if (valid_bits < codesize) {
6537 if (len == 0) {
6538 len = stbi__get8(s); // start new block
6539 if (len == 0)
6540 return g->out;
6541 }
6542 --len;
6543 bits |= (stbi__int32) stbi__get8(s) << valid_bits;
6544 valid_bits += 8;
6545 } else {
6546 stbi__int32 code = bits & codemask;
6547 bits >>= codesize;
6548 valid_bits -= codesize;
6549 // @OPTIMIZE: is there some way we can accelerate the non-clear path?
6550 if (code == clear) { // clear code
6551 codesize = lzw_cs + 1;
6552 codemask = (1 << codesize) - 1;
6553 avail = clear + 2;
6554 oldcode = -1;
6555 first = 0;
6556 } else if (code == clear + 1) { // end of stream code
6557 stbi__skip(s, len);
6558 while ((len = stbi__get8(s)) > 0)
6559 stbi__skip(s,len);
6560 return g->out;
6561 } else if (code <= avail) {
6562 if (first) {
6563 return stbi__errpuc("no clear code", "Corrupt GIF");
6564 }
6565
6566 if (oldcode >= 0) {
6567 p = &g->codes[avail++];
6568 if (avail > 8192) {
6569 return stbi__errpuc("too many codes", "Corrupt GIF");
6570 }
6571
6572 p->prefix = (stbi__int16) oldcode;
6573 p->first = g->codes[oldcode].first;
6574 p->suffix = (code == avail) ? p->first : g->codes[code].first;
6575 } else if (code == avail)
6576 return stbi__errpuc("illegal code in raster", "Corrupt GIF");
6577
6578 stbi__out_gif_code(g, (stbi__uint16) code);
6579
6580 if ((avail & codemask) == 0 && avail <= 0x0FFF) {
6581 codesize++;
6582 codemask = (1 << codesize) - 1;
6583 }
6584
6585 oldcode = code;
6586 } else {
6587 return stbi__errpuc("illegal code in raster", "Corrupt GIF");
6588 }
6589 }
6590 }
6591}
6592
6593// this function is designed to support animated gifs, although stb_image doesn't support it
6594// two back is the image from two frames ago, used for a very specific disposal format
6595static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back)
6596{
6597 int dispose;
6598 int first_frame;
6599 int pi;
6600 int pcount;
6601 STBI_NOTUSED(req_comp);
6602
6603 // on first frame, any non-written pixels get the background colour (non-transparent)
6604 first_frame = 0;
6605 if (g->out == 0) {
6606 if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header
6607 if (!stbi__mad3sizes_valid(4, g->w, g->h, 0))
6608 return stbi__errpuc("too large", "GIF image is too large");
6609 pcount = g->w * g->h;
6610 g->out = (stbi_uc *) stbi__malloc(4 * pcount);
6611 g->background = (stbi_uc *) stbi__malloc(4 * pcount);
6612 g->history = (stbi_uc *) stbi__malloc(pcount);
6613 if (!g->out || !g->background || !g->history)
6614 return stbi__errpuc("outofmem", "Out of memory");
6615
6616 // image is treated as "transparent" at the start - ie, nothing overwrites the current background;
6617 // background colour is only used for pixels that are not rendered first frame, after that "background"
6618 // color refers to the color that was there the previous frame.
6619 memset(g->out, 0x00, 4 * pcount);
6620 memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent)
6621 memset(g->history, 0x00, pcount); // pixels that were affected previous frame
6622 first_frame = 1;
6623 } else {
6624 // second frame - how do we dispose of the previous one?
6625 dispose = (g->eflags & 0x1C) >> 2;
6626 pcount = g->w * g->h;
6627
6628 if ((dispose == 3) && (two_back == 0)) {
6629 dispose = 2; // if I don't have an image to revert back to, default to the old background
6630 }
6631
6632 if (dispose == 3) { // use previous graphic
6633 for (pi = 0; pi < pcount; ++pi) {
6634 if (g->history[pi]) {
6635 memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 );
6636 }
6637 }
6638 } else if (dispose == 2) {
6639         // restore what was changed last frame to the background from before that frame;
6640 for (pi = 0; pi < pcount; ++pi) {
6641 if (g->history[pi]) {
6642 memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 );
6643 }
6644 }
6645 } else {
6646         // This is a non-disposal case either way, so just
6647 // leave the pixels as is, and they will become the new background
6648 // 1: do not dispose
6649 // 0: not specified.
6650 }
6651
6652      // background is what out is after the undoing of the previous frame;
6653 memcpy( g->background, g->out, 4 * g->w * g->h );
6654 }
6655
6656 // clear my history;
6657 memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame
6658
6659 for (;;) {
6660 int tag = stbi__get8(s);
6661 switch (tag) {
6662 case 0x2C: /* Image Descriptor */
6663 {
6664 stbi__int32 x, y, w, h;
6665 stbi_uc *o;
6666
6667 x = stbi__get16le(s);
6668 y = stbi__get16le(s);
6669 w = stbi__get16le(s);
6670 h = stbi__get16le(s);
6671 if (((x + w) > (g->w)) || ((y + h) > (g->h)))
6672 return stbi__errpuc("bad Image Descriptor", "Corrupt GIF");
6673
6674 g->line_size = g->w * 4;
6675 g->start_x = x * 4;
6676 g->start_y = y * g->line_size;
6677 g->max_x = g->start_x + w * 4;
6678 g->max_y = g->start_y + h * g->line_size;
6679 g->cur_x = g->start_x;
6680 g->cur_y = g->start_y;
6681
6682 // if the width of the specified rectangle is 0, that means
6683 // we may not see *any* pixels or the image is malformed;
6684 // to make sure this is caught, move the current y down to
6685 // max_y (which is what out_gif_code checks).
6686 if (w == 0)
6687 g->cur_y = g->max_y;
6688
6689 g->lflags = stbi__get8(s);
6690
6691 if (g->lflags & 0x40) {
6692 g->step = 8 * g->line_size; // first interlaced spacing
6693 g->parse = 3;
6694 } else {
6695 g->step = g->line_size;
6696 g->parse = 0;
6697 }
6698
6699 if (g->lflags & 0x80) {
6700 stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1);
6701 g->color_table = (stbi_uc *) g->lpal;
6702 } else if (g->flags & 0x80) {
6703 g->color_table = (stbi_uc *) g->pal;
6704 } else
6705 return stbi__errpuc("missing color table", "Corrupt GIF");
6706
6707 o = stbi__process_gif_raster(s, g);
6708 if (!o) return NULL;
6709
6711         pcount = g->w * g->h;
6712         if (first_frame && (g->bgindex > 0)) {
6713            // if this was the first frame, any pixel not drawn to gets the background color
6714 for (pi = 0; pi < pcount; ++pi) {
6715 if (g->history[pi] == 0) {
6716               g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; it will be reset next frame if need be;
6717 memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 );
6718 }
6719 }
6720 }
6721
6722 return o;
6723 }
6724
6725 case 0x21: // Comment Extension.
6726 {
6727 int len;
6728 int ext = stbi__get8(s);
6729 if (ext == 0xF9) { // Graphic Control Extension.
6730 len = stbi__get8(s);
6731 if (len == 4) {
6732 g->eflags = stbi__get8(s);
6733 g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths.
6734
6735 // unset old transparent
6736 if (g->transparent >= 0) {
6737 g->pal[g->transparent][3] = 255;
6738 }
6739 if (g->eflags & 0x01) {
6740 g->transparent = stbi__get8(s);
6741 if (g->transparent >= 0) {
6742 g->pal[g->transparent][3] = 0;
6743 }
6744 } else {
6745 // don't need transparent
6746 stbi__skip(s, 1);
6747 g->transparent = -1;
6748 }
6749 } else {
6750 stbi__skip(s, len);
6751 break;
6752 }
6753 }
6754 while ((len = stbi__get8(s)) != 0) {
6755 stbi__skip(s, len);
6756 }
6757 break;
6758 }
6759
6760 case 0x3B: // gif stream termination code
6761 return (stbi_uc *) s; // using '1' causes warning on some compilers
6762
6763 default:
6764 return stbi__errpuc("unknown code", "Corrupt GIF");
6765 }
6766 }
6767}
6768
6769static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp)
6770{
6771 if (stbi__gif_test(s)) {
6772 int layers = 0;
6773 stbi_uc *u = 0;
6774 stbi_uc *out = 0;
6775 stbi_uc *two_back = 0;
6776 stbi__gif g;
6777 int stride;
6778 int out_size = 0;
6779 int delays_size = 0;
6780 memset(&g, 0, sizeof(g));
6781 if (delays) {
6782 *delays = 0;
6783 }
6784
6785 do {
6786 u = stbi__gif_load_next(s, &g, comp, req_comp, two_back);
6787 if (u == (stbi_uc *) s) u = 0; // end of animated gif marker
6788
6789 if (u) {
6790 *x = g.w;
6791 *y = g.h;
6792 ++layers;
6793 stride = g.w * g.h * 4;
6794
6795 if (out) {
6796 void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride );
6797 if (NULL == tmp) {
6798 STBI_FREE(g.out);
6799 STBI_FREE(g.history);
6800 STBI_FREE(g.background);
6801 return stbi__errpuc("outofmem", "Out of memory");
6802 }
6803 else {
6804 out = (stbi_uc*) tmp;
6805 out_size = layers * stride;
6806 }
6807
6808 if (delays) {
6809 *delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers );
6810 delays_size = layers * sizeof(int);
6811 }
6812 } else {
6813 out = (stbi_uc*)stbi__malloc( layers * stride );
6814 out_size = layers * stride;
6815 if (delays) {
6816 *delays = (int*) stbi__malloc( layers * sizeof(int) );
6817 delays_size = layers * sizeof(int);
6818 }
6819 }
6820 memcpy( out + ((layers - 1) * stride), u, stride );
6821 if (layers >= 2) {
6822 two_back = out - 2 * stride;
6823 }
6824
6825 if (delays) {
6826 (*delays)[layers - 1U] = g.delay;
6827 }
6828 }
6829 } while (u != 0);
6830
6831 // free temp buffer;
6832 STBI_FREE(g.out);
6833 STBI_FREE(g.history);
6834 STBI_FREE(g.background);
6835
6836 // do the final conversion after loading everything;
6837 if (req_comp && req_comp != 4)
6838 out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h);
6839
6840 *z = layers;
6841 return out;
6842 } else {
6843      return stbi__errpuc("not GIF", "Image was not a GIF type.");
6844 }
6845}
6846
6847static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
6848{
6849 stbi_uc *u = 0;
6850 stbi__gif g;
6851 memset(&g, 0, sizeof(g));
6852 STBI_NOTUSED(ri);
6853
6854 u = stbi__gif_load_next(s, &g, comp, req_comp, 0);
6855 if (u == (stbi_uc *) s) u = 0; // end of animated gif marker
6856 if (u) {
6857 *x = g.w;
6858 *y = g.h;
6859
      // conversion is done after a successful load so that the same path
      // can be reused for multiple frames.
6862 if (req_comp && req_comp != 4)
6863 u = stbi__convert_format(u, 4, req_comp, g.w, g.h);
6864 } else if (g.out) {
6865 // if there was an error and we allocated an image buffer, free it!
6866 STBI_FREE(g.out);
6867 }
6868
   // free buffers needed for multiple-frame loading
6870 STBI_FREE(g.history);
6871 STBI_FREE(g.background);
6872
6873 return u;
6874}
6875
6876static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp)
6877{
6878 return stbi__gif_info_raw(s,x,y,comp);
6879}
6880#endif
6881
6882// *************************************************************************************************
6883// Radiance RGBE HDR loader
6884// originally by Nicolas Schulz
6885#ifndef STBI_NO_HDR
6886static int stbi__hdr_test_core(stbi__context *s, const char *signature)
6887{
6888 int i;
6889 for (i=0; signature[i]; ++i)
6890 if (stbi__get8(s) != signature[i])
6891 return 0;
6892 stbi__rewind(s);
6893 return 1;
6894}
6895
6896static int stbi__hdr_test(stbi__context* s)
6897{
6898 int r = stbi__hdr_test_core(s, "#?RADIANCE\n");
6899 stbi__rewind(s);
6900 if(!r) {
6901 r = stbi__hdr_test_core(s, "#?RGBE\n");
6902 stbi__rewind(s);
6903 }
6904 return r;
6905}
6906
6907#define STBI__HDR_BUFLEN 1024
6908static char *stbi__hdr_gettoken(stbi__context *z, char *buffer)
6909{
6910 int len=0;
6911 char c = '\0';
6912
6913 c = (char) stbi__get8(z);
6914
6915 while (!stbi__at_eof(z) && c != '\n') {
6916 buffer[len++] = c;
6917 if (len == STBI__HDR_BUFLEN-1) {
6918 // flush to end of line
6919 while (!stbi__at_eof(z) && stbi__get8(z) != '\n')
6920 ;
6921 break;
6922 }
6923 c = (char) stbi__get8(z);
6924 }
6925
6926 buffer[len] = 0;
6927 return buffer;
6928}
6929
6930static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp)
6931{
6932 if ( input[3] != 0 ) {
6933 float f1;
6934 // Exponent
6935 f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8));
6936 if (req_comp <= 2)
6937 output[0] = (input[0] + input[1] + input[2]) * f1 / 3;
6938 else {
6939 output[0] = input[0] * f1;
6940 output[1] = input[1] * f1;
6941 output[2] = input[2] * f1;
6942 }
6943 if (req_comp == 2) output[1] = 1;
6944 if (req_comp == 4) output[3] = 1;
6945 } else {
6946 switch (req_comp) {
6947 case 4: output[3] = 1; /* fallthrough */
6948 case 3: output[0] = output[1] = output[2] = 0;
6949 break;
6950 case 2: output[1] = 1; /* fallthrough */
6951 case 1: output[0] = 0;
6952 break;
6953 }
6954 }
6955}
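// Worked example of the RGBE decode above: the shared exponent byte e scales the
// pixel by 2^(e-128), and each 8-bit mantissa m contributes m/256 of that scale,
// hence the single factor f1 = 2^(e-(128+8)). For rgbe = {128, 64, 0, 129}:
// f1 = 2^(129-136) = 1/128, so the decoded pixel is (1.0, 0.5, 0.0).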
6956
6957static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
6958{
6959 char buffer[STBI__HDR_BUFLEN];
6960 char *token;
6961 int valid = 0;
6962 int width, height;
6963 stbi_uc *scanline;
6964 float *hdr_data;
6965 int len;
6966 unsigned char count, value;
6967 int i, j, k, c1,c2, z;
6968 const char *headerToken;
6969 STBI_NOTUSED(ri);
6970
6971 // Check identifier
6972 headerToken = stbi__hdr_gettoken(s,buffer);
6973 if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0)
6974 return stbi__errpf("not HDR", "Corrupt HDR image");
6975
6976 // Parse header
6977 for(;;) {
6978 token = stbi__hdr_gettoken(s,buffer);
6979 if (token[0] == 0) break;
6980 if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1;
6981 }
6982
6983 if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format");
6984
6985 // Parse width and height
6986 // can't use sscanf() if we're not using stdio!
6987 token = stbi__hdr_gettoken(s,buffer);
6988 if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format");
6989 token += 3;
6990 height = (int) strtol(token, &token, 10);
6991 while (*token == ' ') ++token;
6992 if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format");
6993 token += 3;
6994 width = (int) strtol(token, NULL, 10);
6995
6996 if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)");
6997 if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)");
6998
6999 *x = width;
7000 *y = height;
7001
7002 if (comp) *comp = 3;
7003 if (req_comp == 0) req_comp = 3;
7004
7005 if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0))
7006 return stbi__errpf("too large", "HDR image is too large");
7007
7008 // Read data
7009 hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0);
7010 if (!hdr_data)
7011 return stbi__errpf("outofmem", "Out of memory");
7012
7013 // Load image data
   // image data is stored as some number of scanlines
7015 if ( width < 8 || width >= 32768) {
7016 // Read flat data
7017 for (j=0; j < height; ++j) {
7018 for (i=0; i < width; ++i) {
7019 stbi_uc rgbe[4];
7020 main_decode_loop:
7021 stbi__getn(s, rgbe, 4);
7022 stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp);
7023 }
7024 }
7025 } else {
7026 // Read RLE-encoded data
7027 scanline = NULL;
7028
7029 for (j = 0; j < height; ++j) {
7030 c1 = stbi__get8(s);
7031 c2 = stbi__get8(s);
7032 len = stbi__get8(s);
7033 if (c1 != 2 || c2 != 2 || (len & 0x80)) {
7034 // not run-length encoded, so we have to actually use THIS data as a decoded
7035 // pixel (note this can't be a valid pixel--one of RGB must be >= 128)
7036 stbi_uc rgbe[4];
7037 rgbe[0] = (stbi_uc) c1;
7038 rgbe[1] = (stbi_uc) c2;
7039 rgbe[2] = (stbi_uc) len;
7040 rgbe[3] = (stbi_uc) stbi__get8(s);
7041 stbi__hdr_convert(hdr_data, rgbe, req_comp);
7042 i = 1;
7043 j = 0;
7044 STBI_FREE(scanline);
            goto main_decode_loop; // not RLE after all: finish reading the image as flat data (this first pixel is already emitted)
7046 }
7047 len <<= 8;
7048 len |= stbi__get8(s);
7049 if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); }
7050 if (scanline == NULL) {
7051 scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0);
7052 if (!scanline) {
7053 STBI_FREE(hdr_data);
7054 return stbi__errpf("outofmem", "Out of memory");
7055 }
7056 }
7057
7058 for (k = 0; k < 4; ++k) {
7059 int nleft;
7060 i = 0;
7061 while ((nleft = width - i) > 0) {
7062 count = stbi__get8(s);
7063 if (count > 128) {
7064 // Run
7065 value = stbi__get8(s);
7066 count -= 128;
7067 if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); }
7068 for (z = 0; z < count; ++z)
7069 scanline[i++ * 4 + k] = value;
7070 } else {
7071 // Dump
7072 if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); }
7073 for (z = 0; z < count; ++z)
7074 scanline[i++ * 4 + k] = stbi__get8(s);
7075 }
7076 }
7077 }
7078 for (i=0; i < width; ++i)
7079 stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp);
7080 }
7081 if (scanline)
7082 STBI_FREE(scanline);
7083 }
7084
7085 return hdr_data;
7086}
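// Usage sketch (illustrative only): HDR files are normally reached through the
// public float API declared earlier in this header, which routes into
// stbi__hdr_load and yields linear float data.
#ifdef STBI_HDR_USAGE_EXAMPLE // hypothetical guard: this sketch is never compiled into the library
#include <stdio.h>
static void example_print_luminance(const char *path)
{
   int w, h, n;
   float *rgb = stbi_loadf(path, &w, &h, &n, 3); // request 3 channels of linear floats
   if (rgb) {
      // Rec. 709 luminance of the first pixel, just to show the layout
      printf("first pixel luminance: %f\n", 0.2126f*rgb[0] + 0.7152f*rgb[1] + 0.0722f*rgb[2]);
      stbi_image_free(rgb);
   }
}
#endif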
7087
7088static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp)
7089{
7090 char buffer[STBI__HDR_BUFLEN];
7091 char *token;
7092 int valid = 0;
7093 int dummy;
7094
7095 if (!x) x = &dummy;
7096 if (!y) y = &dummy;
7097 if (!comp) comp = &dummy;
7098
7099 if (stbi__hdr_test(s) == 0) {
7100 stbi__rewind( s );
7101 return 0;
7102 }
7103
7104 for(;;) {
7105 token = stbi__hdr_gettoken(s,buffer);
7106 if (token[0] == 0) break;
7107 if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1;
7108 }
7109
7110 if (!valid) {
7111 stbi__rewind( s );
7112 return 0;
7113 }
7114 token = stbi__hdr_gettoken(s,buffer);
7115 if (strncmp(token, "-Y ", 3)) {
7116 stbi__rewind( s );
7117 return 0;
7118 }
7119 token += 3;
7120 *y = (int) strtol(token, &token, 10);
7121 while (*token == ' ') ++token;
7122 if (strncmp(token, "+X ", 3)) {
7123 stbi__rewind( s );
7124 return 0;
7125 }
7126 token += 3;
7127 *x = (int) strtol(token, NULL, 10);
7128 *comp = 3;
7129 return 1;
7130}
7131#endif // STBI_NO_HDR
7132
7133#ifndef STBI_NO_BMP
7134static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp)
7135{
7136 void *p;
7137 stbi__bmp_data info;
7138
7139 info.all_a = 255;
7140 p = stbi__bmp_parse_header(s, &info);
7141 stbi__rewind( s );
7142 if (p == NULL)
7143 return 0;
7144 if (x) *x = s->img_x;
7145 if (y) *y = s->img_y;
7146 if (comp) {
7147 if (info.bpp == 24 && info.ma == 0xff000000)
7148 *comp = 3;
7149 else
7150 *comp = info.ma ? 4 : 3;
7151 }
7152 return 1;
7153}
7154#endif
7155
7156#ifndef STBI_NO_PSD
7157static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp)
7158{
7159 int channelCount, dummy, depth;
7160 if (!x) x = &dummy;
7161 if (!y) y = &dummy;
7162 if (!comp) comp = &dummy;
7163 if (stbi__get32be(s) != 0x38425053) {
7164 stbi__rewind( s );
7165 return 0;
7166 }
7167 if (stbi__get16be(s) != 1) {
7168 stbi__rewind( s );
7169 return 0;
7170 }
7171 stbi__skip(s, 6);
7172 channelCount = stbi__get16be(s);
7173 if (channelCount < 0 || channelCount > 16) {
7174 stbi__rewind( s );
7175 return 0;
7176 }
7177 *y = stbi__get32be(s);
7178 *x = stbi__get32be(s);
7179 depth = stbi__get16be(s);
7180 if (depth != 8 && depth != 16) {
7181 stbi__rewind( s );
7182 return 0;
7183 }
7184 if (stbi__get16be(s) != 3) {
7185 stbi__rewind( s );
7186 return 0;
7187 }
7188 *comp = 4;
7189 return 1;
7190}
7191
7192static int stbi__psd_is16(stbi__context *s)
7193{
7194 int channelCount, depth;
7195 if (stbi__get32be(s) != 0x38425053) {
7196 stbi__rewind( s );
7197 return 0;
7198 }
7199 if (stbi__get16be(s) != 1) {
7200 stbi__rewind( s );
7201 return 0;
7202 }
7203 stbi__skip(s, 6);
7204 channelCount = stbi__get16be(s);
7205 if (channelCount < 0 || channelCount > 16) {
7206 stbi__rewind( s );
7207 return 0;
7208 }
7209 (void) stbi__get32be(s);
7210 (void) stbi__get32be(s);
7211 depth = stbi__get16be(s);
7212 if (depth != 16) {
7213 stbi__rewind( s );
7214 return 0;
7215 }
7216 return 1;
7217}
7218#endif
7219
7220#ifndef STBI_NO_PIC
7221static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp)
7222{
7223 int act_comp=0,num_packets=0,chained,dummy;
7224 stbi__pic_packet packets[10];
7225
7226 if (!x) x = &dummy;
7227 if (!y) y = &dummy;
7228 if (!comp) comp = &dummy;
7229
7230 if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) {
7231 stbi__rewind(s);
7232 return 0;
7233 }
7234
7235 stbi__skip(s, 88);
7236
7237 *x = stbi__get16be(s);
7238 *y = stbi__get16be(s);
7239 if (stbi__at_eof(s)) {
7240 stbi__rewind( s);
7241 return 0;
7242 }
7243 if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) {
7244 stbi__rewind( s );
7245 return 0;
7246 }
7247
7248 stbi__skip(s, 8);
7249
7250 do {
7251 stbi__pic_packet *packet;
7252
7253 if (num_packets==sizeof(packets)/sizeof(packets[0]))
7254 return 0;
7255
7256 packet = &packets[num_packets++];
7257 chained = stbi__get8(s);
7258 packet->size = stbi__get8(s);
7259 packet->type = stbi__get8(s);
7260 packet->channel = stbi__get8(s);
7261 act_comp |= packet->channel;
7262
7263 if (stbi__at_eof(s)) {
7264 stbi__rewind( s );
7265 return 0;
7266 }
7267 if (packet->size != 8) {
7268 stbi__rewind( s );
7269 return 0;
7270 }
7271 } while (chained);
7272
7273 *comp = (act_comp & 0x10 ? 4 : 3);
7274
7275 return 1;
7276}
7277#endif
7278
7279// *************************************************************************************************
7280// Portable Gray Map and Portable Pixel Map loader
7281// by Ken Miller
7282//
7283// PGM: http://netpbm.sourceforge.net/doc/pgm.html
7284// PPM: http://netpbm.sourceforge.net/doc/ppm.html
7285//
// Known limitations:
//    Does not support ASCII image data (formats P2 and P3)
//    Does not support 16-bit-per-channel
// (header comments *are* handled: stbi__pnm_skip_whitespace below skips '#' to end of line)
7290
7291#ifndef STBI_NO_PNM
7292
7293static int stbi__pnm_test(stbi__context *s)
7294{
7295 char p, t;
7296 p = (char) stbi__get8(s);
7297 t = (char) stbi__get8(s);
7298 if (p != 'P' || (t != '5' && t != '6')) {
7299 stbi__rewind( s );
7300 return 0;
7301 }
7302 return 1;
7303}
7304
7305static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
7306{
7307 stbi_uc *out;
7308 STBI_NOTUSED(ri);
7309
7310 if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n))
7311 return 0;
7312
7313 if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
7314 if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
7315
7316 *x = s->img_x;
7317 *y = s->img_y;
7318 if (comp) *comp = s->img_n;
7319
7320 if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0))
7321 return stbi__errpuc("too large", "PNM too large");
7322
7323 out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0);
7324 if (!out) return stbi__errpuc("outofmem", "Out of memory");
   if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y)) {
      STBI_FREE(out);
      return stbi__errpuc("bad PNM", "PNM file truncated");
   }
7326
7327 if (req_comp && req_comp != s->img_n) {
7328 out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y);
7329 if (out == NULL) return out; // stbi__convert_format frees input on failure
7330 }
7331 return out;
7332}
7333
7334static int stbi__pnm_isspace(char c)
7335{
7336 return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r';
7337}
7338
7339static void stbi__pnm_skip_whitespace(stbi__context *s, char *c)
7340{
7341 for (;;) {
7342 while (!stbi__at_eof(s) && stbi__pnm_isspace(*c))
7343 *c = (char) stbi__get8(s);
7344
7345 if (stbi__at_eof(s) || *c != '#')
7346 break;
7347
7348 while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' )
7349 *c = (char) stbi__get8(s);
7350 }
7351}
7352
7353static int stbi__pnm_isdigit(char c)
7354{
7355 return c >= '0' && c <= '9';
7356}
7357
7358static int stbi__pnm_getinteger(stbi__context *s, char *c)
7359{
7360 int value = 0;
7361
7362 while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) {
7363 value = value*10 + (*c - '0');
7364 *c = (char) stbi__get8(s);
7365 }
7366
7367 return value;
7368}
7369
7370static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp)
7371{
7372 int maxv, dummy;
7373 char c, p, t;
7374
7375 if (!x) x = &dummy;
7376 if (!y) y = &dummy;
7377 if (!comp) comp = &dummy;
7378
7379 stbi__rewind(s);
7380
7381 // Get identifier
7382 p = (char) stbi__get8(s);
7383 t = (char) stbi__get8(s);
7384 if (p != 'P' || (t != '5' && t != '6')) {
7385 stbi__rewind(s);
7386 return 0;
7387 }
7388
7389 *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm
7390
7391 c = (char) stbi__get8(s);
7392 stbi__pnm_skip_whitespace(s, &c);
7393
7394 *x = stbi__pnm_getinteger(s, &c); // read width
7395 stbi__pnm_skip_whitespace(s, &c);
7396
7397 *y = stbi__pnm_getinteger(s, &c); // read height
7398 stbi__pnm_skip_whitespace(s, &c);
7399
7400 maxv = stbi__pnm_getinteger(s, &c); // read max value
7401
7402 if (maxv > 255)
7403 return stbi__err("max value > 255", "PPM image not 8-bit");
7404 else
7405 return 1;
7406}
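// For reference, the binary PPM/PGM header shape this parser accepts (informal
// sketch; '#' comments are consumed by stbi__pnm_skip_whitespace above):
//
//    P6            magic ('P5' = 1-channel PGM, 'P6' = 3-channel PPM)
//    # a comment
//    640 480       width and height in ASCII decimal
//    255           max sample value; only values <= 255 (8-bit) are accepted
//
// followed by a single whitespace byte and then width*height*comp raw bytes.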
7407#endif
7408
7409static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp)
7410{
7411 #ifndef STBI_NO_JPEG
7412 if (stbi__jpeg_info(s, x, y, comp)) return 1;
7413 #endif
7414
7415 #ifndef STBI_NO_PNG
7416 if (stbi__png_info(s, x, y, comp)) return 1;
7417 #endif
7418
7419 #ifndef STBI_NO_GIF
7420 if (stbi__gif_info(s, x, y, comp)) return 1;
7421 #endif
7422
7423 #ifndef STBI_NO_BMP
7424 if (stbi__bmp_info(s, x, y, comp)) return 1;
7425 #endif
7426
7427 #ifndef STBI_NO_PSD
7428 if (stbi__psd_info(s, x, y, comp)) return 1;
7429 #endif
7430
7431 #ifndef STBI_NO_PIC
7432 if (stbi__pic_info(s, x, y, comp)) return 1;
7433 #endif
7434
7435 #ifndef STBI_NO_PNM
7436 if (stbi__pnm_info(s, x, y, comp)) return 1;
7437 #endif
7438
7439 #ifndef STBI_NO_HDR
7440 if (stbi__hdr_info(s, x, y, comp)) return 1;
7441 #endif
7442
7443 // test tga last because it's a crappy test!
7444 #ifndef STBI_NO_TGA
7445 if (stbi__tga_info(s, x, y, comp))
7446 return 1;
7447 #endif
7448 return stbi__err("unknown image type", "Image not of any known type, or corrupt");
7449}
7450
7451static int stbi__is_16_main(stbi__context *s)
7452{
7453 #ifndef STBI_NO_PNG
7454 if (stbi__png_is16(s)) return 1;
7455 #endif
7456
7457 #ifndef STBI_NO_PSD
7458 if (stbi__psd_is16(s)) return 1;
7459 #endif
7460
7461 return 0;
7462}
7463
7464#ifndef STBI_NO_STDIO
7465STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp)
7466{
7467 FILE *f = stbi__fopen(filename, "rb");
7468 int result;
7469 if (!f) return stbi__err("can't fopen", "Unable to open file");
7470 result = stbi_info_from_file(f, x, y, comp);
7471 fclose(f);
7472 return result;
7473}
7474
7475STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp)
7476{
7477 int r;
7478 stbi__context s;
7479 long pos = ftell(f);
7480 stbi__start_file(&s, f);
7481 r = stbi__info_main(&s,x,y,comp);
7482 fseek(f,pos,SEEK_SET);
7483 return r;
7484}
7485
7486STBIDEF int stbi_is_16_bit(char const *filename)
7487{
7488 FILE *f = stbi__fopen(filename, "rb");
7489 int result;
7490 if (!f) return stbi__err("can't fopen", "Unable to open file");
7491 result = stbi_is_16_bit_from_file(f);
7492 fclose(f);
7493 return result;
7494}
7495
7496STBIDEF int stbi_is_16_bit_from_file(FILE *f)
7497{
7498 int r;
7499 stbi__context s;
7500 long pos = ftell(f);
7501 stbi__start_file(&s, f);
7502 r = stbi__is_16_main(&s);
7503 fseek(f,pos,SEEK_SET);
7504 return r;
7505}
7506#endif // !STBI_NO_STDIO
7507
7508STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp)
7509{
7510 stbi__context s;
7511 stbi__start_mem(&s,buffer,len);
7512 return stbi__info_main(&s,x,y,comp);
7513}
7514
7515STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp)
7516{
7517 stbi__context s;
7518 stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user);
7519 return stbi__info_main(&s,x,y,comp);
7520}
7521
7522STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len)
7523{
7524 stbi__context s;
7525 stbi__start_mem(&s,buffer,len);
7526 return stbi__is_16_main(&s);
7527}
7528
7529STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user)
7530{
7531 stbi__context s;
7532 stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user);
7533 return stbi__is_16_main(&s);
7534}
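// Usage sketch (illustrative only): probing an in-memory file before committing
// to a full decode, via the public query functions defined above.
#ifdef STBI_INFO_USAGE_EXAMPLE // hypothetical guard: this sketch is never compiled into the library
#include <stdio.h>
static void example_probe(stbi_uc const *buf, int len)
{
   int w, h, comp;
   if (stbi_info_from_memory(buf, len, &w, &h, &comp))
      printf("%dx%d, %d channel(s), %s\n", w, h, comp,
             stbi_is_16_bit_from_memory(buf, len) ? "16-bit" : "8-bit");
   else
      printf("unrecognized image: %s\n", stbi_failure_reason());
}
#endif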
7535
7536#endif // STB_IMAGE_IMPLEMENTATION
7537
7538/*
7539 revision history:
7540 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs
7541 2.19 (2018-02-11) fix warning
7542 2.18 (2018-01-30) fix warnings
      2.17  (2018-01-29) change stbi__shiftsigned to avoid clang -O2 bug
7544 1-bit BMP
7545 *_is_16_bit api
7546 avoid warnings
7547 2.16 (2017-07-23) all functions have 16-bit variants;
7548 STBI_NO_STDIO works again;
7549 compilation fixes;
7550 fix rounding in unpremultiply;
7551 optimize vertical flip;
7552 disable raw_len validation;
7553 documentation fixes
7554 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode;
7555 warning fixes; disable run-time SSE detection on gcc;
7556 uniform handling of optional "return" values;
7557 thread-safe initialization of zlib tables
7558 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs
7559 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now
7560 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes
7561 2.11 (2016-04-02) allocate large structures on the stack
7562 remove white matting for transparent PSD
7563 fix reported channel count for PNG & BMP
7564 re-enable SSE2 in non-gcc 64-bit
7565 support RGB-formatted JPEG
7566 read 16-bit PNGs (only as 8-bit)
7567 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED
7568 2.09 (2016-01-16) allow comments in PNM files
7569 16-bit-per-pixel TGA (not bit-per-component)
7570 info() for TGA could break due to .hdr handling
                        info() for BMP shares code with the loader instead of a sloppy parse
7572 can use STBI_REALLOC_SIZED if allocator doesn't support realloc
7573 code cleanup
7574 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA
7575 2.07 (2015-09-13) fix compiler warnings
7576 partial animated GIF support
7577 limited 16-bpc PSD support
7578 #ifdef unused functions
7579 bug with < 92 byte PIC,PNM,HDR,TGA
7580 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value
7581 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning
7582 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit
7583 2.03 (2015-04-12) extra corruption checking (mmozeiko)
7584 stbi_set_flip_vertically_on_load (nguillemot)
7585 fix NEON support; fix mingw support
7586 2.02 (2015-01-19) fix incorrect assert, fix warning
7587 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2
7588 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG
7589 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg)
7590 progressive JPEG (stb)
7591 PGM/PPM support (Ken Miller)
7592 STBI_MALLOC,STBI_REALLOC,STBI_FREE
7593 GIF bugfix -- seemingly never worked
7594 STBI_NO_*, STBI_ONLY_*
7595 1.48 (2014-12-14) fix incorrectly-named assert()
7596 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb)
7597 optimize PNG (ryg)
7598 fix bug in interlaced PNG with user-specified channel count (stb)
7599 1.46 (2014-08-26)
7600 fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG
7601 1.45 (2014-08-16)
7602 fix MSVC-ARM internal compiler error by wrapping malloc
7603 1.44 (2014-08-07)
7604 various warning fixes from Ronny Chevalier
7605 1.43 (2014-07-15)
7606 fix MSVC-only compiler problem in code changed in 1.42
7607 1.42 (2014-07-09)
7608 don't define _CRT_SECURE_NO_WARNINGS (affects user code)
7609 fixes to stbi__cleanup_jpeg path
7610 added STBI_ASSERT to avoid requiring assert.h
7611 1.41 (2014-06-25)
7612 fix search&replace from 1.36 that messed up comments/error messages
7613 1.40 (2014-06-22)
7614 fix gcc struct-initialization warning
7615 1.39 (2014-06-15)
7616 fix to TGA optimization when req_comp != number of components in TGA;
7617 fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite)
7618 add support for BMP version 5 (more ignored fields)
7619 1.38 (2014-06-06)
7620 suppress MSVC warnings on integer casts truncating values
7621 fix accidental rename of 'skip' field of I/O
7622 1.37 (2014-06-04)
7623 remove duplicate typedef
7624 1.36 (2014-06-03)
7625 convert to header file single-file library
7626 if de-iphone isn't set, load iphone images color-swapped instead of returning NULL
7627 1.35 (2014-05-27)
7628 various warnings
7629 fix broken STBI_SIMD path
7630 fix bug where stbi_load_from_file no longer left file pointer in correct place
7631 fix broken non-easy path for 32-bit BMP (possibly never used)
7632 TGA optimization by Arseny Kapoulkine
7633 1.34 (unknown)
7634 use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case
7635 1.33 (2011-07-14)
7636 make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements
7637 1.32 (2011-07-13)
7638 support for "info" function for all supported filetypes (SpartanJ)
7639 1.31 (2011-06-20)
7640 a few more leak fixes, bug in PNG handling (SpartanJ)
7641 1.30 (2011-06-11)
             added ability to load files via callbacks to accommodate custom input streams (Ben Wenger)
7643 removed deprecated format-specific test/load functions
7644 removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway
7645 error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha)
7646 fix inefficiency in decoding 32-bit BMP (David Woo)
7647 1.29 (2010-08-16)
7648 various warning fixes from Aurelien Pocheville
7649 1.28 (2010-08-01)
7650 fix bug in GIF palette transparency (SpartanJ)
7651 1.27 (2010-08-01)
7652 cast-to-stbi_uc to fix warnings
7653 1.26 (2010-07-24)
7654 fix bug in file buffering for PNG reported by SpartanJ
7655 1.25 (2010-07-17)
7656 refix trans_data warning (Won Chun)
7657 1.24 (2010-07-12)
7658 perf improvements reading from files on platforms with lock-heavy fgetc()
7659 minor perf improvements for jpeg
7660 deprecated type-specific functions so we'll get feedback if they're needed
7661 attempt to fix trans_data warning (Won Chun)
7662 1.23 fixed bug in iPhone support
7663 1.22 (2010-07-10)
7664 removed image *writing* support
7665 stbi_info support from Jetro Lauha
7666 GIF support from Jean-Marc Lienher
7667 iPhone PNG-extensions from James Brown
             warning-fixes from Nicolas Schulz and Janez Zemva (i.e. Janez Žemva)
7669 1.21 fix use of 'stbi_uc' in header (reported by jon blow)
7670 1.20 added support for Softimage PIC, by Tom Seddon
7671 1.19 bug in interlaced PNG corruption check (found by ryg)
7672 1.18 (2008-08-02)
7673 fix a threading bug (local mutable static)
7674 1.17 support interlaced PNG
7675 1.16 major bugfix - stbi__convert_format converted one too many pixels
7676 1.15 initialize some fields for thread safety
7677 1.14 fix threadsafe conversion bug
7678 header-file-only version (#define STBI_HEADER_FILE_ONLY before including)
7679 1.13 threadsafe
7680 1.12 const qualifiers in the API
7681 1.11 Support installable IDCT, colorspace conversion routines
7682 1.10 Fixes for 64-bit (don't use "unsigned long")
7683 optimized upsampling by Fabian "ryg" Giesen
7684 1.09 Fix format-conversion for PSD code (bad global variables!)
7685 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz
7686 1.07 attempt to fix C++ warning/errors again
7687 1.06 attempt to fix C++ warning/errors again
7688 1.05 fix TGA loading to return correct *comp and use good luminance calc
7689 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free
7690 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR
7691 1.02 support for (subset of) HDR files, float interface for preferred access to them
7692 1.01 fix bug: possible bug in handling right-side up bmps... not sure
7693 fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all
7694 1.00 interface to zlib that skips zlib header
7695 0.99 correct handling of alpha in palette
7696 0.98 TGA loader by lonesock; dynamically add loaders (untested)
7697 0.97 jpeg errors on too large a file; also catch another malloc failure
7698 0.96 fix detection of invalid v value - particleman@mollyrocket forum
7699 0.95 during header scan, seek to markers in case of padding
7700 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same
7701 0.93 handle jpegtran output; verbose errors
7702 0.92 read 4,8,16,24,32-bit BMP files of several formats
7703 0.91 output 24-bit Windows 3.0 BMP files
7704 0.90 fix a few more warnings; bump version number to approach 1.0
7705 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd
7706 0.60 fix compiling as c++
7707 0.59 fix warnings: merge Dave Moore's -Wall fixes
7708 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian
7709 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available
7710 0.56 fix bug: zlib uncompressed mode len vs. nlen
7711 0.55 fix bug: restart_interval not initialized to 0
7712 0.54 allow NULL for 'int *comp'
7713 0.53 fix bug in png 3->4; speedup png decoding
7714 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments
7715 0.51 obey req_comp requests, 1-component jpegs return as 1-component,
7716 on 'test' only check type, not whether we support this variant
7717 0.50 (2006-11-19)
7718 first released version
7719*/
7720
7721
7722/*
7723------------------------------------------------------------------------------
7724This software is available under 2 licenses -- choose whichever you prefer.
7725------------------------------------------------------------------------------
7726ALTERNATIVE A - MIT License
7727Copyright (c) 2017 Sean Barrett
7728Permission is hereby granted, free of charge, to any person obtaining a copy of
7729this software and associated documentation files (the "Software"), to deal in
7730the Software without restriction, including without limitation the rights to
7731use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
7732of the Software, and to permit persons to whom the Software is furnished to do
7733so, subject to the following conditions:
7734The above copyright notice and this permission notice shall be included in all
7735copies or substantial portions of the Software.
7736THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
7737IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
7738FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
7739AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
7740LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
7741OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
7742SOFTWARE.
7743------------------------------------------------------------------------------
7744ALTERNATIVE B - Public Domain (www.unlicense.org)
7745This is free and unencumbered software released into the public domain.
7746Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
7747software, either in source code form or as a compiled binary, for any purpose,
7748commercial or non-commercial, and by any means.
7749In jurisdictions that recognize copyright laws, the author or authors of this
7750software dedicate any and all copyright interest in the software to the public
7751domain. We make this dedication for the benefit of the public at large and to
7752the detriment of our heirs and successors. We intend this dedication to be an
7753overt act of relinquishment in perpetuity of all present and future rights to
7754this software under copyright law.
7755THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
7756IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
7757FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
7758AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
7759ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
7760WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
7761------------------------------------------------------------------------------
7762*/